{ "architectures": ["LWM"], "hidden_size": 64, "num_attention_heads": 12, "num_hidden_layers": 12 }