{ "_name_or_path": "mpt-340m-pruned", "apply_residual_connection_post_layernorm": false, "architectures": [ "MptForCausalLM" ], "attention_matrix_factorization_rank": 128, "attn_config": { "alibi_bias_max": 16, "clip_qkv": 6, "model_type": "" }, "attn_pdrop": 0.0, "auto_map": { "AutoConfig": "mosaicml/mpt-7b-storywriter--configuration_mpt.MPTConfig", "AutoModelForCausalLM": "mosaicml/mpt-7b-storywriter--modeling_mpt.MPTForCausalLM" }, "d_model": 1280, "emb_pdrop": 0, "embedding_fraction": 1.0, "expansion_ratio": 4, "feedforward_matrix_factorization_rank": 128, "init_config": { "emb_init_std": null, "emb_init_uniform_lim": null, "fan_mode": "fan_in", "init_div_is_residual": true, "init_gain": 0, "init_nonlinearity": "relu", "init_std": 0.02, "name": "kaiming_normal_", "verbose": 0 }, "init_device": "cpu", "initializer_range": 0.02, "layer_norm_epsilon": 1e-05, "learned_pos_emb": true, "logit_scale": null, "max_seq_len": 65536, "model_type": "mpt", "n_heads": 20, "n_layers": 14, "no_bias": true, "norm_type": "low_precision_layernorm", "quantization_aware_training": true, "resid_pdrop": 0.0, "tokenizer_name": "EleutherAI/gpt-neox-20b", "torch_dtype": "float16", "transformers_version": "4.44.2", "use_cache": true, "use_early_exiting": true, "use_reversible_layers": true, "verbose": 0, "vocab_size": 50432 }