{ "auto_mapping": null, "base_model_name_or_path": "meta-llama/Llama-2-7b-hf", "encoder_dropout": 0.0, "encoder_hidden_size": 128, "encoder_num_layers": 2, "encoder_reparameterization_type": "MLP", "inference_mode": true, "num_attention_heads": 32, "num_layers": 32, "num_transformer_submodules": 1, "num_virtual_tokens": 20, "peft_type": "P_TUNING", "revision": null, "task_type": "SEQ_CLS", "token_dim": 4096 }