yhyu13 committed on
Commit
3219ee8
·
1 Parent(s): 3618f17

Adjust model name

Browse files
Files changed (2) hide show
  1. README.md +2 -2
  2. adapter_config.json +2 -2
README.md CHANGED
@@ -5,7 +5,7 @@ tags:
5
  - llama-factory
6
  - lora
7
  - generated_from_trainer
8
- base_model: ./models/phi-2
9
  model-index:
10
  - name: phi-2-sft-alpaca_gpt4_en-1
11
  results: []
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # phi-2-sft-alpaca_gpt4_en-1
18
 
19
- This model is a fine-tuned version of [./models/phi-2](https://huggingface.co/./models/phi-2) on the alpaca_gpt4_en dataset.
20
  It achieves the following results on the evaluation set:
21
  - Loss: 0.8625
22
 
 
5
  - llama-factory
6
  - lora
7
  - generated_from_trainer
8
+ base_model: microsoft/phi-2
9
  model-index:
10
  - name: phi-2-sft-alpaca_gpt4_en-1
11
  results: []
 
16
 
17
  # phi-2-sft-alpaca_gpt4_en-1
18
 
19
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the alpaca_gpt4_en dataset.
20
  It achieves the following results on the evaluation set:
21
  - Loss: 0.8625
22
 
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
- "base_model_name_or_path": "./models/phi-2",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
@@ -22,4 +22,4 @@
22
  "Wqkv"
23
  ],
24
  "task_type": "CAUSAL_LM"
25
- }
 
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
+ "base_model_name_or_path": "microsoft/phi-2",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
 
22
  "Wqkv"
23
  ],
24
  "task_type": "CAUSAL_LM"
25
+ }