Update to the new URL for model v5
config/architectures.json CHANGED

@@ -90,6 +90,13 @@
       "steps": [
         {"class": "HFInferenceEndpoint", "params": {"endpoint_url": "https://w5sw8v98v6nrx09k.eu-west-1.aws.endpoints.huggingface.cloud","model_name": "Fine-Tuned Meta Llama 2 chat", "system_prompt": "You are a helpful domestic appliance advisor for the ElectroHome company. Please answer customer questions and do not mention other brands. If you cannot answer please say so.", "max_new_tokens": 1000, "prompt_style": "raw"}}
       ]
+    },
+    {
+      "name": "11. Fine-tuning test 5 - raw passthrough",
+      "description": "Testing fine-tuning v 3",
+      "steps": [
+        {"class": "HFInferenceEndpoint", "params": {"endpoint_url": "https://pgzu02dvzupp5sml.eu-west-1.aws.endpoints.huggingface.cloud","model_name": "Fine-Tuned Meta Llama 2 chat", "system_prompt": "You are a helpful domestic appliance advisor for the ElectroHome company. Please answer customer questions and do not mention other brands. If you cannot answer please say so.", "max_new_tokens": 1000, "prompt_style": "raw"}}
+      ]
     }
   ]
 }
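The new entry keeps the existing fine-tuned Llama 2 step but points architecture "11. Fine-tuning test 5 - raw passthrough" at a fresh inference endpoint URL (pgzu02dvzupp5sml…) with the same raw prompt_style. As a rough sketch of how one of these config entries could be read and exercised, assuming a top-level "architectures" key, a User:/Bot: prompt shape, and the standard text-generation response format (the two helpers below are illustrative, not code from this repo):

```python
# Sketch only: load one architecture from config/architectures.json and query
# its Hugging Face Inference Endpoint. The "architectures" key, the prompt
# shape, and the response parsing are assumptions, not confirmed by this diff.
import json
import requests


def load_architecture(config_path: str, name_prefix: str) -> dict:
    """Return the first architecture whose "name" starts with name_prefix."""
    with open(config_path) as f:
        config = json.load(f)
    for arch in config["architectures"]:  # top-level key assumed
        if arch["name"].startswith(name_prefix):
            return arch
    raise KeyError(f"no architecture named like {name_prefix!r}")


def run_step(step: dict, question: str, hf_token: str) -> str:
    """Send a question to the step's inference endpoint and return the text."""
    params = step["params"]
    payload = {
        "inputs": f"{params['system_prompt']}\nUser: {question}\nBot:",
        "parameters": {"max_new_tokens": params["max_new_tokens"]},
    }
    resp = requests.post(
        params["endpoint_url"],
        headers={"Authorization": f"Bearer {hf_token}"},
        json=payload,
        timeout=60,
    )
    resp.raise_for_status()
    # Text-generation endpoints typically return [{"generated_text": "..."}]
    return resp.json()[0]["generated_text"]


# Example usage (token and question are placeholders):
# arch = load_architecture("config/architectures.json", "11.")
# answer = run_step(arch["steps"][0], "Which vacuum filter should I buy?", "<HF_TOKEN>")
```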
src/training/prep_finetuning.py CHANGED

@@ -180,7 +180,7 @@ def training_string_from_q_and_a(q: str, a: str, sys_prompt: str = None) -> str:
     Build the single llama formatted training string from a question
     answer pair
     """
-    return f'
+    return f'User: {q}\nBot: {a}'
 
 
 def fine_tuning_out_dir(out_model: str) -> str:
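The removed return value is cut off in the diff view above; the replacement emits a plain User:/Bot: pair rather than a Llama chat-tagged string, matching the raw-passthrough architecture added in architectures.json. A small self-contained sketch of how that training string lines up with a raw prompt at inference time (build_raw_prompt and the sample question/answer are hypothetical, for illustration only):

```python
# Sketch: the updated training string next to a hypothetical raw prompt.
# Only training_string_from_q_and_a's visible return line comes from the diff.

def training_string_from_q_and_a(q: str, a: str, sys_prompt: str = None) -> str:
    """
    Build the single llama formatted training string from a question
    answer pair
    """
    # Per the diff, the updated helper emits a plain User/Bot pair;
    # sys_prompt is not used in the visible return line.
    return f'User: {q}\nBot: {a}'


def build_raw_prompt(system_prompt: str, question: str) -> str:
    """Hypothetical helper: the string a raw prompt_style endpoint might
    receive, ending at 'Bot:' so the fine-tuned model completes the answer."""
    return f'{system_prompt}\nUser: {question}\nBot:'


if __name__ == "__main__":
    print(training_string_from_q_and_a(
        "My dishwasher is leaking, what should I check?",
        "First check that the door seal is clean and seated correctly.",
    ))
    # User: My dishwasher is leaking, what should I check?
    # Bot: First check that the door seal is clean and seated correctly.
```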