Update README.md
README.md
CHANGED
@@ -39,12 +39,12 @@ Install the transformers library and load Aya Expanse 8B as follows:
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM

-model_id = "Svngoku/
+model_id = "Svngoku/French-Aya-Expanse-8B"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)

 # Format the message with the chat template
-messages = [{"role": "user", "content": "
+messages = [{"role": "user", "content": "Quels est la superfice de Paris"}]
 input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
 ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Anneme onu ne kadar sevdiğimi anlatan bir mektup yaz<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
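The diff ends before the generation step, so for context, here is a minimal continuation sketch using the standard `transformers` generate/decode API. It assumes the `model`, `tokenizer`, and `input_ids` objects defined in the block above; the `max_new_tokens`, `do_sample`, and `temperature` values are illustrative assumptions, not part of this commit.

```python
# Continuation sketch (not part of this diff): generate and decode a reply.
# Sampling settings below are illustrative assumptions.
gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,
)

# Decode only the newly generated tokens, skipping the prompt portion.
gen_text = tokenizer.decode(gen_tokens[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(gen_text)
```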