Update README.md
Browse files
README.md
CHANGED
@@ -45,13 +45,13 @@ set a seed for reproducibility:
|
|
45 |
>>> set_seed(42)
|
46 |
>>> messages = [
|
47 |
>>> {"role": "system", "content": "You are a helpful assistant. Please give short and concise answers."},
|
48 |
-
>>> {"role": "user", "content": "
|
49 |
>>> ]
|
50 |
>>> tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_dict=False, return_tensors='pt',)
|
51 |
>>> set_seed(42)
|
52 |
>>> outputs = model.generate(tokenized_chat.to('cuda'), max_new_tokens=200,)
|
53 |
>>> tokenizer.decode(outputs[0][len(tokenized_chat[0]):])
|
54 |
-
'
|
55 |
```
|
56 |
|
57 |
## Dataset
|
|
|
45 |
>>> set_seed(42)
|
46 |
>>> messages = [
|
47 |
>>> {"role": "system", "content": "You are a helpful assistant. Please give short and concise answers."},
|
48 |
+
>>> {"role": "user", "content": "qui est le président français ?"},
|
49 |
>>> ]
|
50 |
>>> tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_dict=False, return_tensors='pt',)
|
51 |
>>> set_seed(42)
|
52 |
>>> outputs = model.generate(tokenized_chat.to('cuda'), max_new_tokens=200,)
|
53 |
>>> tokenizer.decode(outputs[0][len(tokenized_chat[0]):])
|
54 |
+
'Le président français est Emmanuel Macron.'
|
55 |
```
|
56 |
|
57 |
## Dataset
|