Update README.md
README.md (changed)
@@ -23,7 +23,7 @@ import transformers
 tokenizer = transformers.AutoTokenizer.from_pretrained("RekaAI/reka-flash-3")
 model = transformers.AutoModelForCausalLM.from_pretrained("RekaAI/reka-flash-3", torch_dtype='auto', device_map='auto')
 
-prompt = {"role": "
+prompt = {"role": "human", "content": "Write a poem about large language model."}
 text = tokenizer.apply_chat_template([prompt], tokenize=False, add_generation_prompt=True)
 model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
 outputs = model.generate(**model_inputs, max_new_tokens=512)
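For context, the README's usage snippet after this change reads roughly like the sketch below. The final decode/print step is an assumption added here to make the example self-contained; it is not part of the diffed hunk.

import transformers

# Load the tokenizer and model; device_map='auto' places weights on available devices.
tokenizer = transformers.AutoTokenizer.from_pretrained("RekaAI/reka-flash-3")
model = transformers.AutoModelForCausalLM.from_pretrained("RekaAI/reka-flash-3", torch_dtype='auto', device_map='auto')

# Single-turn prompt; the "human" role comes from the new line in this commit.
prompt = {"role": "human", "content": "Write a poem about large language model."}
text = tokenizer.apply_chat_template([prompt], tokenize=False, add_generation_prompt=True)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
outputs = model.generate(**model_inputs, max_new_tokens=512)

# Assumed addition (not in the diff): decode only the newly generated tokens.
print(tokenizer.decode(outputs[0][model_inputs.input_ids.shape[1]:], skip_special_tokens=True))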