---
library_name: transformers
license: apache-2.0
datasets:
  - Vikhrmodels/GrandMaster-PRO-MAX
language:
  - ru
base_model:
  - HuggingFaceTB/SmolLM2-360M-Instruct
pipeline_tag: text-generation
---

v1: SFT, commit `7658aab7702e56d9f5fa3b33bf7adcdae92f536b`
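For reference, a minimal sketch of what such an SFT run could look like with TRL's `SFTTrainer`; the split, column handling, and output path below are assumptions, not the recipe actually used for this checkpoint:

```python
# Hypothetical sketch, not the actual training script for this model.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Dataset named in the card metadata; the split and schema are assumptions.
dataset = load_dataset("Vikhrmodels/GrandMaster-PRO-MAX", split="train")
# Depending on the dataset's schema, its dialogue column may need to be
# renamed to "messages" so SFTTrainer applies the chat template automatically.

trainer = SFTTrainer(
    model="HuggingFaceTB/SmolLM2-360M-Instruct",  # base model from the card metadata
    train_dataset=dataset,
    args=SFTConfig(output_dir="smollm2-360m-grandmaster-sft"),  # illustrative path
)
trainer.train()
```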


Inference with Transformers (note that `checkpoint` here points at the base model):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-360M-Instruct"

device = "cuda"  # for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

messages = [{"role": "user", "content": "What is the capital of France?"}]
# add_generation_prompt opens the assistant turn so the model completes it
input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(input_text)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0]))
```
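Note that `tokenizer.decode(outputs[0])` prints the prompt together with the completion; to show only the newly generated tokens, slice off the prompt first, e.g. `tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)`.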