WillHeld committed on
Commit
a60fda2
·
verified ·
1 Parent(s): 8c2d4de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -10,7 +10,7 @@ model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
10
  @spaces.GPU(duration=120)
11
  def predict(message, history):
12
  history.append({"role": "user", "content": message})
13
- input_text = tokenizer.apply_chat_template(history, tokenize=False) + "<|assistant|>"
14
  inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
15
  outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
16
  decoded = tokenizer.decode(outputs[0])
 
10
  @spaces.GPU(duration=120)
11
  def predict(message, history):
12
  history.append({"role": "user", "content": message})
13
+ input_text = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
14
  inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
15
  outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.7, top_p=0.9, do_sample=True)
16
  decoded = tokenizer.decode(outputs[0])