crystal99 committed on
Commit
f083103
·
verified ·
1 Parent(s): b42e02f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -38,7 +38,7 @@ model.to(device)
38
  def generate_text(prompt):
39
  # Prevent gradient calculation to speed up inference
40
  with torch.no_grad():
41
- inputs = tokenizer(f"<|STARTOFTEXT|> <|USER|> {propmt} <|BOT|>", return_tensors="pt").to(device)
42
  outputs = model.generate(inputs['input_ids'], max_length=100, num_return_sequences=1, do_sample=False)
43
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
44
  return generated_text
 
38
  def generate_text(prompt):
39
  # Prevent gradient calculation to speed up inference
40
  with torch.no_grad():
41
+ inputs = tokenizer(f"<|STARTOFTEXT|> <|USER|> {prompt} <|BOT|>", return_tensors="pt").to(device)
42
  outputs = model.generate(inputs['input_ids'], max_length=100, num_return_sequences=1, do_sample=False)
43
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
44
  return generated_text