vilarin committed on
Commit
6f1ee3e
1 Parent(s): 5312535

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -42,12 +42,12 @@ def stream_chat(message: str, history: list, temperature: float, max_new_tokens:
     conversation = []
     for prompt, answer in history:
         conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
-    conversation.append({"role": "user", "content": message})
+    #conversation.append({"role": "user", "content": message})
 
     print(f"Conversation is -\n{conversation}")
 
-
-    input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
+    input_ids = tokenizer.build_chat_input(message, history=conversation, role='user').input_ids.to(model.device)
+    #input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
 
     generate_kwargs = dict(
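For context on the hunk above: the generic tokenizer.apply_chat_template(...) call (together with the manual conversation.append of the current message) is swapped for the tokenizer's build_chat_input(message, history=..., role='user') helper, which takes the new user message separately and assembles the prompt from the history itself. Below is a minimal, self-contained sketch of that call pattern, not this repo's actual app.py; it assumes a ChatGLM3-style checkpoint (THUDM/chatglm3-6b is named only as an illustration) whose remote-code tokenizer exposes build_chat_input, and the sampling values are placeholders.

# Minimal sketch, assuming a ChatGLM3-style checkpoint whose remote-code
# tokenizer provides build_chat_input(); values below are illustrative.
from threading import Thread

import torch
from transformers import AutoModel, AutoTokenizer, TextIteratorStreamer

model_id = "THUDM/chatglm3-6b"  # assumption: illustrative checkpoint, not necessarily this repo's model
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype=torch.float16, device_map="auto"  # device_map needs accelerate
)

# History in the same format the diff builds: role/content dicts,
# without the current user message appended.
history = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
]
message = "What is the capital of France?"

# build_chat_input takes the new message separately and returns a BatchEncoding;
# only its input_ids are forwarded to generate(), as in the commit.
input_ids = tokenizer.build_chat_input(
    message, history=history, role="user"
).input_ids.to(model.device)

streamer = TextIteratorStreamer(
    tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True
)
generate_kwargs = dict(
    input_ids=input_ids,
    streamer=streamer,
    max_new_tokens=256,   # placeholder sampling settings
    do_sample=True,
    temperature=0.8,
)

# Run generation in a background thread and stream decoded text as it arrives.
Thread(target=model.generate, kwargs=generate_kwargs).start()
for new_text in streamer:
    print(new_text, end="", flush=True)

Keeping the history and the new message as separate arguments matches build_chat_input's signature, which is why the commit comments out the append line instead of passing the message inside the conversation list.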