Zenithwang committed on
Commit
b282751
1 Parent(s): 25a236c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -49,8 +49,14 @@ def predict(message, history):
49
  # messages = system_prompt + sft_end_token.join([sft_end_token.join([f"\n{sft_start_token}{user_role}\n" + item[0], f"\n{sft_start_token}{assistant_role}\n" + item[1]])
50
  # for item in history_transformer_format])
51
 
52
- messages = [{user_role: item[0], assistant_role: item[1]} for item in history_transformer_format]
53
- model_inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(device)
 
 
 
 
 
 
54
  # model_inputs = tokenizer([messages], return_tensors="pt").to(device)
55
 
56
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
 
49
  # messages = system_prompt + sft_end_token.join([sft_end_token.join([f"\n{sft_start_token}{user_role}\n" + item[0], f"\n{sft_start_token}{assistant_role}\n" + item[1]])
50
  # for item in history_transformer_format])
51
 
52
+ model_messages = []
53
+ for item in history_transformer_format:
54
+ model_messages.append({"role": user_role, "content": item[0]})
55
+ model_messages.append({"role": assistant_role, "content": item[1]})
56
+
57
+ print(f'model_messages: {model_messages}')
58
+
59
+ model_inputs = tokenizer.apply_chat_template(model_messages, add_generation_prompt=True, return_tensors="pt").to(device)
60
  # model_inputs = tokenizer([messages], return_tensors="pt").to(device)
61
 
62
  streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)