KvrParaskevi committed on
Commit 5f5cf3f · verified · 1 Parent(s): a06ba34

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -53,8 +53,7 @@ with st.container():
         with st.chat_message(message["role"]):
             st.write(message["content"])
 
-    tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
-    st.write(tokenizer.decode(tokenized_chat[0]))
+
     #Set up input text field
     input_text = st.chat_input(placeholder="Here you can chat with our hotel booking model.")
 
@@ -65,8 +64,10 @@ with st.container():
 
         #chat_response = demo_chat.demo_chain(input_text=input_text, memory=st.session_state.memory, model= chat_model)
         #first_answer = chat_response.split("Human")[0] #Because of Predict it prints the whole conversation.Here we seperate the first answer only.
+        tokenized_chat = tokenizer.apply_chat_template(st.session_state.chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+        st.write(tokenizer.decode(tokenized_chat[0]))
         outputs = model.generate(tokenized_chat, max_new_tokens=128)
-        first_answer = tokenizer.decode(outputs[0])
+        first_answer = tokenizer.decode(outputs[0],skip_special_tokens=True)
 
         with st.chat_message("assistant"):
             st.write(first_answer)
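
Net effect of the commit: the chat history is tokenized at generation time, after the user's new message has been appended to st.session_state.chat_history, and the reply is decoded with skip_special_tokens=True. Below is a minimal sketch of the resulting flow, runnable outside Streamlit. The model name and sample history are placeholders, not taken from this repo, and the prompt-slicing at the end is an assumed extra step the app itself does not perform:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Hypothetical stand-in for the app's hotel-booking model.
    model_name = "HuggingFaceH4/zephyr-7b-beta"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    chat_history = [
        {"role": "user", "content": "I'd like to book a double room for two nights."},
    ]

    # Serialize the whole history into the model's chat format and append the
    # assistant header, as the relocated apply_chat_template call does.
    tokenized_chat = tokenizer.apply_chat_template(
        chat_history, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )

    outputs = model.generate(tokenized_chat, max_new_tokens=128)

    # generate() returns prompt + completion for causal LMs, so decoding
    # outputs[0] whole (as the app does) echoes the conversation back.
    # skip_special_tokens=True, the change in the second hunk, drops markers
    # such as </s>; slicing off the prompt tokens is an extra, assumed step
    # that keeps only the newly generated answer.
    first_answer = tokenizer.decode(
        outputs[0][tokenized_chat.shape[-1]:], skip_special_tokens=True
    )
    print(first_answer)

Note that the new st.write(tokenizer.decode(tokenized_chat[0])) line still prints the raw serialized prompt to the page; it reads like a debugging aid rather than part of the chat UI.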