savage1221 committed
Commit 19a6bcf · verified · 1 Parent(s): cacb656

Update app.py

Files changed (1):
  1. app.py +43 -5
app.py CHANGED

@@ -122,6 +122,35 @@ def is_partial_stop(output, stop_str):
 tokenizer.chat_template = "{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
 
 
+# def prepare_history_for_model(history):
+#     """
+#     Converts the history to a tokenized prompt in the format expected by the model.
+#     Params:
+#       history: dialogue history
+#     Returns:
+#       Tokenized prompt
+#     """
+#     messages = []
+#     for idx, (user_msg, model_msg) in enumerate(history):
+#         # skip the last assistant message if it's empty; the tokenizer will do the formatting
+#         if idx == len(history) - 1 and not model_msg:
+#             messages.append({"role": "User", "content": user_msg})
+#             break
+#         if user_msg:
+#             messages.append({"role": "User", "content": user_msg})
+#         if model_msg:
+#             messages.append({"role": "Assistant", "content": model_msg})
+#     input_token = tokenizer.apply_chat_template(
+#         messages,
+#         add_generation_prompt=True,
+#         tokenize=True,
+#         return_tensors="pt",
+#         return_dict=True
+#     )
+#     return input_token
+
+
+
 def prepare_history_for_model(history):
     """
     Converts the history to a tokenized prompt in the format expected by the model.
@@ -131,15 +160,23 @@ def prepare_history_for_model(history):
     Tokenized prompt
     """
     messages = []
+
+    # Add instruction
+    instruction = "Generate quotes for AWS RDS services"
+    messages.append({"role": "Instruction", "content": instruction})
+
     for idx, (user_msg, model_msg) in enumerate(history):
-        # skip the last assistant message if it's empty; the tokenizer will do the formatting
+        # Assuming the user message contains the product information
+        if user_msg:
+            messages.append({"role": "Input", "content": user_msg})
+
+        # Skip the last assistant message if it's empty
         if idx == len(history) - 1 and not model_msg:
-            messages.append({"role": "User", "content": user_msg})
             break
-        if user_msg:
-            messages.append({"role": "User", "content": user_msg})
+
         if model_msg:
-            messages.append({"role": "Assistant", "content": model_msg})
+            messages.append({"role": "Output", "content": model_msg})
+
     input_token = tokenizer.apply_chat_template(
         messages,
         add_generation_prompt=True,
@@ -150,6 +187,7 @@ def prepare_history_for_model(history):
     return input_token
 
 
+
 def generate(history, temperature, max_new_tokens, top_p, repetition_penalty, assisted):
     """
     Generates the assistant's response given the chatbot history and generation parameters
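
For a concrete picture of what the updated prepare_history_for_model() now sends to the model, the sketch below re-renders the chat template from the context line at the top of the diff with the new Instruction/Input/Output roles. It is a minimal standalone approximation: the sample history pair is hypothetical, and rendering the template directly with jinja2 (which is what transformers uses for chat templates) stands in for tokenizer.apply_chat_template(..., tokenize=True), which additionally tokenizes the rendered string.

# Minimal sketch: render the app's chat template (copied from the diff context)
# for one in-progress chat turn. The history contents below are illustrative
# assumptions; only the template, role names, and loop logic come from the commit.
from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{{message['role'] + ': ' + message['content'] + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
)

# Gradio-style history: the user has sent product details and the model reply
# is still pending, so the last tuple's second element is empty.
history = [("RDS for MySQL, db.m5.large, 100 GB storage", "")]

messages = [{"role": "Instruction", "content": "Generate quotes for AWS RDS services"}]
for idx, (user_msg, model_msg) in enumerate(history):
    if user_msg:
        messages.append({"role": "Input", "content": user_msg})
    if idx == len(history) - 1 and not model_msg:
        break
    if model_msg:
        messages.append({"role": "Output", "content": model_msg})

prompt = Template(chat_template).render(messages=messages, add_generation_prompt=True)
print(prompt)
# Instruction: Generate quotes for AWS RDS services
# Input: RDS for MySQL, db.m5.large, 100 GB storage
# Assistant:

One design consequence visible in the rendering: the template's generation prompt is still the literal 'Assistant:', so the model is cued with a role name that no longer matches the 'Output' label this commit uses for prior model turns.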