paralym committed · verified
Commit 929eddb · 1 Parent(s): 103b441

Update app.py

Files changed (1): app.py (+53, −8)
app.py CHANGED

@@ -64,6 +64,7 @@ repo_name = os.environ["LOG_REPO"]
 
 external_log_dir = "./logs"
 LOGDIR = external_log_dir
+VOTEDIR = "./votes"
 
 
 def install_gradio_4_35_0():
@@ -89,6 +90,37 @@ def get_conv_log_filename():
     name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_conv.json")
     return name
 
+def get_conv_vote_filename():
+    t = datetime.datetime.now()
+    name = os.path.join(VOTEDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_vote.json")
+    if not os.path.isfile(name):
+        os.makedirs(os.path.dirname(name), exist_ok=True)
+    return name
+
+def vote_last_response(state, vote_type, model_selector):
+    with open(get_conv_vote_filename(), "a") as fout:
+        data = {
+            "type": vote_type,
+            "model": model_selector,
+            "state": state,
+        }
+        fout.write(json.dumps(data) + "\n")
+    api.upload_file(
+        path_or_fileobj=get_conv_vote_filename(),
+        path_in_repo=get_conv_vote_filename().replace("./votes/", ""),
+        repo_id=repo_name,
+        repo_type="dataset")
+
+
+def upvote_last_response(state):
+    vote_last_response(state, "upvote", "PULSE-7B")
+    return state
+
+def downvote_last_response(state):
+    vote_last_response(state, "downvote", "PULSE-7B")
+    return state
+
+
 class InferenceDemo(object):
     def __init__(
         self, args, model_path, tokenizer, model, image_processor, context_len
@@ -212,6 +244,7 @@ def add_message(history, message):
         history.append(((x,), None))
     if message["text"] is not None:
         history.append((message["text"], None))
+
     return history, gr.MultimodalTextbox(value=None, interactive=False)
 
 
@@ -242,10 +275,11 @@ def bot(history, temperature, top_p, max_output_tokens):
     # for message in history[-i-1:]:
    #     images_this_term.append(message[0][0])
 
+
     assert len(images_this_term) > 0, "must have an image"
     # image_files = (args.image_file).split(',')
     # image = [load_image(f) for f in images_this_term if f]
-
+
     all_image_hash = []
     all_image_path = []
     for image_path in images_this_term:
@@ -296,13 +330,14 @@ def bot(history, temperature, top_p, max_output_tokens):
     our_chatbot.conversation.append_message(our_chatbot.conversation.roles[1], None)
     prompt = our_chatbot.conversation.get_prompt()
 
-    # input_ids = (
-    #     tokenizer_image_token(
-    #         prompt, our_chatbot.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
-    #     )
-    #     .unsqueeze(0)
-    #     .to(our_chatbot.model.device)
-    # )
+    if len(images_this_term) == 0:
+        gr.Warning("You should upload an image. Please upload the image and try again.")
+        return history
+
+    if len(images_this_term) > 1:
+        gr.Warning("Only one image can be uploaded in a conversation. Please reduce the number of images and start a new conversation.")
+        return history
+
     input_ids = tokenizer_image_token(
         prompt, our_chatbot.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
     ).unsqueeze(0).to(our_chatbot.model.device)
@@ -502,7 +537,17 @@ with gr.Blocks(
         fn=clear_history, inputs=[chatbot], outputs=[chatbot], api_name="clear_all"
     )
 
+    upvote_btn.click(
+        fn=upvote_last_response, inputs=[chatbot], outputs=[chatbot], api_name="upvote_last_response"
+    )
 
+
+    downvote_btn.click(
+        fn=downvote_last_response, inputs=[chatbot], outputs=[chatbot], api_name="upvote_last_response"
+    )
+
+
+
     demo.queue()
 
 if __name__ == "__main__":
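
For reference, the vote path added here writes one JSON record per click to a dated file under ./votes and then mirrors that file to the logging dataset. Below is a minimal standalone sketch of the same pattern, assuming the authenticated HfApi client and the LOG_REPO dataset id that app.py configures outside this diff; the PULSE-7B label and the JSON-lines layout are taken from the handlers above, and os.path.basename is used as a simplification of the string replace in the diff.

# Minimal sketch of the vote-logging pattern introduced in this commit.
# Assumes an authenticated HfApi client and a LOG_REPO dataset, as in app.py.
import datetime
import json
import os

from huggingface_hub import HfApi

VOTEDIR = "./votes"
api = HfApi()  # picks up a token from the environment or cached login
repo_name = os.environ["LOG_REPO"]  # dataset repo that collects the vote logs


def get_conv_vote_filename():
    # One JSON-lines file per day, e.g. ./votes/2024-07-01-user_vote.json
    t = datetime.datetime.now()
    name = os.path.join(VOTEDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_vote.json")
    os.makedirs(os.path.dirname(name), exist_ok=True)
    return name


def vote_last_response(state, vote_type, model_selector):
    # Append one record per vote, then mirror the whole file to the dataset repo.
    path = get_conv_vote_filename()
    with open(path, "a") as fout:
        fout.write(json.dumps({"type": vote_type, "model": model_selector, "state": state}) + "\n")
    api.upload_file(
        path_or_fileobj=path,
        path_in_repo=os.path.basename(path),
        repo_id=repo_name,
        repo_type="dataset",
    )

In the demo itself, upvote_btn.click and downvote_btn.click wire these handlers to the chatbot history as both input and output; note that the diff registers both buttons under api_name="upvote_last_response".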