sandz7 committed on
Commit
0464368
·
1 Parent(s): 44a8369

Formatted the input prompt when it's passed to GPT, and set image_path to None before the if/else branches

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -59,6 +59,7 @@ def multimodal_and_generation(message, history):
59
  depending on the request from prompt, that prompt output will return here.
60
  """
61
  print(f"Message:\n{message}\nType:\n{type(message)}")
 
62
  if message["files"]:
63
  if type(message["files"][-1]) == dict:
64
  image_path = message["files"][-1]["path"]
@@ -70,20 +71,20 @@ def multimodal_and_generation(message, history):
70
  if type(hist[0]) == tuple:
71
  image_path = hist[0][0] # item inside items for history
72
 
73
- prompt = f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
74
-
75
  if image_path is None:
 
76
  # GPT Generation
77
  client = OpenAI(api_key=API_KEY)
78
  stream = client.chat.completions.create(
79
  model="gpt-3.5-turbo",
80
  messages=[{"role": "system", "content": "You are a helpful assistant called 'chimera'."},
81
- {"role": "user", "content": message}],
82
  stream=True,
83
  )
84
  return stream
85
 
86
  else:
 
87
  # Time to instance the llava
88
  image = Image.open(image_path)
89
  inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
 
59
  depending on the request from prompt, that prompt output will return here.
60
  """
61
  print(f"Message:\n{message}\nType:\n{type(message)}")
62
+ image_path = None
63
  if message["files"]:
64
  if type(message["files"][-1]) == dict:
65
  image_path = message["files"][-1]["path"]
 
71
  if type(hist[0]) == tuple:
72
  image_path = hist[0][0] # item inside items for history
73
 
 
 
74
  if image_path is None:
75
+ input_prompt = message["text"]
76
  # GPT Generation
77
  client = OpenAI(api_key=API_KEY)
78
  stream = client.chat.completions.create(
79
  model="gpt-3.5-turbo",
80
  messages=[{"role": "system", "content": "You are a helpful assistant called 'chimera'."},
81
+ {"role": "user", "content": input_prompt}],
82
  stream=True,
83
  )
84
  return stream
85
 
86
  else:
87
+ prompt = f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
88
  # Time to instance the llava
89
  image = Image.open(image_path)
90
  inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)