Spaces: Running on Zero

Commit 9f4e146
1 Parent(s): 9d74996
Add predict function for image processing and refactor http_bot for cleaner code
app.py CHANGED
@@ -181,6 +181,23 @@ model = AutoModel.from_pretrained(
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False)
 
 @spaces.GPU
+def predict(message, image_path):
+    pixel_values = load_image(image_path, max_num=6).to(torch.bfloat16).cuda()
+    print(f"pixel_values: {pixel_values}")
+    generation_config = dict(max_new_tokens= 700, do_sample=False, num_beams = 3, repetition_penalty=2.5)
+    print(f"######################")
+    print(f"message: {message}")
+    if pixel_values is not None:
+        question = '<image>\n'+message
+    else:
+        question = message
+    print("Model: ", model)
+    print("Tokenizer: ", tokenizer)
+    print("Question: ", question)
+    response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
+    print(f"AI response: {response}")
+    return response, conv_history
+
 def http_bot(
     state,
     temperature,
@@ -231,23 +248,9 @@ def http_bot(
     # Stream output
     # response = requests.post(worker_addr, json=pload, headers=headers, stream=True, timeout=300)
     print(f"all_image_paths: {all_image_paths}")
-
-    pixel_values = load_image(all_image_paths[0], max_num=6).to(torch.bfloat16).cuda()
-    print(f"pixel_values: {pixel_values}")
-    generation_config = dict(max_new_tokens= 700, do_sample=False, num_beams = 3, repetition_penalty=2.5)
     message = state.get_user_message(source=state.USER)
-    print(f"######################")
     print(f"message: {message}")
-    if pixel_values is not None:
-        question = '<image>\n'+message
-    else:
-        question = message
-    print("Model: ", model)
-    print("Tokenizer: ", tokenizer)
-    print("Question: ", question)
-    response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
-    print(f"AI response: {response}")
-
+    response, conv_history = predict(message, all_image_paths[0])
 
     # streamer = TextIteratorStreamer(
     #     tokenizer, skip_prompt=True, skip_special_tokens=True
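
Both hunks rely on a load_image helper that is defined elsewhere in app.py and not shown in this diff. For orientation only, here is a minimal sketch of what such a helper typically looks like in InternVL-style Spaces; it is an assumption, simplified to a single tile rather than the dynamic multi-patch tiling the real demo code performs:

import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import InterpolationMode

# ImageNet normalization stats used by InternVL preprocessing.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

def load_image(image_file, input_size=448, max_num=6):
    # Returns an (N, 3, input_size, input_size) float tensor; N == 1 in this
    # simplified sketch, so max_num is accepted for signature parity but unused.
    image = Image.open(image_file).convert('RGB')
    transform = T.Compose([
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])
    return transform(image).unsqueeze(0)  # add the batch/tile dimension

predict then casts this tensor to bfloat16 and moves it to the GPU before handing it to model.chat.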
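
With predict factored out, and now sitting under the @spaces.GPU decorator in place of http_bot, the model call is reusable outside the chat handler. A hypothetical standalone invocation, assuming model and tokenizer are loaded as in app.py above; the message and image path are illustrative only:

response, history = predict("Describe this image in one sentence.", "examples/demo.jpg")
print(response)

Keeping state handling in http_bot while one GPU-decorated function owns preprocessing and generation is the cleaner split the commit message refers to.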