Update chatbot.py
chatbot.py: +8 -8
chatbot.py CHANGED

@@ -80,6 +80,12 @@ EXAMPLES = [
             "files": [f"{examples_path}/example_video/spiderman.gif"],
         }
     ],
+    [
+        {
+            "text": "What's written on this paper",
+            "files": [f"{examples_path}/example_images/paper_with_text.png"],
+        }
+    ],
     [
         {
             "text": "Who are they? Tell me about both of them",
@@ -99,13 +105,7 @@ EXAMPLES = [
     ],
     [
         {
-            "text": "What's written on this paper",
-            "files": [f"{examples_path}/example_images/paper_with_text.png"],
-        }
-    ],
-    [
-        {
-            "text": "Create an online ad for this product.",
+            "text": "Create an ad script for this product.",
             "files": [f"{examples_path}/example_images/shampoo.jpg"],
         }
     ],
@@ -268,7 +268,7 @@ def model_inference( user_prompt, chat_history, web_search):
 
     inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
     streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048)
     generated_text = ""
 
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
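For context on the first two hunks: EXAMPLES is the list of clickable sample prompts shown under the chat box, and each entry is a one-element list holding a {"text": ..., "files": [...]} dict, the shape a multimodal gr.ChatInterface expects for its single textbox input. The wiring below is a minimal sketch, assuming the rest of chatbot.py (outside this diff) connects model_inference roughly this way; the gr.Checkbox for web_search is an assumption inferred from the function signature at line 268.

import gradio as gr

# Sketch only: the real wiring in chatbot.py sits outside this diff.
demo = gr.ChatInterface(
    fn=model_inference,          # streaming generator patched in the last hunk
    multimodal=True,             # examples carry text plus file paths
    examples=EXAMPLES,           # the list edited in the hunks above
    additional_inputs=[gr.Checkbox(label="Web Search")],  # assumed to feed web_search
)
demo.launch()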
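The third hunk raises max_new_tokens to 2048; the surrounding lines are the standard transformers streaming pattern: model.generate() blocks until generation finishes, so it runs on a worker thread while TextIteratorStreamer yields decoded text as tokens arrive. A self-contained sketch of that pattern, assuming a loaded model and processor; the function name stream_reply and the yield-per-chunk style are illustrative, not the exact body of model_inference:

import torch
from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(model, processor, prompt, image):
    # Prepare multimodal inputs on the GPU in half precision, as in chatbot.py.
    inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
    # The streamer exposes decoded text chunks as generate() produces tokens.
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048)
    # generate() blocks until done, so it runs off the main thread.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    generated_text = ""
    for new_text in streamer:      # iterating blocks until the next chunk arrives
        generated_text += new_text
        yield generated_text       # Gradio re-renders the partial reply on each yield
    thread.join()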