NGUYEN, Xuan Phi committed on
Commit
d3c19b3
1 Parent(s): c1660f6
app.py CHANGED
@@ -6,7 +6,10 @@
6
  Demo script to launch Language chat model
7
  """
8
 
9
- import spaces
 
 
 
10
  import os
11
  from gradio.themes import ThemeClass as Theme
12
  import numpy as np
@@ -102,12 +105,15 @@ def launch_demo():
102
  MODEL_INFO.format(model_path=model_path)
103
  )
104
 
105
- demo = CustomTabbedInterface(
106
- interface_list=list(demos.values()),
107
- tab_names=demos_names,
108
- title=f"{MODEL_TITLE}",
109
- description=descriptions,
110
- )
 
 
 
111
 
112
  demo.title = MODEL_NAME
113
 
 
6
  Demo script to launch Language chat model
7
  """
8
 
9
+ try:
10
+ import spaces
11
+ except ModuleNotFoundError:
12
+ print(f'Cannot import hf `spaces` with `import spaces`.')
13
  import os
14
  from gradio.themes import ThemeClass as Theme
15
  import numpy as np
 
105
  MODEL_INFO.format(model_path=model_path)
106
  )
107
 
108
+ if len(demos) == 1:
109
+ demo = demos[DEMOS[0]]
110
+ else:
111
+ demo = CustomTabbedInterface(
112
+ interface_list=list(demos.values()),
113
+ tab_names=demos_names,
114
+ title=f"{MODEL_TITLE}",
115
+ description=descriptions,
116
+ )
117
 
118
  demo.title = MODEL_NAME
119
 
multipurpose_chatbot/configs.py CHANGED
@@ -1,4 +1,3 @@
1
-
2
  import os
3
 
4
  # ! UI Markdown information
@@ -35,19 +34,19 @@ MODEL_DESC = f"""
35
  <span style="color: red">The chatbot may produce false and harmful content!</span>
36
  By using our service, you are required to agree to our <a href="https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b/blob/main/LICENSE" target="_blank" style="color: red">Terms Of Use</a>
37
  </span>
38
-
39
  """.strip()
40
 
 
41
  MODEL_DESC = f"""
42
  <div style='display:flex; gap: 0.25rem; '>
 
43
  <a href='https://github.com/damo-nlp-sg/seallms'><img src='https://img.shields.io/badge/Github-Code-success'></a>
44
  <a href='https://huggingface.co/spaces/SeaLLMs/SeaLLM-7B-v2.5'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
45
  <a href='https://huggingface.co/SeaLLMs/SeaLLM-7B-v2.5'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a>
46
- <a href='https://arxiv.org/pdf/2312.00738.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
47
  </div>
48
  <span style="font-size: larger">
49
- <a href="https://huggingface.co/SeaLLMs/SeaLLM-7B-v2.5" target="_blank">SeaLLM-7B-v2.5</a> - a helpful assistant for Southeast Asian Languages 🇬🇧 🇻🇳 🇮🇩 🇹🇭 🇲🇾 🇰🇭 🇱🇦 🇵🇭 🇲🇲.
50
- Explore <a href="https://huggingface.co/spaces/SeaLLMs/SeaLLM-7B" target="_blank">SeaLMMM-7B</a> - our multi-modal version of SeaLLMs.
51
  </span>
52
  <br>
53
  <span>
@@ -68,10 +67,8 @@ By using our service, you are required to agree to our <a href="https://huggingf
68
  not to use our service to generate any harmful, inappropriate or illegal content.
69
  The service collects user dialogue data for testing and improvement under
70
  <a href="https://creativecommons.org/licenses/by/4.0/">(CC-BY)</a> or similar license. So do not enter any personal information!
71
-
72
  """
73
 
74
-
75
  # MODEL_INFO = """
76
  # <h4 style="display: hidden;">Model Name: {model_path}</h4>
77
  # """
@@ -82,7 +79,9 @@ CITE_MARKDOWN = """
82
  If you find our project useful, hope you can star our repo and cite our paper as follows:
83
  ```
84
  @article{damonlpsg2023seallm,
85
- author = {Xuan-Phi Nguyen*, Wenxuan Zhang*, Xin Li*, Mahani Aljunied*, Zhiqiang Hu, Chenhui Shen^, Yew Ken Chia^, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng, Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, Lidong Bing},
 
 
86
  title = {SeaLLMs - Large Language Models for Southeast Asia},
87
  year = 2023,
88
  }
@@ -137,7 +136,7 @@ PRESENCE_PENALTY = float(os.environ.get("PRESENCE_PENALTY", "0.0"))
137
 
138
 
139
  # Transformers or vllm
140
- MODEL_PATH = os.environ.get("MODEL_PATH", "mistralai/Mistral-7B-Instruct-v0.2")
141
  MODEL_NAME = os.environ.get("MODEL_NAME", "Cool-Chatbot")
142
  DTYPE = os.environ.get("DTYPE", "bfloat16")
143
  DEVICE = os.environ.get("DEVICE", "cuda")
 
 
1
  import os
2
 
3
  # ! UI Markdown information
 
34
  <span style="color: red">The chatbot may produce false and harmful content!</span>
35
  By using our service, you are required to agree to our <a href="https://huggingface.co/SeaLLMs/SeaLLM-Chat-13b/blob/main/LICENSE" target="_blank" style="color: red">Terms Of Use</a>
36
  </span>
 
37
  """.strip()
38
 
39
+ # Explore <a href="https://huggingface.co/spaces/SeaLLMs/SeaLLM-7B" target="_blank">SeaLMMM-7B</a> - our multi-modal version of SeaLLMs.
40
  MODEL_DESC = f"""
41
  <div style='display:flex; gap: 0.25rem; '>
42
+ <a href='https://damo-nlp-sg.github.io/SeaLLMs/'><img src='https://img.shields.io/badge/Blog-red'></a>
43
  <a href='https://github.com/damo-nlp-sg/seallms'><img src='https://img.shields.io/badge/Github-Code-success'></a>
44
  <a href='https://huggingface.co/spaces/SeaLLMs/SeaLLM-7B-v2.5'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
45
  <a href='https://huggingface.co/SeaLLMs/SeaLLM-7B-v2.5'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a>
46
+ <a href='https://arxiv.org/pdf/2312.00738.pdf'><img src='https://img.shields.io/badge/Paper-red'></a>
47
  </div>
48
  <span style="font-size: larger">
49
+ <a href="https://huggingface.co/SeaLLMs/SeaLLM-7B-v2.5" target="_blank">SeaLLM-7B-v2.5</a> - a assistant for Southeast Asian Languages 🇬🇧 🇻🇳 🇮🇩 🇹🇭 🇲🇾 🇰🇭 🇱🇦 🇵🇭 🇲🇲.
 
50
  </span>
51
  <br>
52
  <span>
 
67
  not to use our service to generate any harmful, inappropriate or illegal content.
68
  The service collects user dialogue data for testing and improvement under
69
  <a href="https://creativecommons.org/licenses/by/4.0/">(CC-BY)</a> or similar license. So do not enter any personal information!
 
70
  """
71
 
 
72
  # MODEL_INFO = """
73
  # <h4 style="display: hidden;">Model Name: {model_path}</h4>
74
  # """
 
79
  If you find our project useful, hope you can star our repo and cite our paper as follows:
80
  ```
81
  @article{damonlpsg2023seallm,
82
+ author = {Xuan-Phi Nguyen*, Wenxuan Zhang*, Xin Li*, Mahani Aljunied*, Weiwen Xu, Hou Pong Chan,
83
+ Zhiqiang Hu, Chenhui Shen^, Yew Ken Chia^, Xingxuan Li, Jianyu Wang, Qingyu Tan, Liying Cheng,
84
+ Guanzheng Chen, Yue Deng, Sen Yang, Chaoqun Liu, Hang Zhang, Lidong Bing},
85
  title = {SeaLLMs - Large Language Models for Southeast Asia},
86
  year = 2023,
87
  }
 
136
 
137
 
138
  # Transformers or vllm
139
+ MODEL_PATH = os.environ.get("MODEL_PATH", "SeaLLMs/SeaLLM-7B-v2")
140
  MODEL_NAME = os.environ.get("MODEL_NAME", "Cool-Chatbot")
141
  DTYPE = os.environ.get("DTYPE", "bfloat16")
142
  DEVICE = os.environ.get("DEVICE", "cuda")
multipurpose_chatbot/demos/__init__.py CHANGED
@@ -6,4 +6,5 @@ from .rag_chat_interface import RagChatInterfaceDemo
6
  from .multimodal_chat_interface import *
7
  from .text_completion import *
8
  from .batch_inference import *
9
- from .multimodal_preference_interface import *
 
 
6
  from .multimodal_chat_interface import *
7
  from .text_completion import *
8
  from .batch_inference import *
9
+ from .multimodal_preference_interface import *
10
+ from .mm_chat_interface import *
multipurpose_chatbot/demos/chat_interface.py CHANGED
@@ -1,5 +1,12 @@
1
- import os
2
- import spaces
 
 
 
 
 
 
 
3
  from gradio.themes import ThemeClass as Theme
4
  import numpy as np
5
  import argparse
@@ -122,7 +129,7 @@ def format_conversation(history, system_prompt=None):
122
  return _str
123
 
124
 
125
- @spaces.GPU
126
  def chat_response_stream_multiturn_engine(
127
  message: str,
128
  history: List[Tuple[str, str]],
 
1
+ try:
2
+ import spaces
3
+ def maybe_spaces_gpu(fn):
4
+ fn = spaces.GPU(fn)
5
+ return fn
6
+ except ModuleNotFoundError:
7
+ print(f'Cannot import hf `spaces` with `import spaces`.')
8
+ def maybe_spaces_gpu(fn):
9
+ return fn
10
  from gradio.themes import ThemeClass as Theme
11
  import numpy as np
12
  import argparse
 
129
  return _str
130
 
131
 
132
+ @maybe_spaces_gpu
133
  def chat_response_stream_multiturn_engine(
134
  message: str,
135
  history: List[Tuple[str, str]],
multipurpose_chatbot/demos/mm_chat_interface.py ADDED
@@ -0,0 +1,965 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from gradio.themes import ThemeClass as Theme
3
+ import numpy as np
4
+ import argparse
5
+ import gradio as gr
6
+ from typing import Any, Iterator
7
+ from typing import Iterator, List, Optional, Tuple
8
+ import filelock
9
+ import glob
10
+ import json
11
+ import time
12
+ from gradio.routes import Request
13
+ from gradio.utils import SyncToAsyncIterator, async_iteration
14
+ from gradio.helpers import special_args
15
+ import anyio
16
+ from typing import AsyncGenerator, Callable, Literal, Union, cast, Generator
17
+
18
+ from gradio_client.documentation import document, set_documentation_group
19
+ from gradio.components import Button, Component
20
+ from gradio.events import Dependency, EventListenerMethod
21
+ from typing import List, Optional, Union, Dict, Tuple
22
+ from tqdm.auto import tqdm
23
+ from huggingface_hub import snapshot_download
24
+ from gradio.components.base import Component
25
+
26
+ from .base_demo import register_demo, get_demo_class, BaseDemo
27
+
28
+
29
+ from .chat_interface import (
30
+ SYSTEM_PROMPT,
31
+ MODEL_NAME,
32
+ MAX_TOKENS,
33
+ TEMPERATURE,
34
+ CHAT_EXAMPLES,
35
+ format_conversation,
36
+ gradio_history_to_openai_conversations,
37
+ gradio_history_to_conversation_prompt,
38
+ DATETIME_FORMAT,
39
+ get_datetime_string,
40
+ chat_response_stream_multiturn_engine,
41
+ ChatInterfaceDemo,
42
+ CustomizedChatInterface,
43
+ )
44
+
45
+ from gradio.events import Events
46
+
47
+ import inspect
48
+ from typing import AsyncGenerator, Callable, Literal, Union, cast
49
+
50
+ import anyio
51
+ from gradio_client import utils as client_utils
52
+ from gradio_client.documentation import document
53
+
54
+ from gradio.blocks import Blocks
55
+ from gradio.components import (
56
+ Button,
57
+ Chatbot,
58
+ Component,
59
+ Markdown,
60
+ State,
61
+ Textbox,
62
+ get_component_instance,
63
+ )
64
+ from gradio.events import Dependency, on
65
+ from gradio.helpers import create_examples as Examples # noqa: N812
66
+ from gradio.helpers import special_args
67
+ from gradio.layouts import Accordion, Group, Row
68
+ from gradio.routes import Request
69
+ from gradio.themes import ThemeClass as Theme
70
+ from gradio.utils import SyncToAsyncIterator, async_iteration
71
+
72
+ from ..globals import MODEL_ENGINE
73
+
74
+ from ..configs import (
75
+ USE_PANEL,
76
+ IMAGE_TOKEN,
77
+ IMAGE_TOKEN_INTERACTIVE,
78
+ CHATBOT_HEIGHT,
79
+ )
80
+
81
+ from .multimodal_chat_interface import (
82
+ undo_history,
83
+ undo_history_until_last_assistant_turn,
84
+ vision_chat_response_stream_multiturn_engine,
85
+ doc_chat_response_stream_multiturn_engine,
86
+ vision_doc_chat_response_stream_multiturn_engine,
87
+ gradio_history_to_conversation_prompt,
88
+ gradio_history_to_openai_conversations,
89
+ gradio_history_to_doc_conversation_prompt,
90
+ gradio_history_to_vision_conversation_prompt_paths,
91
+ gradio_history_to_vision_doc_conversation_prompt_paths,
92
+ )
93
+
94
+ # .message-fit {
95
+ # min-width: 20em;
96
+ # width: fit-content !important;
97
+ # }
98
+
99
+ EXAMPLES_PER_PAGE = int(os.environ.get("EXAMPLES_PER_PAGE", 10))
100
+
101
+ CSS = """
102
+ .message.svelte-1lcyrx4.svelte-1lcyrx4.svelte-1lcyrx4 {
103
+ padding-top: 1em;
104
+ }
105
+ """
106
+
107
+ CSS = """
108
+ .panel-full-width.svelte-1lcyrx4.svelte-1lcyrx4.svelte-1lcyrx4 {
109
+ padding: calc(var(--spacing-xxl) * 1);
110
+ width: 100%
111
+ }
112
+ """
113
+
114
+ DOC_TEMPLATE = """###
115
+ {content}
116
+ ###
117
+
118
+ """
119
+
120
+ DOC_INSTRUCTION = """Answer the following query exclusively based on the information provided in the document above. \
121
+ If the information is not found, please say so instead of making up facts! Remember to answer the question in the same language as the user query!
122
+ """
123
+
124
+
125
+ MultimodalTextbox = None
126
+
127
+ try:
128
+ from gradio import MultimodalTextbox
129
+ except ImportError as e:
130
+ print(f'Cannot import MultiModalTextbox: {MultimodalTextbox}')
131
+
132
+
133
+ class MultiModalTextChatInterface(CustomizedChatInterface):
134
+ def __init__(
135
+ self,
136
+ fn: Callable,
137
+ *,
138
+ chatbot: Chatbot | None = None,
139
+ textbox: Textbox | None = None,
140
+ additional_inputs: str | Component | list[str | Component] | None = None,
141
+ additional_inputs_accordion_name: str | None = None,
142
+ additional_inputs_accordion: str | Accordion | None = None,
143
+ examples: list[str] | None = None,
144
+ cache_examples: bool | None = None,
145
+ title: str | None = None,
146
+ description: str | None = None,
147
+ theme: Theme | str | None = None,
148
+ css: str | None = None,
149
+ js: str | None = None,
150
+ head: str | None = None,
151
+ analytics_enabled: bool | None = None,
152
+ submit_btn: str | None | Button = "Submit",
153
+ stop_btn: str | None | Button = "Stop",
154
+ retry_btn: str | None | Button = "🔄 Retry",
155
+ undo_btn: str | None | Button = "↩️ Undo",
156
+ clear_btn: str | None | Button = "🗑️ Clear",
157
+ autofocus: bool = True,
158
+ concurrency_limit: int | None | Literal["default"] = "default",
159
+ fill_height: bool = True,
160
+ ):
161
+ """
162
+ Parameters:
163
+ fn: The function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format.
164
+ chatbot: An instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.
165
+ textbox: An instance of the gr.Textbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox component will be created.
166
+ additional_inputs: An instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion.
167
+ additional_inputs_accordion_name: Deprecated. Will be removed in a future version of Gradio. Use the `additional_inputs_accordion` parameter instead.
168
+ additional_inputs_accordion: If a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided.
169
+ examples: Sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input.
170
+ cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
171
+ title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.
172
+ description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.
173
+ theme: Theme to use, loaded from gradio.themes.
174
+ css: Custom css as a string or path to a css file. This css will be included in the demo webpage.
175
+ js: Custom js or path to js file to run when demo is first loaded. This javascript will be included in the demo webpage.
176
+ head: Custom html to insert into the head of the demo webpage. This can be used to add custom meta tags, scripts, stylesheets, etc. to the page.
177
+ analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.
178
+ submit_btn: Text to display on the submit button. If None, no button will be displayed. If a Button object, that button will be used.
179
+ stop_btn: Text to display on the stop button, which replaces the submit_btn when the submit_btn or retry_btn is clicked and response is streaming. Clicking on the stop_btn will halt the chatbot response. If set to None, stop button functionality does not appear in the chatbot. If a Button object, that button will be used as the stop button.
180
+ retry_btn: Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used.
181
+ undo_btn: Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used.
182
+ clear_btn: Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used.
183
+ autofocus: If True, autofocuses to the textbox when the page loads.
184
+ concurrency_limit: If set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default).
185
+ fill_height: If True, the chat interface will expand to the height of window.
186
+ """
187
+ try:
188
+ super(gr.ChatInterface, self).__init__(
189
+ analytics_enabled=analytics_enabled,
190
+ mode="chat_interface",
191
+ css=css,
192
+ title=title or "Gradio",
193
+ theme=theme,
194
+ js=js,
195
+ head=head,
196
+ fill_height=fill_height,
197
+ )
198
+ except Exception as e:
199
+ # Handling some old gradio version with out fill_height
200
+ super(gr.ChatInterface, self).__init__(
201
+ analytics_enabled=analytics_enabled,
202
+ mode="chat_interface",
203
+ css=css,
204
+ title=title or "Gradio",
205
+ theme=theme,
206
+ js=js,
207
+ head=head,
208
+ # fill_height=fill_height,
209
+ )
210
+ self.concurrency_limit = concurrency_limit
211
+ self.fn = fn
212
+ self.is_async = inspect.iscoroutinefunction(
213
+ self.fn
214
+ ) or inspect.isasyncgenfunction(self.fn)
215
+ self.is_generator = inspect.isgeneratorfunction(
216
+ self.fn
217
+ ) or inspect.isasyncgenfunction(self.fn)
218
+ self.examples = examples
219
+ if self.space_id and cache_examples is None:
220
+ self.cache_examples = True
221
+ else:
222
+ self.cache_examples = cache_examples or False
223
+ self.buttons: list[Button | None] = []
224
+
225
+ if additional_inputs:
226
+ if not isinstance(additional_inputs, list):
227
+ additional_inputs = [additional_inputs]
228
+ self.additional_inputs = [
229
+ get_component_instance(i)
230
+ for i in additional_inputs # type: ignore
231
+ ]
232
+ else:
233
+ self.additional_inputs = []
234
+ if additional_inputs_accordion_name is not None:
235
+ print(
236
+ "The `additional_inputs_accordion_name` parameter is deprecated and will be removed in a future version of Gradio. Use the `additional_inputs_accordion` parameter instead."
237
+ )
238
+ self.additional_inputs_accordion_params = {
239
+ "label": additional_inputs_accordion_name
240
+ }
241
+ if additional_inputs_accordion is None:
242
+ self.additional_inputs_accordion_params = {
243
+ "label": "Additional Inputs",
244
+ "open": False,
245
+ }
246
+ elif isinstance(additional_inputs_accordion, str):
247
+ self.additional_inputs_accordion_params = {
248
+ "label": additional_inputs_accordion
249
+ }
250
+ elif isinstance(additional_inputs_accordion, Accordion):
251
+ self.additional_inputs_accordion_params = (
252
+ additional_inputs_accordion.recover_kwargs(
253
+ additional_inputs_accordion.get_config()
254
+ )
255
+ )
256
+ else:
257
+ raise ValueError(
258
+ f"The `additional_inputs_accordion` parameter must be a string or gr.Accordion, not {type(additional_inputs_accordion)}"
259
+ )
260
+
261
+ with self:
262
+ if title:
263
+ Markdown(
264
+ f"<h1 style='text-align: center; margin-bottom: 1rem'>{self.title}</h1>"
265
+ )
266
+ if description:
267
+ Markdown(description)
268
+
269
+ if chatbot:
270
+ self.chatbot = chatbot.render()
271
+ else:
272
+ self.chatbot = Chatbot(
273
+ label="Chatbot", scale=1, height=200 if fill_height else None
274
+ )
275
+
276
+ with Row():
277
+ for btn in [retry_btn, undo_btn, clear_btn]:
278
+ if btn is not None:
279
+ if isinstance(btn, Button):
280
+ btn.render()
281
+ elif isinstance(btn, str):
282
+ btn = Button(btn, variant="secondary", size="sm")
283
+ else:
284
+ raise ValueError(
285
+ f"All the _btn parameters must be a gr.Button, string, or None, not {type(btn)}"
286
+ )
287
+ self.buttons.append(btn) # type: ignore
288
+
289
+ # =------
290
+ with Row():
291
+ if textbox:
292
+ # textbox.container = False
293
+ # textbox.show_label = False
294
+ textbox_ = textbox.render()
295
+ # assert isinstance(textbox_, Textbox)
296
+ self.textbox = textbox_
297
+ else:
298
+ self.textbox = Textbox(
299
+ container=False,
300
+ show_label=False,
301
+ label="Message",
302
+ placeholder="Type a message...",
303
+ scale=7,
304
+ autofocus=autofocus,
305
+ )
306
+ if stop_btn is not None:
307
+ if isinstance(stop_btn, Button):
308
+ stop_btn.visible = False
309
+ stop_btn.render()
310
+ elif isinstance(stop_btn, str):
311
+ stop_btn = Button(
312
+ stop_btn,
313
+ variant="stop",
314
+ visible=False,
315
+ scale=2,
316
+ min_width=150,
317
+ )
318
+ else:
319
+ raise ValueError(
320
+ f"The stop_btn parameter must be a gr.Button, string, or None, not {type(stop_btn)}"
321
+ )
322
+ self.buttons.extend([stop_btn]) # type: ignore
323
+
324
+ self.num_tokens = Textbox(
325
+ # container=False,
326
+ show_label=False,
327
+ label="# Tokens",
328
+ placeholder="0 tokens",
329
+ scale=1,
330
+ interactive=False,
331
+ # autofocus=autofocus,
332
+ min_width=10
333
+ )
334
+
335
+ self.fake_api_btn = Button("Fake API", visible=False)
336
+ self.fake_response_textbox = Textbox(label="Response", visible=False)
337
+ (
338
+ self.retry_btn,
339
+ self.undo_btn,
340
+ self.clear_btn,
341
+ # self.submit_btn,
342
+ self.stop_btn,
343
+ ) = self.buttons
344
+ self.submit_btn = None
345
+
346
+ if examples:
347
+ if self.is_generator:
348
+ examples_fn = self._examples_stream_fn
349
+ else:
350
+ # examples_fn = self._examples_fn
351
+ raise NotImplementedError()
352
+
353
+ def copy_to_mm_textbox(message, image, filename):
354
+ save_input = {"text": message, "files": []}
355
+ if filename is not None and os.path.exists(filename):
356
+ # save_input['files'].append({"path": file})
357
+ save_input['files'].append(filename)
358
+ if image is not None and os.path.exists(image):
359
+ # save_input['files'].append({"path": file})
360
+ save_input['files'].append(image)
361
+ print(save_input)
362
+ return save_input
363
+
364
+ # self.example_textbox = gr.Textbox(visible=False)
365
+ # self.example_file = gr.File(file_count='single', type='filepath', visible=False)
366
+ # self.example_image = gr.Image(type='filepath', visible=False)
367
+
368
+ # self.examples_handler = Examples(
369
+ # examples=examples,
370
+ # inputs=[self.example_textbox, self.example_image, self.example_file],
371
+ # outputs=self.textbox,
372
+ # # fn=examples_fn,
373
+ # fn=copy_to_mm_textbox,
374
+ # run_on_click=True
375
+ # )
376
+ self.examples_handler = Examples(
377
+ examples=examples,
378
+ # inputs=[self.textbox] + self.additional_inputs,
379
+ inputs=[self.textbox],
380
+ # outputs=self.chatbot,
381
+ # fn=examples_fn,
382
+ examples_per_page=EXAMPLES_PER_PAGE,
383
+ )
384
+
385
+ any_unrendered_inputs = any(
386
+ not inp.is_rendered for inp in self.additional_inputs
387
+ )
388
+ if self.additional_inputs and any_unrendered_inputs:
389
+ with Accordion(**self.additional_inputs_accordion_params): # type: ignore
390
+ for input_component in self.additional_inputs:
391
+ if not input_component.is_rendered:
392
+ input_component.render()
393
+
394
+ # The example caching must happen after the input components have rendered
395
+ if cache_examples:
396
+ client_utils.synchronize_async(self.examples_handler.cache)
397
+
398
+ self.saved_input = State()
399
+ self.chatbot_state = (
400
+ State(self.chatbot.value) if self.chatbot.value else State([])
401
+ )
402
+
403
+ self._setup_events()
404
+ self._setup_api()
405
+
406
+ def _clear_and_save_textbox(self, saved_input: Dict[str, Union[str, list]]) -> Tuple[Dict[str, Union[str, list]], Dict[str, Union[str, list]]]:
407
+ return {"text": "", "files": []}, saved_input
408
+
409
+ def _add_inputs_to_history(self, history: List[List[Union[str, None]]], save_input: Dict[str, Union[str, list]]):
410
+ message = save_input['text']
411
+ files = save_input['files']
412
+ if files is not None and len(files) > 0:
413
+ for f in files:
414
+ fpath = f['path'] if isinstance(f, dict) else f
415
+ history.append([(fpath, ), None])
416
+ if message is not None and message.strip() != "":
417
+ history.append([message, None])
418
+ return history
419
+
420
+ def _display_input(
421
+ self, saved_input: Dict[str, Union[str, list]], history: List[List[Union[str, None]]]
422
+ ) -> Tuple[List[List[Union[str, None]]], List[List[list[Union[str, None]]]]]:
423
+ message = saved_input["text"]
424
+ files = saved_input['files']
425
+ if files is not None and len(files) > 0:
426
+ print(files)
427
+ for f in files:
428
+ fpath = f['path'] if isinstance(f, dict) else f
429
+ history.append([(fpath, ), None])
430
+ if message is not None and message.strip() != "":
431
+ history.append([message, None])
432
+ return history, history
433
+
434
+ def _delete_prev_fn(
435
+ self, history: list[list[str | None]]
436
+ ) -> tuple[list[list[str | None]], str, list[list[str | None]]]:
437
+ try:
438
+ message, _ = history.pop()
439
+ except IndexError:
440
+ message = ""
441
+ # saved_input = [message or ""] + [None] * len(self.multimodal_inputs)
442
+ saved_input = {"text": message, "files": []}
443
+ return history, saved_input, history
444
+
445
+ def _setup_events(self) -> None:
446
+ from gradio.components import State
447
+ has_on = False
448
+ try:
449
+ from gradio.events import Dependency, EventListenerMethod, on
450
+ has_on = True
451
+ except ImportError as ie:
452
+ has_on = False
453
+ submit_fn = self._stream_fn if self.is_generator else self._submit_fn
454
+ if not self.is_generator:
455
+ raise NotImplementedError(f'should use generator')
456
+
457
+ if has_on:
458
+ # new version
459
+ submit_triggers = (
460
+ # [self.textbox.submit, self.submit_btn.click]
461
+ [self.textbox.submit]
462
+ if self.submit_btn
463
+ else [self.textbox.submit]
464
+ )
465
+ submit_event = (
466
+ on(
467
+ submit_triggers,
468
+ self._clear_and_save_textbox,
469
+ [self.textbox],
470
+ [self.textbox] + [self.saved_input],
471
+ api_name=False,
472
+ queue=False,
473
+ )
474
+ .then(
475
+ self._display_input,
476
+ [self.saved_input, self.chatbot_state],
477
+ [self.chatbot, self.chatbot_state],
478
+ api_name=False,
479
+ queue=False,
480
+ )
481
+ .success(
482
+ submit_fn,
483
+ [self.chatbot_state] + self.additional_inputs,
484
+ [self.chatbot, self.chatbot_state, self.num_tokens],
485
+ api_name=False,
486
+ )
487
+ )
488
+ self._setup_stop_events(submit_triggers, submit_event)
489
+ else:
490
+ raise ValueError(f'Better install new gradio version than 3.44.0')
491
+
492
+ if self.retry_btn:
493
+ retry_event = (
494
+ self.retry_btn.click(
495
+ self._delete_prev_fn,
496
+ [self.chatbot_state],
497
+ [self.chatbot, self.saved_input, self.chatbot_state],
498
+ api_name=False,
499
+ queue=False,
500
+ )
501
+ .then(
502
+ self._display_input,
503
+ [self.saved_input, self.chatbot_state],
504
+ [self.chatbot, self.chatbot_state],
505
+ api_name=False,
506
+ queue=False,
507
+ )
508
+ .success(
509
+ submit_fn,
510
+ [self.chatbot_state] + self.additional_inputs,
511
+ [self.chatbot, self.chatbot_state, self.num_tokens],
512
+ api_name=False,
513
+ )
514
+ )
515
+ self._setup_stop_events([self.retry_btn.click], retry_event)
516
+
517
+ if self.undo_btn:
518
+ self.undo_btn.click(
519
+ # self._delete_prev_fn,
520
+ # [self.chatbot_state],
521
+ # [self.chatbot, self.saved_input, self.chatbot_state],
522
+ undo_history_until_last_assistant_turn,
523
+ [self.chatbot_state],
524
+ [self.chatbot, self.chatbot_state],
525
+ api_name=False,
526
+ queue=False,
527
+ )
528
+ # .then(
529
+ # lambda x: x,
530
+ # [self.saved_input],
531
+ # [self.textbox],
532
+ # api_name=False,
533
+ # queue=False,
534
+ # )
535
+
536
    def _setup_stop_events(
        self, event_triggers: list[EventListenerMethod], event_to_cancel: Dependency
    ) -> None:
        """Wire the stop / submit / clear buttons around a generation event.

        Args:
            event_triggers: listener method(s) (e.g. ``self.textbox.submit`` or
                ``self.retry_btn.click``) that kick off generation; a single
                method is normalized to a one-element list below.
            event_to_cancel: the in-flight generation event that the stop and
                clear buttons must be able to cancel.
        """
        # NOTE(review): `State` is imported but never used here — presumably leftover.
        from gradio.components import State
        event_triggers = event_triggers if isinstance(event_triggers, (list, tuple)) else [event_triggers]
        if self.stop_btn and self.is_generator:
            # Streaming mode with a stop button: toggle submit/stop visibility
            # while generation runs, then restore when it finishes.
            if self.submit_btn:
                for event_trigger in event_triggers:
                    # On trigger: hide submit, show stop.
                    event_trigger(
                        lambda: (
                            Button(visible=False),
                            Button(visible=True),
                        ),
                        None,
                        [self.submit_btn, self.stop_btn],
                        api_name=False,
                        queue=False,
                    )
                # When generation completes: show submit again, hide stop.
                event_to_cancel.then(
                    lambda: (Button(visible=True), Button(visible=False)),
                    None,
                    [self.submit_btn, self.stop_btn],
                    api_name=False,
                    queue=False,
                )
            else:
                # No submit button: only toggle the stop button's visibility.
                for event_trigger in event_triggers:
                    event_trigger(
                        lambda: Button(visible=True),
                        None,
                        [self.stop_btn],
                        api_name=False,
                        queue=False,
                    )
                event_to_cancel.then(
                    lambda: Button(visible=False),
                    None,
                    [self.stop_btn],
                    api_name=False,
                    queue=False,
                )
            # Clicking stop cancels the running generation event.
            self.stop_btn.click(
                None,
                None,
                None,
                cancels=event_to_cancel,
                api_name=False,
            )
        else:
            # Non-streaming (or no stop button): just disable submit while the
            # event runs and re-enable it afterwards.
            if self.submit_btn:
                for event_trigger in event_triggers:
                    event_trigger(
                        lambda: Button(interactive=False),
                        None,
                        [self.submit_btn],
                        api_name=False,
                        queue=False,
                    )
                event_to_cancel.then(
                    lambda: Button(interactive=True),
                    None,
                    [self.submit_btn],
                    api_name=False,
                    queue=False,
                )
        # upon clear, cancel the submit event as well
        if self.clear_btn:
            if self.submit_btn:
                # Reset chat display, state, saved input, and re-enable submit.
                self.clear_btn.click(
                    lambda: ([], [], None, Button(interactive=True)),
                    None,
                    [self.chatbot, self.chatbot_state, self.saved_input, self.submit_btn],
                    queue=False,
                    api_name=False,
                    cancels=event_to_cancel,
                )
            else:
                self.clear_btn.click(
                    lambda: ([], [], None),
                    None,
                    [self.chatbot, self.chatbot_state, self.saved_input],
                    queue=False,
                    api_name=False,
                    cancels=event_to_cancel,
                )
622
    async def _stream_fn(
        self,
        # message: str,
        history_with_input,
        request: Request,
        *args,
    ) -> AsyncGenerator:
        """Stream model responses for the latest user turn.

        Yields ``(chat_display, chat_state, token_count_label)`` triples: the
        growing history is yielded twice so the chatbot component and the state
        stay in sync, plus a "<n> toks" string for the token counter.

        Args:
            history_with_input: full chat history where the LAST entry is the
                pending user turn (``history_with_input[-1][0]`` is the message).
            request: gradio request forwarded to ``special_args``.
            *args: additional inputs (temperature, max tokens, ...) passed
                through to ``self.fn``.
        """
        history = history_with_input[:-1]
        message = history_with_input[-1][0]
        inputs, _, _ = special_args(
            self.fn, inputs=[history_with_input, *args], request=request
        )

        if self.is_async:
            generator = self.fn(*inputs)
        else:
            # Run the sync generator on a worker thread and adapt it to async.
            generator = await anyio.to_thread.run_sync(
                self.fn, *inputs, limiter=self.limiter
            )
            generator = SyncToAsyncIterator(generator, self.limiter)

        # ! In case of error, yield the previous history & undo any generation before raising error
        try:
            # The engine may yield either `(text, num_tokens)` or bare text.
            first_response_pack = await async_iteration(generator)
            if isinstance(first_response_pack, (tuple, list)):
                first_response, num_tokens = first_response_pack
            else:
                first_response, num_tokens = first_response_pack, -1
            update = history + [[message, first_response]]
            # print(f"===\n{update}")
            yield update, update, f"{num_tokens} toks"
        except StopIteration:
            # Generator produced nothing: show the turn with an empty response.
            update = history + [[message, None]]
            yield update, update, "NaN toks"
        except Exception as e:
            # Roll the UI back to the pre-generation history, then re-raise.
            yield history, history, "NaN toks"
            raise e

        try:
            # Continue streaming the remaining chunks after the first one.
            async for response_pack in generator:
                if isinstance(response_pack, (tuple, list)):
                    response, num_tokens = response_pack
                else:
                    response, num_tokens = response_pack, "NaN toks"
                update = history + [[message, response]]
                # print(f"------\n{update}")
                yield update, update, f"{num_tokens} toks"
        except Exception as e:
            yield history, history, "NaN toks"
            raise e
672
+
673
    async def _examples_stream_fn(
        self,
        # message: str,
        *args,
    ) -> AsyncGenerator:
        """Stream a response for a cached example (message + optional file).

        NOTE(review): the unconditional ``raise`` on the first line appears to
        deliberately disable this method (example caching is turned off in
        `create_demo`); everything after it is currently dead code.
        """
        raise ValueError(f'invalid')
        history = []
        # input_len = 1 + len(self.multimodal_inputs)
        # input_len = 2
        # saved_input = args[:input_len]
        # saved_input = args[0]
        # message = saved_input['text']
        # files = saved_input['files']
        message = args[0]
        fname = args[1]
        # Rebuild the multimodal-textbox payload shape {"text": ..., "files": [...]}.
        saved_input = {
            "text": message,
            "files": []
        }
        if fname is not None and os.path.exists(fname):
            # saved_input['files'].append({"path": fname})
            saved_input['files'].append(fname)

        additional_inputs = args[2:]
        history = self._add_inputs_to_history(history, saved_input)
        inputs, _, _ = special_args(self.fn, inputs=[history, *additional_inputs], request=None)

        if self.is_async:
            generator = self.fn(*inputs)
        else:
            # Run the sync generator on a worker thread and adapt it to async.
            generator = await anyio.to_thread.run_sync(
                self.fn, *inputs, limiter=self.limiter
            )
            generator = SyncToAsyncIterator(generator, self.limiter)
        # async for response in generator:
        #     yield [[message, response]]

        try:
            # Same yield contract as `_stream_fn`: (display, state, token label).
            async for response_pack in generator:
                if isinstance(response_pack, (tuple, list)):
                    response, num_tokens = response_pack
                else:
                    response, num_tokens = response_pack, "NaN toks"
                update = history + [[message, response]]
                yield update, update, f"{num_tokens} toks"
        except Exception as e:
            yield history, history, "NaN toks"
            raise e
721
+
722
+
723
+
724
@register_demo
class VisionMMChatInterfaceDemo(ChatInterfaceDemo):
    """Multimodal chat demo tab that accepts a vision image alongside text.

    Builds a `MultiModalTextChatInterface` whose textbox accepts image
    uploads and routes turns to the vision chat engine.
    """

    @property
    def tab_name(self):
        # Label shown on this demo's tab in the tabbed interface.
        return "Vision Chat"

    @property
    def examples(self):
        """Clickable example prompts; each entry is a multimodal-textbox
        payload ``{"text": ..., "files": [...]}`` wrapped in a one-element list."""
        from pathlib import Path
        from gradio.data_classes import FileData, GradioModel
        # return [
        #     ["What's strange about this image?", "assets/dog_monalisa.jpeg", None],
        #     ["Explain why the sky is blue.", None,],
        # ]
        return [
            # [{"text": "Summarize the document", "files": [{
            #     "path": "assets/attention_short.pdf", "orig_name": "attention_short", "mime_type": "application/pdf",
            #     "size": Path("assets/attention_short.pdf").stat().st_size
            #     }
            # ]}],
            # [{"text": "Summarize the document", "files": ["assets/attention_short.pdf"]}],
            # [{"text": "Summarize the document", "files": [
            #     FileData(
            #         path="assets/attention_short.pdf",
            #         mime_type="application/pdf",
            #         orig_name="attention_short",
            #         size=Path("assets/attention_short.pdf").stat().st_size,
            #         url="attention_short.pdf",
            #     )
            # ]}],
            # [{"text": "What's strange about this image?", "files": ["assets/dog_monalisa.jpeg"]},],
            # [{"text": "Explain why the sky is blue.", "files": []},],
            # --- image + question examples ---
            [{"text": "Mô tả chi tiết bức ảnh.", "files": ["assets/imgs/athlete.jpeg", ]} ],
            [{"text": "Mô tả chi tiết bức ảnh.", "files": ["assets/imgs/chart_algo.png", ]} ],
            [{"text": "Explain the image.", "files": ["assets/imgs/chart_soap_sense_cycle.png", ]} ],
            [{"text": "Provide a detailed description of the poster.", "files": ["assets/imgs/covid.jpeg", ]} ],
            [{"text": "Where is this place exactly?", "files": ["assets/imgs/danang.jpeg", ]} ],
            [{"text": "What's strange about this image?", "files": ["assets/dog_monalisa.jpeg",]} ],
            [{"text": "Đây là ở đâu?", "files": ["assets/imgs/great_wall.png", ]} ],
            [{"text": "Giới thiệu về nơi này.", "files": ["assets/imgs/hochiminh_city.jpeg", ]} ],
            [{"text": "Đây là ở đâu?", "files": ["assets/imgs/hochiminh_mausoleum.jpeg", ]} ],
            [{"text": "Suy nghĩ từng bước một để tìm x.", "files": ["assets/imgs/find_x_triangle.jpeg", ]} ],
            [{"text": "Provide a detailed description of the poster.", "files": ["assets/imgs/home_injury.jpeg", ]} ],
            [{"text": "Đây là hành tinh gì?", "files": ["assets/imgs/jupyter.jpeg", ]} ],
            [{"text": "Miêu tả bức ảnh trên.", "files": ["assets/imgs/leaf.png", ]} ],
            [{"text": "Đây là đâu?", "files": ["assets/imgs/mbs.png", ]} ],
            [{"text": "Introduce this figure.", "files": ["assets/imgs/merlion_2.jpeg", ]} ],
            [{"text": "Explain the figure.", "files": ["assets/imgs/photosynthesis.png", ]} ],
            [{"text": "List out all the details of the image.", "files": ["assets/imgs/sewing_tools.png", ]} ],
            [{"text": "What happened in this photo.", "files": ["assets/imgs/tiananmen_tankman.jpeg", ]} ],
            [{"text": "Có gì ngoài 2 con mèo?", "files": ["assets/imgs/two_cats.jpeg", ]} ],
            [{"text": "Biển báo nói gì?", "files": ["assets/imgs/cau_oo.jpeg", ]} ],
            [{"text": "Đây là món gì và hướng dẫn cách làm.", "files": ["assets/imgs/banhmy.jpeg", ]} ],
            [{"text": "Hãy hướng dẫn nấu món này.", "files": ["assets/imgs/cach-nau-pho-bo-nam-dinh.jpeg", ]} ],
            [{"text": "Bức tường nói gì?", "files": ["assets/imgs/camdaibay.jpeg", ]} ],
            [{"text": "Công thức này là gì", "files": ["assets/imgs/eistein_field_equation.png", ]} ],
            [{"text": "What is this formula about?", "files": ["assets/imgs/eistein_field_equation.png", ]} ],
            [{"text": "Hãy tìm góc còn lại.", "files": ["assets/imgs/triangle_find_angle.png", ]} ],
            [{"text": "Đây là đâu?", "files": ["assets/imgs/seattle_space_needle.jpeg", ]} ],
            [{"text": "Describe the image", "files": ["assets/imgs/seal_logo.png", ]} ],
            # [{"text": "Explain why the sky is blue.", None,} ],
            # --- text-only examples (multilingual) ---
            [{"text": "Hãy giải thích thuyết tương đối rộng.", "files": []},],
            [{"text": "Hãy giải thích vấn đề P vs NP.", "files": []},],
            [{"text": "Explain general relativity.", "files": []},],
            [{"text": 'Vừa gà vừa chó, bó lại cho tròn, 36 con và 100 chân chẵn. Hỏi có bao nhiêu gà và chó?', "files": []},],
            [{"text": 'Hôm nay tôi có 5 quả cam. Hôm qua tôi ăn 2 quả. Vậy hôm nay tôi có mấy quả cam?', "files": []},],
            [{"text": '5 điều bác Hồ dạy là gì?', "files": []},],
            [{"text": "Tolong bantu saya menulis email ke lembaga pemerintah untuk mencari dukungan finansial untuk penelitian AI.", "files": []},],
            [{"text": "ຂໍແຈ້ງ 5 ສະຖານທີ່ທ່ອງທ່ຽວໃນນະຄອນຫຼວງວຽງຈັນ", "files": []},],
            [{"text": 'ငွေကြေးအခက်အခဲကြောင့် ပညာသင်ဆုတောင်းဖို့ တက္ကသိုလ်ကို စာတစ်စောင်ရေးပြီး ကူညီပေးပါ။', "files": []},],
            [{"text": "Sally has 3 brothers, each brother has 2 sisters. How many sister sally has?", "files": []},],
            [{"text": "There are 3 killers in a room. Someone enters the room and kills 1 of them. Assuming no one leaves the room. How many killers are left in the room?", "files": []},],
            [{"text": "Assume the laws of physics on Earth. A small marble is put into a normal cup and the cup is placed upside down on a table. Someone then takes the cup and puts it inside the microwave. Where is the ball now? Explain your reasoning step by step.", "files": []},],
            [{"text": "Why my parents did not invited me to their weddings?", "files": []},],
        ]

    @property
    def mm_textbox_placeholder(self):
        # Placeholder text for the multimodal textbox; subclasses override.
        return "Type message or upload an image"

    @property
    def mm_accept_file_types(self):
        # File types the multimodal textbox accepts; subclasses override.
        return ["image"]

    @property
    def gradio_fn(self):
        # Engine callable invoked per turn; subclasses swap in other engines.
        return vision_chat_response_stream_multiturn_engine

    def create_demo(
        self,
        title: str | None = None,
        description: str | None = None,
        additional_inputs: List[Any] | None = None,
        **kwargs
    ) -> gr.Blocks:
        """Build and return the gradio Blocks for this multimodal chat tab.

        Args:
            title: optional tab title.
            description: optional markdown shown above the chat; defaults to
                a short upload hint.
            additional_inputs: optional list of extra gradio inputs; defaults
                to temperature / max-tokens / system-prompt / visual-token.
            **kwargs: overrides for system_prompt, max_tokens, temperature,
                model_name (falling back to module-level config constants).
        """
        system_prompt = kwargs.get("system_prompt", SYSTEM_PROMPT)
        max_tokens = kwargs.get("max_tokens", MAX_TOKENS)
        temperature = kwargs.get("temperature", TEMPERATURE)
        model_name = kwargs.get("model_name", MODEL_NAME)
        description = description or """Upload an image to ask question about it."""

        # MultimodalTextbox is only available in recent gradio versions.
        assert MultimodalTextbox is not None

        additional_inputs = additional_inputs or [
            gr.Number(value=temperature, label='Temperature', min_width=20),
            gr.Number(value=max_tokens, label='Max-tokens', min_width=20),
            gr.Textbox(value=system_prompt, label='System prompt', lines=1),
            gr.Textbox(value=IMAGE_TOKEN, label='Visual token', lines=1, interactive=IMAGE_TOKEN_INTERACTIVE, min_width=20),
        ]


        demo_chat = MultiModalTextChatInterface(
            self.gradio_fn,
            chatbot=gr.Chatbot(
                label=model_name,
                bubble_full_width=False,
                latex_delimiters=[
                    { "left": "$", "right": "$", "display": False},
                    { "left": "$$", "right": "$$", "display": True},
                ],
                show_copy_button=True,
                layout="panel" if USE_PANEL else "bubble",
                height=CHATBOT_HEIGHT,
            ),
            # textbox=gr.Textbox(placeholder='Type message', lines=4, max_lines=128, min_width=200),
            textbox=MultimodalTextbox(
                placeholder=self.mm_textbox_placeholder,
                interactive=True,
                scale=9,
                show_label=False,
                # file_types=["image", '.pdf', '.docx', '.txt'],
                file_types=self.mm_accept_file_types,
            ),
            title=title,
            description=description,
            additional_inputs=additional_inputs,
            additional_inputs_accordion=gr.Accordion("Additional Inputs", open=False),
            examples=self.examples,
            cache_examples=False,
            css=CSS,
            fill_height=True,
        )

        return demo_chat
872
+
873
+
874
+
875
@register_demo
class DocMMChatInterfaceDemo(VisionMMChatInterfaceDemo):
    """Document chat demo tab: accepts pdf/docx/txt uploads instead of images,
    routing turns to the document chat engine."""

    @property
    def tab_name(self):
        # Label shown on this demo's tab in the tabbed interface.
        return "Doc Chat"

    @property
    def mm_textbox_placeholder(self):
        return "Type message or upload a doc file (pdf, docx, txt)"

    @property
    def mm_accept_file_types(self):
        # Document formats only — no images on this tab.
        return ['.pdf', '.docx', '.txt']

    @property
    def examples(self):
        """Text-only example prompts (doc-upload examples are commented out)."""
        from pathlib import Path
        from gradio.data_classes import FileData, GradioModel
        return [
            [{"text": "Hãy giải thích thuyết tương đối rộng.", "files": []},],
            [{"text": "Hãy giải thích vấn đề P vs NP.", "files": []},],
            [{"text": "Explain general relativity in details.", "files": []},],
            # [{"text": 'Vừa gà vừa chó, bó lại cho tròn, 36 con và 100 chân chẵn. Hỏi có bao nhiêu gà và chó?', "files": []},],
            # [{"text": 'Hôm nay tôi có 5 quả cam. Hôm qua tôi ăn 2 quả. Vậy hôm nay tôi có mấy quả cam?', "files": []},],
            # [{"text": '5 điều bác Hồ dạy là gì?', "files": []},],
            [{"text": "Tolong bantu saya menulis email ke lembaga pemerintah untuk mencari dukungan finansial untuk penelitian AI.", "files": []},],
            [{"text": "ຂໍແຈ້ງ 5 ສະຖານທີ່ທ່ອງທ່ຽວໃນນະຄອນຫຼວງວຽງຈັນ", "files": []},],
            # [{"text": 'ငွေကြေးအခက်အခဲကြောင့် ပညာသင်ဆုတောင်းဖို့ တက္ကသိုလ်ကို စာတစ်စောင်ရေးပြီး ကူညီပေးပါ။', "files": []},],
            # [{"text": "Sally has 3 brothers, each brother has 2 sisters. How many sister sally has?", "files": []},],
            # [{"text": "There are 3 killers in a room. Someone enters the room and kills 1 of them. Assuming no one leaves the room. How many killers are left in the room?", "files": []},],
            # [{"text": "Assume the laws of physics on Earth. A small marble is put into a normal cup and the cup is placed upside down on a table. Someone then takes the cup and puts it inside the microwave. Where is the ball now? Explain your reasoning step by step.", "files": []},],
            # [{"text": "Why my parents did not invited me to their weddings?", "files": []},],
        ]

    def create_demo(
        self,
        title: str | None = None,
        description: str | None = None,
        additional_inputs: List[Any] | None = None,
        **kwargs
    ) -> gr.Blocks:
        """Build the doc-chat tab: like the vision tab, but the default extra
        inputs omit the visual-token textbox (no image token is needed)."""
        system_prompt = kwargs.get("system_prompt", SYSTEM_PROMPT)
        max_tokens = kwargs.get("max_tokens", MAX_TOKENS)
        temperature = kwargs.get("temperature", TEMPERATURE)
        additional_inputs = additional_inputs or [
            gr.Number(value=temperature, label='Temperature', min_width=20),
            gr.Number(value=max_tokens, label='Max-tokens', min_width=20),
            gr.Textbox(value=system_prompt, label='System prompt', lines=1),
        ]
        return super().create_demo(title, description, additional_inputs, **kwargs)

    @property
    def gradio_fn(self):
        # return vision_chat_response_stream_multiturn_engine
        return doc_chat_response_stream_multiturn_engine
934
+
935
+
936
+
937
+
938
+
939
@register_demo
class VisionDocMMChatInterfaceDemo(VisionMMChatInterfaceDemo):
    """Combined tab: accepts both images and document files (pdf, docx, txt),
    routing turns to the joint vision+doc chat engine."""

    @property
    def tab_name(self):
        # Label shown on this demo's tab in the tabbed interface.
        return "Vision Doc Chat"

    @property
    def mm_textbox_placeholder(self):
        return "Type message or upload an image or doc file (pdf, docx, txt)"

    @property
    def mm_accept_file_types(self):
        # Union of the vision tab's and doc tab's accepted types.
        return ['image', '.pdf', '.docx', '.txt']

    @property
    def gradio_fn(self):
        # return vision_chat_response_stream_multiturn_engine
        return vision_doc_chat_response_stream_multiturn_engine
961
+
962
+
963
+
964
+
965
+
multipurpose_chatbot/demos/multimodal_chat_interface.py CHANGED
@@ -1,4 +1,14 @@
1
- import spaces
 
 
 
 
 
 
 
 
 
 
2
  import os
3
  from gradio.themes import ThemeClass as Theme
4
  import numpy as np
@@ -904,7 +914,7 @@ def gradio_history_to_vision_doc_conversation_prompt_paths(
904
  return full_prompt, image_paths, conversations
905
 
906
 
907
- @spaces.GPU
908
  def vision_chat_response_stream_multiturn_engine(
909
  history: List[Tuple[str, str]],
910
  temperature: float,
@@ -957,7 +967,7 @@ def vision_chat_response_stream_multiturn_engine(
957
  yield response, num_tokens
958
 
959
 
960
- @spaces.GPU
961
  def doc_chat_response_stream_multiturn_engine(
962
  history: List[Tuple[str, str]],
963
  temperature: float,
@@ -1008,7 +1018,7 @@ def doc_chat_response_stream_multiturn_engine(
1008
 
1009
 
1010
 
1011
- @spaces.GPU
1012
  def vision_doc_chat_response_stream_multiturn_engine(
1013
  history: List[Tuple[str, str]],
1014
  temperature: float,
 
1
# `spaces` exists only on Hugging Face Spaces (ZeroGPU); elsewhere the
# decorator degrades to a no-op so the module stays importable locally.
try:
    import spaces

    def maybe_spaces_gpu(fn):
        """Wrap *fn* with `spaces.GPU` so it is scheduled on a GPU worker."""
        return spaces.GPU(fn)
# ImportError (superset of ModuleNotFoundError) also covers a broken install.
except ImportError:
    print('Cannot import hf `spaces` with `import spaces`.')

    def maybe_spaces_gpu(fn):
        """No-op fallback used when `spaces` is unavailable."""
        return fn
10
+
11
+
12
  import os
13
  from gradio.themes import ThemeClass as Theme
14
  import numpy as np
 
914
  return full_prompt, image_paths, conversations
915
 
916
 
917
+ @maybe_spaces_gpu
918
  def vision_chat_response_stream_multiturn_engine(
919
  history: List[Tuple[str, str]],
920
  temperature: float,
 
967
  yield response, num_tokens
968
 
969
 
970
+ @maybe_spaces_gpu
971
  def doc_chat_response_stream_multiturn_engine(
972
  history: List[Tuple[str, str]],
973
  temperature: float,
 
1018
 
1019
 
1020
 
1021
+ @maybe_spaces_gpu
1022
  def vision_doc_chat_response_stream_multiturn_engine(
1023
  history: List[Tuple[str, str]],
1024
  temperature: float,
multipurpose_chatbot/demos/text_completion.py CHANGED
@@ -1,4 +1,13 @@
1
- import spaces
 
 
 
 
 
 
 
 
 
2
  import os
3
  from gradio.themes import ThemeClass as Theme
4
  import numpy as np
@@ -62,7 +71,7 @@ from ..configs import (
62
 
63
  from ..globals import MODEL_ENGINE
64
 
65
- @spaces.GPU
66
  def generate_text_completion_stream_engine(
67
  message: str,
68
  temperature: float,
 
1
# `spaces` exists only on Hugging Face Spaces (ZeroGPU); elsewhere the
# decorator degrades to a no-op so the module stays importable locally.
try:
    import spaces

    def maybe_spaces_gpu(fn):
        """Wrap *fn* with `spaces.GPU` so it is scheduled on a GPU worker."""
        return spaces.GPU(fn)
# ImportError (superset of ModuleNotFoundError) also covers a broken install.
except ImportError:
    print('Cannot import hf `spaces` with `import spaces`.')

    def maybe_spaces_gpu(fn):
        """No-op fallback used when `spaces` is unavailable."""
        return fn
10
+
11
  import os
12
  from gradio.themes import ThemeClass as Theme
13
  import numpy as np
 
71
 
72
  from ..globals import MODEL_ENGINE
73
 
74
+ @maybe_spaces_gpu
75
  def generate_text_completion_stream_engine(
76
  message: str,
77
  temperature: float,
multipurpose_chatbot/engines/sealmmm_engine.py CHANGED
@@ -1,7 +1,10 @@
1
  # from transformers_stream_generator import init_stream_support
2
  # init_stream_support()
3
 
4
- import spaces
 
 
 
5
  import os
6
  import numpy as np
7
  import argparse
 
1
  # from transformers_stream_generator import init_stream_support
2
  # init_stream_support()
3
 
4
# Optional dependency: `spaces` exists only on Hugging Face Spaces (ZeroGPU).
# A failed import is non-fatal — GPU decoration is simply skipped downstream.
try:
    import spaces
# ImportError (superset of ModuleNotFoundError) also covers a broken install.
except ImportError:
    print('Cannot import hf `spaces` with `import spaces`.')
8
  import os
9
  import numpy as np
10
  import argparse
multipurpose_chatbot/engines/transformers_engine.py CHANGED
@@ -1,5 +1,8 @@
1
 
2
- import spaces
 
 
 
3
  import os
4
  import numpy as np
5
  import argparse
 
1
 
2
# Optional dependency: `spaces` exists only on Hugging Face Spaces (ZeroGPU).
# A failed import is non-fatal — GPU decoration is simply skipped downstream.
try:
    import spaces
# ImportError (superset of ModuleNotFoundError) also covers a broken install.
except ImportError:
    print('Cannot import hf `spaces` with `import spaces`.')
6
  import os
7
  import numpy as np
8
  import argparse