import random
import gradio as gr
from huggingface_hub import InferenceClient
import spaces
import transformers
from fastchat.conversation import get_conv_template
import subprocess
import os
from utils import toolgen_request
import ast
import modelscope_studio.components.antd as antd
import modelscope_studio.components.base as ms

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
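#
# A minimal (commented-out) sketch of streaming chat completion with the
# client above, assuming huggingface_hub's documented chat_completion API:
#
# for chunk in client.chat_completion(
#     [{"role": "user", "content": "Hello"}], max_tokens=64, stream=True
# ):
#     print(chunk.choices[0].delta.content, end="")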


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content

#         response += token
#         yield response


# rapidapi_wrapper = RapidAPIWrapper(
#     toolbench_key=os.environ.get("TOOLBENCH_KEY", ""),  # read keys from the env; never hardcode them
#     rapidapi_key=os.environ.get("RAPIDAPI_KEY", ""),
# )
# toolgen = ToolGen(
#     "reasonwang/ToolGen-WoSystem-Llama-3-8B-Instruct",
#     indexing="Atomic",
#     tools=rapidapi_wrapper,
# )
# subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)

# @spaces.GPU(duration=15)
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     messages.append({"role": "user", "content": message})
#     conv = get_conv_template("llama-3")
#     for message in messages:
#         conv.append_message(conv.roles[0] if message['role'] == 'user' else conv.roles[1], message['content'])
#     conv.append_message(conv.roles[1], None)
#     prompt = conv.get_prompt()
#     # print(prompt)
    
#     inputs = tokenizer(prompt, return_tensors='pt')
#     input_length = inputs["input_ids"].shape[1]

#     for k, v in inputs.items():
#         inputs[k] = v.to("cuda")
#     outputs = model.generate(
#         **inputs, 
#         max_new_tokens=max_tokens,
#         temperature=temperature,
#         top_p=top_p,
#     )
#     output_ids = outputs[0][input_length:-1]
#     output_length = output_ids.shape[0]
#     generated_text = tokenizer.decode(output_ids)
#     yield generated_text

# @spaces.GPU(duration=30)
# def toolgen_respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [
#         {"role": "system", "content": system_message},
#         {"role": "user", "content": message},
#     ]
#     toolgen.restart()
#     contents = ""
#     for content in toolgen.start(
#         single_chain_max_step=10,
#         start_messages=messages,
#         streaming=True,
#     ):
#         contents += content
#         yield contents

# toolgen_respond.zerogpu = True

"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Debug helper (not wired into the UI below): echoes the query back as two styled HTML snippets.
def test_click(query):
    print(query)
    return (
        f'''<p style="color:red;">I am red: {query} is a query </p>''',
        f'''<p style="color:blue;">I am blue: {query} is a query </p>''',
    )

def resolve_image(filename):
    """Resolve an asset path relative to this file so it works from any CWD."""
    return os.path.join(os.path.dirname(__file__), filename)

# @gr.render(inputs=[query])
# def show_split(text):
#     if len(text) == 0:
#         gr.Markdown("## No Input Provided")
#     else:
#         for letter in text:
#             with gr.Row():
#                 text = gr.Textbox(letter)
#                 btn = gr.Button("Clear")
#                 btn.click(lambda: gr.Textbox(value=""), None, text)

example_queries = [
    ["I'm a football fan and I'm curious about the different team names used in different leagues and countries. Can you provide me with an extensive list of football team names and their short names? It would be great if I could access more than 7000 team names. Additionally, I would like to see the first 25 team names and their short names using the basic plan."],
    ["I want to eat some cakes. Please recommend some websites about cakes for me."],
    ["My company is hosting a rugby event and we need to provide pre-match form information to the participants. Can you fetch the pre-match form for a specific rugby match? We would also like to see the incidents that occurred during the match."],
    ["I'm a photographer and want to capture the beauty of the beach during sunrise and sunset. Can you provide me with the astronomy data for a specific location? It would be great to know the sunrise and sunset times. Additionally, I would like to retrieve the high and low tide information for that location."],
    ["Give me some information about the president of the United States. I want you to directly answer this question, do not call any tool."],
    ["I want you to give up this task."],
]

action_color_palette = [
    "#b9d9eb",
    "#fde8f7",
    "#d9d577",
    "#b784e7",
    "#f9a596",
    "#ece2d0",
    "#d3c2ce",
]
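# Colors are assigned round-robin to successive Action cards so repeated
# tool calls stay visually distinguishable in the Agent column.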


def run(query):
    endpoint = "http://localhost:5000/generate"
    # Shuffle a copy so concurrent sessions don't mutate the shared palette.
    palette = random.sample(action_color_palette, len(action_color_palette))

    agent_content = []
    observation_content = []

    # Placeholder card rendered as a loading spinner until the run finishes.
    unfinished_content = {
        "title": "Unfinished"
    }
    yield [unfinished_content], [unfinished_content]
    action_count = 0

    for status in toolgen_request(endpoint, query, system_prompt=None):
        if "message" in status:
            agent_content.append({
                "title": "Thought",
                "content": status['message']['content']
            })
            agent_content.append({
                "title": "Action",
                "content": status['message']['action'],
                "color": palette[action_count % len(palette)]
            })
            action_count += 1
            agent_content.append({
                "title": "Arguments",
                "content": status['message']['arguments']
            })
            yield agent_content + [unfinished_content], observation_content + [unfinished_content]
        elif "observation" in status:
            observation_content.append({
                "title": "Observation",
                "content": status["observation"]
            })
            yield agent_content + [unfinished_content], observation_content + [unfinished_content]
    # Final yield without the placeholder marks both columns as finished.
    yield agent_content, observation_content
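
# A minimal mock of the status protocol `run` consumes, handy for exercising
# the UI without the local inference server on port 5000. The two dict shapes
# ({"message": {...}} and {"observation": ...}) are inferred from `run` above,
# not from the server itself; the query/answer values are illustrative only.
def mock_toolgen_request(endpoint, query, system_prompt=None):
    yield {"message": {"content": "I should fetch the astronomy data first.",
                       "action": "get_astronomy_data",
                       "arguments": '{"location": "Santa Monica"}'}}
    yield {"observation": '{"sunrise": "06:12", "sunset": "19:48"}'}
    yield {"message": {"content": "I have enough information to answer.",
                       "action": "Finish",
                       "arguments": '{"final_answer": "Sunrise is at 06:12."}'}}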


with gr.Blocks(delete_cache=(600, 600)) as demo:
    with ms.Application():
        with antd.ConfigProvider():

            with gr.Column():
                
                gr.Markdown("""
                ## Complete Tasks with [ToolGen](https://arxiv.org/abs/2410.03439): New Paradigm for LLM Agent
                * 🚀 Input a query and click "Inference" to complete the task. The whole process may take some time.
                * 🛠️ Currently we only support tools in ToolBench, we are committed to develop ToolGen for more tools.
                * 🎮 Have fun!
                

                """)
                with antd.Card(elem_style=dict(marginBottom=4),
                            styles=dict(body=dict(padding=4))):
                    with antd.Flex(elem_style=dict(width="100%",height="100%"),
                               justify="center",
                               align="center",
                               gap=0):
                        with ms.Div(elem_style=dict(flexShrink=0)):
                            antd.Image(resolve_image("./assets/banner.jpg"), preview=False, height=80, width=900)
            
                # with antd.Flex(vertical=True, justify="flex-start", align="center"):
                # with antd.Flex(vertical=True, justify="center", align="flex-start"):
                # with antd.Flex(vertical=True, justify="center", align="center"):
            with gr.Row():
                with gr.Column():
                    query = gr.TextArea(placeholder="Type your query here...", label="Query")
                    generate_btn = gr.Button("Inference")
                    examples = gr.Examples(example_queries, label="Examples", inputs=query)
                    # gr.Markdown("""*NOTE: Gaussian file can be very large (~50MB), it will take a while to display and download.*""")
                
                with gr.Column():
                    agent_box = gr.TextArea(placeholder="Agent", visible=False)
                    with antd.Card(size="small", bordered=False):
                        ms.Div("Agent", elem_style={"font-weight": "bold"})
                    # agent_box holds the repr() of a list of card dicts streamed
                    # by `run`; re-render the Agent column whenever it changes.
                    @gr.render(inputs=[agent_box], triggers=[agent_box.change])
                    def render_agent(agent_content):
                        agent_content = ast.literal_eval(agent_content)
                        for content in agent_content:
                            if content['title'] == "Unfinished":
                                with antd.Card(title="Generating...", size='small', bordered=False, loading=True):
                                    pass
                            else:
                                if content['title'] == "Action":
                                    with antd.Card(title=content['title'], size="small", bordered=False):
                                        ms.Div(content['content'], elem_style={"background-color": content["color"], "font-weight": "bold", "border-radius": "6px", "display": "inline-block"})
                                else:
                                    with antd.Card(title=content['title'], size="small", bordered=False):
                                        ms.Div(content['content'])

                with gr.Column():
                    system_box = gr.TextArea(placeholder="System", visible=False)
                    with antd.Card(size="small", bordered=False):
                        ms.Div("System", elem_style={"font-weight": "bold"})
                    # system_box mirrors agent_box, holding the repr() of the
                    # observation cards; the distinct name avoids shadowing render_agent.
                    @gr.render(inputs=[system_box], triggers=[system_box.change])
                    def render_system(observation_content):
                        observation_content = ast.literal_eval(observation_content)
                        for content in observation_content:
                            if content['title'] == "Unfinished":
                                with antd.Card(title="Processing...", size='small', bordered=False, loading=True):
                                    pass
                            else:
                                with antd.Card(title=content['title'], size="small", bordered=False):
                                    ms.Div(content['content'])
                
                generate_btn.click(run, inputs=[query], outputs=[agent_box, system_box], time_limit=120)
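                # `run` is a generator: each yield streams the card lists (as their
                # repr) into the hidden TextAreas, whose .change events trigger the
                # @gr.render redraws above. time_limit caps a single run at 120 s.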



if __name__ == "__main__":
    demo.launch(server_port=7860)