Upload folder using huggingface_hub
run.ipynb
CHANGED
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine # type: ignore\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine # type: ignore\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "llm_engine = HfEngine(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n", "# Initialize the agent with both tools\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "def interact_with_agent(prompt, messages):\n", " messages.append(ChatMessage(role=\"user\", content=prompt))\n", " yield messages\n", " for msg in stream_from_transformers_agent(agent, prompt):\n", " messages.append(msg)\n", " yield messages\n", " yield messages\n", "\n", "with gr.Blocks() as demo:\n", " stored_message = gr.State([])\n", " chatbot = gr.Chatbot(label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(None, \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\"))\n", " text_input = gr.Textbox(lines=1, label=\"Chat Message\")\n", " text_input.submit(lambda s: (s, \"\"), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
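The notebook's first two cells only set up the environment (a source install of transformers with the [agents] extra, plus a wget of utils.py); the demo cell mirrors run.py below. As an optional sanity check that is not part of the committed notebook, a sketch like the following confirms the agents module is importable after the install cell:

# Optional check, not part of the notebook: the demo's imports only work on
# transformers builds that ship the agents module (installed above via the [agents] extra).
import transformers
from transformers.agents import ReactCodeAgent, agent_types  # raises ImportError if the extra is missing

print("transformers", transformers.__version__, "- agents module available")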
run.py
CHANGED
@@ -6,12 +6,10 @@ from utils import stream_from_transformers_agent
 # Import tool from Hub
 image_generation_tool = load_tool("m-ric/text-to-image")

-
 llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
 # Initialize the agent with both tools
 agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)

-
 def interact_with_agent(prompt, messages):
     messages.append(ChatMessage(role="user", content=prompt))
     yield messages
@@ -20,7 +18,6 @@ def interact_with_agent(prompt, messages):
         yield messages
     yield messages

-
 with gr.Blocks() as demo:
     stored_message = gr.State([])
     chatbot = gr.Chatbot(label="Agent",
@@ -29,6 +26,5 @@ with gr.Blocks() as demo:
     text_input = gr.Textbox(lines=1, label="Chat Message")
     text_input.submit(lambda s: (s, ""), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])

-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
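For readers who want the streaming pattern in isolation: run.py yields the growing list of gr.ChatMessage objects from a generator, and the type="messages" Chatbot re-renders on every yield. The sketch below keeps that wiring but swaps the ReactCodeAgent for a hypothetical echo generator (fake_agent_reply is not part of the demo), so it runs without Hub access or a GPU:

import time

import gradio as gr
from gradio import ChatMessage


def fake_agent_reply(prompt, messages):
    # Same contract as interact_with_agent: append ChatMessages and re-yield the full history.
    messages.append(ChatMessage(role="user", content=prompt))
    yield messages
    messages.append(ChatMessage(role="assistant", content="Working on it...", metadata={"title": "🛠️ Pretending to use a tool"}))
    yield messages
    time.sleep(0.5)  # stand-in for real agent work
    messages.append(ChatMessage(role="assistant", content=f"Echo: {prompt}"))
    yield messages


with gr.Blocks() as sketch:
    stored_message = gr.State([])
    chatbot = gr.Chatbot(label="Agent", type="messages")
    text_input = gr.Textbox(lines=1, label="Chat Message")
    text_input.submit(lambda s: (s, ""), [text_input], [stored_message, text_input]).then(
        fake_agent_reply, [stored_message, chatbot], [chatbot]
    )

if __name__ == "__main__":
    sketch.launch()

Passing the Chatbot itself as an input is what lets the generator extend the existing history on each turn rather than rebuild it.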
utils.py
CHANGED
@@ -4,7 +4,6 @@ from gradio import ChatMessage
 from transformers.agents import ReactCodeAgent, agent_types
 from typing import Generator

-
 def pull_message(step_log: dict):
     if step_log.get("rationale"):
         yield ChatMessage(
@@ -31,7 +30,6 @@ def pull_message(step_log: dict):
             metadata={"title": "💥 Error"},
         )

-
 def stream_from_transformers_agent(
     agent: ReactCodeAgent, prompt: str
 ) -> Generator[ChatMessage, None, ChatMessage | None]:
@@ -39,7 +37,7 @@ def stream_from_transformers_agent(

     class Output:
         output: agent_types.AgentType | str = None
-
+
     step_log = None
     for step_log in agent.run(prompt, stream=True):
         if isinstance(step_log, dict):
@@ -47,7 +45,6 @@ def stream_from_transformers_agent(
                 print("message", message)
                 yield message

-
     Output.output = step_log
     if isinstance(Output.output, agent_types.AgentText):
         yield ChatMessage(