Upload 56 files
- .gitattributes +34 -35
- README.md +14 -10
- experimental.ipynb +64 -0
- infiniInference/agent_factory.py +20 -0
- infiniInference/agent_node.py +4 -0
- infiniInference/handler.py +48 -0
- infiniInference/supervisor.py +97 -0
- infiniInference/testme.ipynb +35 -0
- myapp.py +1 -0
- perm/Agents9 (2).ipynb +552 -0
- perm/CustomLLMMistral.py +58 -0
- perm/HuggingFaceAI.py +22 -0
- perm/__pycache__/CustomLLMMistral.cpython-39.pyc +0 -0
- perm/__pycache__/HuggingFaceAI.cpython-39.pyc +0 -0
- perm/__pycache__/agent_system.cpython-39.pyc +0 -0
- perm/agent_system.py +55 -0
- perm/agents/.ipynb_checkpoints/agent_node-checkpoint.py +7 -0
- perm/agents/.ipynb_checkpoints/agent_state-checkpoint.py +11 -0
- perm/agents/.ipynb_checkpoints/agent_support-checkpoint.py +23 -0
- perm/agents/.ipynb_checkpoints/help_agent-checkpoint.py +30 -0
- perm/agents/.ipynb_checkpoints/project_agent-checkpoint.py +41 -0
- perm/agents/.ipynb_checkpoints/supervisor-checkpoint.py +65 -0
- perm/agents/.~agent_node.py +0 -0
- perm/agents/.~agent_state.py +0 -0
- perm/agents/.~agent_support.py +0 -0
- perm/agents/.~help_agent.py +0 -0
- perm/agents/__pycache__/agent_node.cpython-39.pyc +0 -0
- perm/agents/__pycache__/agent_state.cpython-39.pyc +0 -0
- perm/agents/__pycache__/agent_support.cpython-39.pyc +0 -0
- perm/agents/__pycache__/help_agent.cpython-39.pyc +0 -0
- perm/agents/__pycache__/project_agent.cpython-39.pyc +0 -0
- perm/agents/__pycache__/supervisor.cpython-39.pyc +0 -0
- perm/agents/agent_node.py +7 -0
- perm/agents/agent_state.py +11 -0
- perm/agents/agent_support.py +23 -0
- perm/agents/help_agent.py +30 -0
- perm/agents/project_agent.py +41 -0
- perm/agents/supervisor.py +65 -0
- perm/app.py +58 -0
- perm/experiment.py +77 -0
- perm/hello.py +3 -0
- perm/scratchpad.py +25 -0
- perm/start.sh +5 -0
- perm/tasks.json +10 -0
- perm/tools/.ipynb_checkpoints/multiply_tool-checkpoint.py +7 -0
- perm/tools/.ipynb_checkpoints/robot_information-checkpoint.py +14 -0
- perm/tools/__pycache__/multiply_tool.cpython-39.pyc +0 -0
- perm/tools/__pycache__/robot_information.cpython-39.pyc +0 -0
- perm/tools/multiply_tool.py +7 -0
- perm/tools/robot_information.py +14 -0
.gitattributes
CHANGED
@@ -1,35 +1,34 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.
-*.
-*.
-*.
-*.
-*.
-
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,10 +1,14 @@
----
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk: docker
-pinned: false
-
-
-
+---
+title: InfiniInference
+emoji: 💻🐳
+colorFrom: gray
+colorTo: green
+sdk: docker
+pinned: false
+tags:
+- jupyterlab
+suggested_storage: small
+license: other
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
experimental.ipynb
ADDED
@@ -0,0 +1,64 @@
Cell 1 (code, not executed):
%pip install -r requirements.txt

Cell 2 (code, not executed):
%env LANGCHAIN_TRACING_V2=true
%env LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
%env LANGCHAIN_API_KEY=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
%env LANGCHAIN_PROJECT=TestProject

Cell 3 (code, not executed):
from infiniInference.handler import start
from infiniInference.agent_factory import create_agent
start()

Notebook metadata: Python 3 kernelspec, nbformat 4.5.
infiniInference/agent_factory.py
ADDED
@@ -0,0 +1,20 @@
from langchain.agents import AgentExecutor
from transformers import PreTrainedModel


def create_agent(llm: PreTrainedModel, tools: list, system_prompt: str):
    # Each worker node will be given a name and some tools.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                system_prompt,
            ),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = prompt | llm.bind_tools(tools)
    # create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)
    return executor
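As committed, agent_factory.py uses ChatPromptTemplate and MessagesPlaceholder without importing them, so calling create_agent raises a NameError; bind_tools is also a chat-model method that a bare transformers PreTrainedModel does not expose, so in practice the llm argument is presumably a LangChain chat wrapper. A minimal sketch of the likely-intended import, assuming the same langchain-core classes used in infiniInference/handler.py:

# Sketch only (assumption): the prompt classes create_agent relies on.
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder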
infiniInference/agent_node.py
ADDED
@@ -0,0 +1,4 @@
from langchain_core.messages import BaseMessage, HumanMessage
def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}
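For context, agent_node is meant to be bound to one concrete agent and registered as a LangGraph node, which is how handler.py uses it via functools.partial. A minimal usage sketch, where research_agent stands in for any executor returned by create_agent:

import functools

# Bind the generic helper to one agent so LangGraph can call it with only the graph state.
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")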
infiniInference/handler.py
ADDED
@@ -0,0 +1,48 @@
import getpass
import os
from langchain_core.messages import BaseMessage
from infiniInference.agent_factory import create_agent
from infiniInference.supervisor import llm


def _set_if_undefined(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"Please provide your {var}")


#_set_if_undefined("OPENAI_API_KEY")
# Optional, add tracing in LangSmith
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "Agent test"

import operator
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict
import functools

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph, END


# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    # The annotation tells the graph that new messages will always
    # be added to the current states
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str

research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", supervisor_chain)
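handler.py references several names it never defines or imports (tavily_tool, python_repl_tool, agent_node, supervisor_chain) and exposes no start() even though experimental.ipynb imports one, so the module does not import cleanly as committed. A minimal sketch of how the missing names could be supplied; the Tavily tool is an assumption (only the variable name appears here), the REPL tool is the one used in perm/Agents9 (2).ipynb, and supervisor_chain is assumed to map to the chain built in infiniInference/supervisor.py:

# Sketch only (assumptions): plausible sources for the undefined names.
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_experimental.tools import PythonREPLTool
from infiniInference.agent_node import agent_node
from infiniInference.supervisor import chain as supervisor_chain  # supervisor.py defines `chain`

tavily_tool = TavilySearchResults(max_results=5)  # needs TAVILY_API_KEY in the environment
python_repl_tool = PythonREPLTool()               # executes arbitrary code locally; use with caution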
infiniInference/supervisor.py
ADDED
@@ -0,0 +1,97 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM, pipeline
from langchain_huggingface import HuggingFacePipeline
import getpass
from langchain.chains import SimpleChain
from langchain_mistralai import ChatMistralAI
token = getpass.getpass("Token: ")

members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Our team supervisor is an LLM node. It just picks the next agent to process
# and decides when the work is completed
options = ["FINISH"] + members
# Using openai function calling can make output parsing easier for us
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))
path = "mistralai/Mistral-7B-Instruct-v0.3"

model = AutoModelForCausalLM.from_pretrained(
    path,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map="auto",
    token=token
)

tokenizer = AutoTokenizer.from_pretrained(path, token=token)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
tokenizer.padding_side = "left"

pipe = pipeline(task='text-generation', model=model, tokenizer=tokenizer,
                num_return_sequences=1,
                eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id,
                max_new_tokens=260, temperature=0.7, do_sample=True)

llm = HuggingFacePipeline(pipeline=pipe)



def custom_function(input_text):
    # Example function logic
    return {"output": "processed " + input_text}

from langchain.tools import Tool

class MyCustomTool(Tool):
    def call(self, input_text: str) -> str:
        # Custom tool logic here
        return f"Processed: {input_text}"

# Initialize the custom tool
my_tool = MyCustomTool()

chain = prompt | llm | [my_tool]

# Define the input text
input_text = "Your input text here"

# Run the chain with the input text
result = chain.invoke(input_text)

print(result)
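Two parts of supervisor.py look unfinished as committed: langchain.chains does not, to my knowledge, export a SimpleChain (and ChatMistralAI is imported but never used), and `prompt | llm | [my_tool]` pipes a runnable into a plain Python list, which is not a valid LCEL step, so building `chain` should fail before invoke is ever reached. A minimal sketch of a composition that matches the surrounding intent (the supervisor answers with the next worker as plain text); StrOutputParser is a standard langchain-core parser and the variable name is illustrative:

# Sketch only (assumption): parse the supervisor's reply as a plain string.
from langchain_core.output_parsers import StrOutputParser

supervisor_chain = prompt | llm | StrOutputParser()
# supervisor_chain.invoke({"messages": [...]}) -> e.g. "Researcher", "Coder" or "FINISH"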
infiniInference/testme.ipynb
ADDED
@@ -0,0 +1,35 @@
Cell 1 (code, not executed):
test 

Notebook metadata: Python 3 kernelspec, nbformat 4.5.
myapp.py
ADDED
@@ -0,0 +1 @@
print("Hello remote")
perm/Agents9 (2).ipynb
ADDED
@@ -0,0 +1,552 @@
Cell 1 (code, execution count 3):
from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM, pipeline
import torch
from langchain_core.messages import (
    HumanMessage,
    SystemMessage,
    BaseMessage
)
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Type,
    Union,
    cast,
)

class HuggingFaceAI(ChatHuggingFace):

    def _to_chat_prompt(
        self,
        messages: List[BaseMessage],
    ) -> str:
        """Convert a list of messages into a prompt format expected by wrapped LLM."""
        if not messages:
            raise ValueError("At least one HumanMessage must be provided!")

        if not isinstance(messages[-1], HumanMessage) and not isinstance(messages[-1], SystemMessage):
            raise ValueError("Last message must be a HumanMessage or SystemMessage!!!")

        messages_dicts = [self._to_chatml_format(m) for m in messages]

        return self.tokenizer.apply_chat_template(
            messages_dicts, tokenize=False, add_generation_prompt=True
        )

Cell 2 (code, execution count 4):
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline

llm = HuggingFacePipeline.from_model_id(
    model_id="mistralai/Mistral-7B-Instruct-v0.3",
    device_map="auto",
    task="text-generation",
    pipeline_kwargs={
        "max_new_tokens": 100,
        "top_k": 50,
    },
)

llm_engine_hf = HuggingFaceAI(llm=llm)
llm_engine_hf.invoke("Hugging Face is")

Output: Hub download and shard-loading progress bars (tokenizer_config.json, tokenizer.model, tokenizer.json, special_tokens_map.json, config.json, model.safetensors.index.json, model-00001/00002/00003-of-00003.safetensors, generation_config.json), then:
AIMessage(content='<s>[INST] Hugging Face is [/INST] Hugging Face is a technology company that specializes in natural language processing (NLP). They are best known for their transformers library, which is a state-of-the-art machine learning framework for NLP tasks. The transformers library includes pre-trained models for a wide range of NLP tasks such as language translation, text classification, and question answering. Hugging Face also provides a platform for training, sharing, and using NLP models, called the Hugging Face Model Hub', id='run-9b0b07cc-2121-4c78-8ebf-61df58694193-0')

Cell 3 (code, execution count 5):
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
from langchain_openai import ChatOpenAI


def create_agent(llm: ChatHuggingFace, tools: list, system_prompt: str):
    # Each worker node will be given a name and some tools.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                system_prompt,
            ),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)
    return executor

Cell 4 (code, execution count 6):
from typing import Annotated, List, Tuple, Union

from langchain_core.tools import tool
from langchain_experimental.tools import PythonREPLTool


# This executes code locally, which can be unsafe
python_repl_tool = PythonREPLTool()

Cell 5 (code, execution count 7):
chat_model = llm_engine_hf

Cell 6 (code, execution count 8):
def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}

Cell 7 (code, execution count 9):
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    print("Multiply used")
    return a * b

Cell 8 (code, execution count 22):
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda

def stripPrompt(info):
    print(info)
    eot_token = "[/INST] "
    i = info.content.rfind(eot_token)
    if i == -1:
        return info

    info.content = info.content[i + len(eot_token):]

    return info.content

members = ["Multiplier", "Coder"]
# Our team supervisor is an LLM node. It just picks the next agent to process
# and decides when the work is completed
options = ["FINISH"] + members
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)


prompt = ChatPromptTemplate.from_messages(
    [
        ("human", system_prompt),
        ("assistant", "ok"),
        MessagesPlaceholder(variable_name="messages"),
        ("assistant", "ok"),
        (
            "human",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))

chain = ( prompt | chat_model | RunnableLambda(stripPrompt))

Cell 9 (code, execution count 23):
import operator
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict
import functools

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph, END


multiply_agent = create_agent(chat_model, [multiply], "You multiply numbers")
multiply_node = functools.partial(agent_node, agent=multiply_agent, name="Multiplier")
#research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION
code_agent = create_agent(
    chat_model,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

workflow = StateGraph(AgentState)
workflow.add_node("Multiplier", multiply_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", chain)

Cell 10 (code, execution count 24):
for member in members:
    # We want our workers to ALWAYS "report back" to the supervisor when done
    workflow.add_edge(member, "supervisor")
# The supervisor populates the "next" field in the graph state
# which routes to a node or finishes
conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
# Finally, add entrypoint
workflow.set_entry_point("supervisor")

graph = workflow.compile()

Cell 11 (code, execution count 25):
for s in graph.stream(
    {
        "messages": [
            HumanMessage(content="What is 4 multiplied by 35")
        ]
    }
):
    if "__end__" not in s:
        print(s)
        print("----")

Output (stdout): content="<s>[INST] You are a supervisor tasked with managing a conversation between the following workers: Multiplier, Coder. Given the following user request, respond with the worker to act next. Each worker will perform a task and respond with their results and status. When finished, respond with FINISH. [/INST]ok</s>[INST] What is 4 multiplied by 35 [/INST]ok</s>[INST] Given the conversation above, who should act next? Or should we FINISH? Select one of: ['FINISH', 'Multiplier', 'Coder'] [/INST] Multiplier (since they provided the result of the multiplication)" id='run-8c0df905-9902-4ac2-8327-f4e4fb612cc2-0'
The run then fails inside graph.stream (langgraph pregel -> ChannelWrite._write -> CompiledStateGraph attach_node _get_state_key) with:
InvalidUpdateError: Expected dict, got Multiplier (since they provided the result of the multiplication)

Cells 12 and 13 (code): empty.

Notebook metadata: Python 3 (ipykernel), Python 3.9.5, nbformat 4.5.
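The InvalidUpdateError at the end of the notebook occurs because the supervisor node returns the model's free-text reply (via stripPrompt), while add_conditional_edges and the AgentState schema expect a dict update of the form {"next": "<member>"}. A minimal sketch of a post-processing step that would produce that shape, mirroring the check_step logic in perm/agent_system.py; the helper name and the FINISH fallback are assumptions, not the committed code:

from langchain_core.runnables import RunnableLambda

def route_from_text(reply: str, members=("Multiplier", "Coder")) -> dict:
    # Pick the first worker named in the supervisor's reply; otherwise finish.
    for member in members:
        if member in reply:
            return {"next": member}
    return {"next": "FINISH"}

supervisor_chain = prompt | chat_model | RunnableLambda(stripPrompt) | RunnableLambda(route_from_text)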
perm/CustomLLMMistral.py
ADDED
@@ -0,0 +1,58 @@
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Optional, List, Mapping, Any

import warnings
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from transformers.models.mistral.modeling_mistral import MistralForCausalLM
from transformers.models.llama.tokenization_llama_fast import LlamaTokenizerFast
from pydantic import Field

class CustomLLMMistral(LLM):
    model: MistralForCausalLM = Field(...)
    tokenizer: LlamaTokenizerFast = Field(...)

    def __init__(self):

        model_name = "mistralai/Mistral-7B-Instruct-v0.3"

        quantization_config = BitsAndBytesConfig(load_in_4bit=True)
        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, quantization_config=quantization_config, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        super().__init__(model=model, tokenizer=tokenizer)
        self.model = model
        self.tokenizer = tokenizer

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None) -> str:

        messages = [
            {"role": "user", "content": prompt},
        ]

        encodeds = self.tokenizer.apply_chat_template(messages, return_tensors="pt")
        model_inputs = encodeds.to(self.model.device)

        generated_ids = self.model.generate(model_inputs, max_new_tokens=512, do_sample=True, pad_token_id=self.tokenizer.eos_token_id, top_k=4, temperature=0.7)
        decoded = self.tokenizer.batch_decode(generated_ids)

        output = decoded[0].split("[/INST]")[1].replace("</s>", "").strip()

        if stop is not None:
            for word in stop:
                output = output.split(word)[0].strip()

        while not output.endswith("```"):
            output += "`"

        return output

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"model": self.model}
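A minimal usage sketch for the wrapper above (illustrative only; it assumes the Mistral weights can be downloaded and a bitsandbytes-capable GPU is available, as in perm/experiment.py):

    from CustomLLMMistral import CustomLLMMistral

    llm = CustomLLMMistral()  # loads the 4-bit quantized model and tokenizer
    print(llm.invoke("Summarise what you can do in one sentence."))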
perm/HuggingFaceAI.py
ADDED
@@ -0,0 +1,22 @@
from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from typing import List

class HuggingFaceAI(ChatHuggingFace):

    def _to_chat_prompt(
        self,
        messages: List[BaseMessage],
    ) -> str:
        """Convert a list of messages into a prompt format expected by wrapped LLM."""
        if not messages:
            raise ValueError("At least one HumanMessage must be provided!")

        if not isinstance(messages[-1], (HumanMessage, SystemMessage)):
            raise ValueError("Last message must be a HumanMessage or SystemMessage!")

        messages_dicts = [self._to_chatml_format(m) for m in messages]

        return self.tokenizer.apply_chat_template(
            messages_dicts, tokenize=False, add_generation_prompt=True
        )
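An illustrative sketch of how this subclass could wrap a local pipeline (the model id and generation settings are assumptions mirroring perm/app.py, not part of this file):

    from langchain_huggingface import HuggingFacePipeline
    from langchain_core.messages import HumanMessage, SystemMessage
    from HuggingFaceAI import HuggingFaceAI

    pipeline_llm = HuggingFacePipeline.from_model_id(
        model_id="mistralai/Mistral-7B-Instruct-v0.3",
        task="text-generation",
        pipeline_kwargs={"max_new_tokens": 100},
    )
    chat = HuggingFaceAI(llm=pipeline_llm)
    # Unlike the stock ChatHuggingFace, a trailing SystemMessage is also accepted here.
    print(chat.invoke([HumanMessage(content="Hello"), SystemMessage(content="Answer briefly.")]))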
perm/__pycache__/CustomLLMMistral.cpython-39.pyc
ADDED
Binary file (2.53 kB)
perm/__pycache__/HuggingFaceAI.cpython-39.pyc
ADDED
Binary file (1.24 kB)
perm/__pycache__/agent_system.cpython-39.pyc
ADDED
Binary file (1.93 kB)
perm/agent_system.py
ADDED
@@ -0,0 +1,55 @@
from langgraph.graph import StateGraph, END
from agents.agent_state import AgentState


class AgentSystem:
    def __init__(self, nodes, members):
        self.workflow = self.setup_nodes(nodes)
        self.setup_graph(members)
        self.members = members

    def get_workflow(self):
        return self.workflow

    def compile(self):
        return self.workflow.compile()

    def setup_nodes(self, nodes):
        workflow = StateGraph(AgentState)

        for node in nodes:
            workflow.add_node(node["name"], node["instance"])

        return workflow

    def check_step(self, x):
        o = x["next"]
        #print(f"Check for {o}")
        #print(self.members)

        for member in self.members:
            if member in o:
                #print(f"Found member: {member} in target_string")
                return member
            #else:
                #print(f"Member: {member} not found in target_string")

        #print("nothing found, Finish")
        return "FINISH"

    def setup_graph(self, members):
        for member in members:
            # We want our workers to ALWAYS "report back" to the supervisor when done
            print(f"Add return path to supervisor for {member}")
            self.workflow.add_edge(member, "supervisor")

        # The supervisor populates the "next" field in the graph state
        # which routes to a node or finishes
        conditional_map = {k: k for k in members}
        conditional_map["FINISH"] = END
        self.workflow.add_conditional_edges("supervisor", lambda x: self.check_step(x), conditional_map)
        # Finally, add entrypoint
        self.workflow.set_entry_point("supervisor")
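Routing note (illustrative): the supervisor node writes a "next" value into the shared state, and check_step does a substring match against the member names, so a verbose reply such as "ProjectHelp should act next" still routes to ProjectHelp; anything without a member name falls through to FINISH and ends the graph. A small sketch, assuming the nodes list and supervisor built in perm/scratchpad.py:

    agent_system = AgentSystem(nodes, supervisor.get_members())
    agent_system.check_step({"next": "ProjectHelp should act next"})  # -> "ProjectHelp"
    agent_system.check_step({"next": "I am done here"})               # -> "FINISH"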
perm/agents/.ipynb_checkpoints/agent_node-checkpoint.py
ADDED
@@ -0,0 +1,7 @@
from langchain_core.messages import BaseMessage, HumanMessage

def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}
perm/agents/.ipynb_checkpoints/agent_state-checkpoint.py
ADDED
@@ -0,0 +1,11 @@
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict
from langchain_core.messages import BaseMessage
import operator

# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    # The annotation tells the graph that new messages will always
    # be added to the current state
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str
perm/agents/.ipynb_checkpoints/agent_support-checkpoint.py
ADDED
@@ -0,0 +1,23 @@
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
from langchain_openai import ChatOpenAI

from langchain_huggingface import ChatHuggingFace
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

def create_agent(llm: ChatHuggingFace, tools: list, system_prompt: str):
    # Each worker node will be given a name and some tools.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "human",
                system_prompt,
            ),
            ("assistant", "ok"),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)
    return executor
perm/agents/.ipynb_checkpoints/help_agent-checkpoint.py
ADDED
@@ -0,0 +1,30 @@
from langchain.agents import AgentExecutor, Agent, create_openai_tools_agent
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    print("Multiply used")
    return a * b

class HelpAgent(AgentExecutor):

    def __init__(self, llm, system_prompt):
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "human",
                    system_prompt,
                ),
                ("assistant", "ok"),
                MessagesPlaceholder(variable_name="messages"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        #agent = prompt | llm
        agent = create_openai_tools_agent(llm, [multiply], prompt)
        super().__init__(agent=agent, tools=[multiply])
perm/agents/.ipynb_checkpoints/project_agent-checkpoint.py
ADDED
@@ -0,0 +1,41 @@
from langchain.agents import AgentExecutor, Agent, create_openai_tools_agent
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
#from tools.robot_information import robot_information
from langchain.agents import create_tool_calling_agent
from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain_core.tools import tool
import json

@tool
def robot_information(project_name: str) -> str:
    """Retrieves count and detailed information about the robots in the named project in real-time in JSON format"""
    print("retrieved robot")
    data = {
        "count": 2,
        "information": [
            {"name": "Robot1", "battery": 90, "type": "heavy"},
            {"name": "Robot2", "battery": 34, "type": "medium"}
        ]
    }
    return json.dumps(data)

class ProjectAgent(AgentExecutor):

    def __init__(self, llm, system_prompt):
        prompt = ChatPromptTemplate.from_messages(
            [
                MessagesPlaceholder(variable_name="messages"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        #agent = prompt | llm
        tools = [robot_information]
        #llm_with_tools = llm.bind_tools(tools)
        # agent = create_openai_tools_agent(llm, [robot_information], prompt)
        agent = create_tool_calling_agent(llm, tools, prompt)

        #agent = prompt | llm_with_tools
        super().__init__(agent=agent, tools=[robot_information], verbose=True)
perm/agents/.ipynb_checkpoints/supervisor-checkpoint.py
ADDED
@@ -0,0 +1,65 @@
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnableSequence
from langchain_core.messages import HumanMessage


def strip_prompt(info):
    print(info)
    eot_token = "[/INST] "
    i = info.content.rfind(eot_token)
    if i == -1:
        return info

    info.content = info.content[i + len(eot_token):]

    return {"next": info.content}


class Supervisor():

    members = []

    system_prompt = (
        "You are a supervisor tasked with managing a conversation between the"
        " following workers: {members}. Given the following user request,"
        " respond with the worker to act next. Each worker will perform a"
        " task and respond with their results and status. When finished,"
        " respond with FINISH."
    )

    def __init__(self, llm, members):
        self.members += members
        self.prompt = ChatPromptTemplate.from_messages(
            [
                ("human", self.system_prompt),
                ("assistant", "ok"),
                MessagesPlaceholder(variable_name="messages"),
                ("assistant", "ok"),
                (
                    "human",
                    "Given the conversation above, who should act next?"
                    " Or should we FINISH? Select one of: {options}",
                ),
            ]
        ).partial(options=str(self.get_options()), members=", ".join(self.members))

        self.chain = (self.prompt | llm | RunnableLambda(strip_prompt))

    def add_member(self, member):
        self.members.append(member)

    def get_members(self):
        return self.members

    def get_options(self):
        return ["FINISH"] + self.members

    def get_chain(self):
        return self.chain

    def invoke(self, query):
        # The prompt expects the running message list under the "messages" key.
        return self.chain.invoke({"messages": [HumanMessage(query)]})
perm/agents/.~agent_node.py
ADDED
File without changes
perm/agents/.~agent_state.py
ADDED
File without changes
perm/agents/.~agent_support.py
ADDED
File without changes
perm/agents/.~help_agent.py
ADDED
File without changes
perm/agents/__pycache__/agent_node.cpython-39.pyc
ADDED
Binary file (403 Bytes)
perm/agents/__pycache__/agent_state.cpython-39.pyc
ADDED
Binary file (595 Bytes)
perm/agents/__pycache__/agent_support.cpython-39.pyc
ADDED
Binary file (930 Bytes)
perm/agents/__pycache__/help_agent.cpython-39.pyc
ADDED
Binary file (1.2 kB)
perm/agents/__pycache__/project_agent.cpython-39.pyc
ADDED
Binary file (1.56 kB)
perm/agents/__pycache__/supervisor.cpython-39.pyc
ADDED
Binary file (2.4 kB)
perm/agents/agent_node.py
ADDED
@@ -0,0 +1,7 @@
from langchain_core.messages import BaseMessage, HumanMessage

def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}
perm/agents/agent_state.py
ADDED
@@ -0,0 +1,11 @@
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict
from langchain_core.messages import BaseMessage
import operator

# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    # The annotation tells the graph that new messages will always
    # be added to the current state
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str
perm/agents/agent_support.py
ADDED
@@ -0,0 +1,23 @@
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
from langchain_openai import ChatOpenAI

from langchain_huggingface import ChatHuggingFace
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

def create_agent(llm: ChatHuggingFace, tools: list, system_prompt: str):
    # Each worker node will be given a name and some tools.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "human",
                system_prompt,
            ),
            ("assistant", "ok"),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools)
    return executor
perm/agents/help_agent.py
ADDED
@@ -0,0 +1,30 @@
from langchain.agents import AgentExecutor, Agent, create_openai_tools_agent
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    print("Multiply used")
    return a * b

class HelpAgent(AgentExecutor):

    def __init__(self, llm, system_prompt):
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "human",
                    system_prompt,
                ),
                ("assistant", "ok"),
                MessagesPlaceholder(variable_name="messages"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        #agent = prompt | llm
        agent = create_openai_tools_agent(llm, [multiply], prompt)
        super().__init__(agent=agent, tools=[multiply])
perm/agents/project_agent.py
ADDED
@@ -0,0 +1,41 @@
from langchain.agents import AgentExecutor, Agent, create_openai_tools_agent
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
#from tools.robot_information import robot_information
from langchain.agents import create_tool_calling_agent
from langchain.agents import AgentExecutor, ZeroShotAgent
from langchain_core.tools import tool
import json

@tool
def robot_information(project_name: str) -> str:
    """Retrieves count and detailed information about the robots in the named project in real-time in JSON format"""
    print("retrieved robot")
    data = {
        "count": 2,
        "information": [
            {"name": "Robot1", "battery": 90, "type": "heavy"},
            {"name": "Robot2", "battery": 34, "type": "medium"}
        ]
    }
    return json.dumps(data)

class ProjectAgent(AgentExecutor):

    def __init__(self, llm, system_prompt):
        prompt = ChatPromptTemplate.from_messages(
            [
                MessagesPlaceholder(variable_name="messages"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        #agent = prompt | llm
        tools = [robot_information]
        #llm_with_tools = llm.bind_tools(tools)
        # agent = create_openai_tools_agent(llm, [robot_information], prompt)
        agent = create_tool_calling_agent(llm, tools, prompt)

        #agent = prompt | llm_with_tools
        super().__init__(agent=agent, tools=[robot_information], verbose=True)
perm/agents/supervisor.py
ADDED
@@ -0,0 +1,65 @@
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnableSequence
from langchain_core.messages import HumanMessage


def strip_prompt(info):
    print(info)
    eot_token = "[/INST] "
    i = info.content.rfind(eot_token)
    if i == -1:
        return info

    info.content = info.content[i + len(eot_token):]

    return {"next": info.content}


class Supervisor():

    members = []

    system_prompt = (
        "You are a supervisor tasked with managing a conversation between the"
        " following workers: {members}. Given the following user request,"
        " respond with the worker to act next. Each worker will perform a"
        " task and respond with their results and status. When finished,"
        " respond with FINISH."
    )

    def __init__(self, llm, members):
        self.members += members
        self.prompt = ChatPromptTemplate.from_messages(
            [
                ("human", self.system_prompt),
                ("assistant", "ok"),
                MessagesPlaceholder(variable_name="messages"),
                ("assistant", "ok"),
                (
                    "human",
                    "Given the conversation above, who should act next?"
                    " Or should we FINISH? Select one of: {options}",
                ),
            ]
        ).partial(options=str(self.get_options()), members=", ".join(self.members))

        self.chain = (self.prompt | llm | RunnableLambda(strip_prompt))

    def add_member(self, member):
        self.members.append(member)

    def get_members(self):
        return self.members

    def get_options(self):
        return ["FINISH"] + self.members

    def get_chain(self):
        return self.chain

    def invoke(self, query):
        # The prompt expects the running message list under the "messages" key.
        return self.chain.invoke({"messages": [HumanMessage(query)]})
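A minimal routing sketch for the class above (illustrative; chat_llm is assumed to be the ChatHuggingFace instance created in perm/app.py):

    supervisor = Supervisor(chat_llm, ["ProductHelp", "ProjectHelp", "Multiplier"])
    decision = supervisor.get_chain().invoke(
        {"messages": [HumanMessage(content="What is 4 multiplied by 35?")]}
    )
    # strip_prompt trims the echoed prompt, so decision looks like {"next": "Multiplier"}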
perm/app.py
ADDED
@@ -0,0 +1,58 @@
import HuggingFaceAI
from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage

from agents.supervisor import Supervisor
from agents.agent_support import create_agent
from agents.agent_node import agent_node

from agents.help_agent import HelpAgent
from agents.project_agent import ProjectAgent

from agent_system import AgentSystem

from tools.multiply_tool import multiply
import functools

import os
from uuid import uuid4

unique_id = uuid4().hex[0:8]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "InfiniFleetTrace"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "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"  # Update to your API key


llm = HuggingFacePipeline.from_model_id(
    model_id="mistralai/Mistral-7B-Instruct-v0.3",
    device_map="auto",
    task="text-generation",
    pipeline_kwargs={
        "max_new_tokens": 100,
        "top_k": 50,
    },
)

print("Creating chat interface")
chat_llm = ChatHuggingFace(llm=llm)
print("Done")
print("---------")


supervisor = Supervisor(chat_llm, ["ProductHelp", "ProjectHelp", "Multiplier"])


help_agent = HelpAgent(chat_llm, "You provide help for the InfiniFleet product in general")
help_node = functools.partial(agent_node, agent=help_agent, name="ProductHelp")

project_agent = ProjectAgent(chat_llm, "Always use robot_information tool to get all required information.")
project_node = functools.partial(agent_node, agent=project_agent, name="ProjectHelp")

print("--project agent-------")
input_data = "Use the tool to give me information about how many robots there are in the project called 'largeProject'."
result = project_agent.invoke({"messages": [HumanMessage(input_data)]})
print(result)
print("---------")
perm/experiment.py
ADDED
@@ -0,0 +1,77 @@
from CustomLLMMistral import CustomLLMMistral
from tools.robot_information import robot_information
import os

os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "InfiniFleetTrace"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "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"  # Update to your API key

llm = CustomLLMMistral()

#info = robot_information.invoke("test")
#print(info)

tools = [robot_information]

system = """
You are designed to solve tasks. Each task requires multiple steps that are represented by a markdown code snippet of a json blob.
The json structure should contain the following keys:
thought -> your thoughts
action -> name of a tool
action_input -> parameters to send to the tool

These are the tools you can use: {tool_names}.

These are the tools descriptions:

{tools}

If you have enough information to answer the query use the tool "Final Answer". Its parameter is the solution.
If there is not enough information, keep trying.

"""

human = """
Add the word "STOP" after each markdown snippet. Example:

```json
{{"thought": "<your thoughts>",
"action": "<tool name or Final Answer to give a final answer>",
"action_input": "<tool parameters or the final output>"}}
```
STOP

This is my query="{input}". Write only the next step needed to solve it.
Your answer should be based on the previous tool executions, even if you think you know the answer.
Remember to add STOP after each snippet.

These were the previous steps given to solve this query and the information you already gathered:
"""

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        MessagesPlaceholder("chat_history", optional=True),
        ("human", human),
        MessagesPlaceholder("agent_scratchpad"),
    ]
)

from langchain.agents import create_json_chat_agent, AgentExecutor
from langchain.memory import ConversationBufferMemory

agent = create_json_chat_agent(
    tools=tools,
    llm=llm,
    prompt=prompt,
    stop_sequence=["STOP"],
    template_tool_response="{observation}"
)

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)

agent_executor.invoke({"input": "Who are you?"})
perm/hello.py
ADDED
@@ -0,0 +1,3 @@
from langchain_huggingface import HuggingFacePipeline
print("Hello world")
perm/scratchpad.py
ADDED
@@ -0,0 +1,25 @@
multiply_agent = create_agent(chat_llm, [multiply], "You multiply numbers")
multiply_node = functools.partial(agent_node, agent=multiply_agent, name="Multiplier")


nodes = [
    { "name": "supervisor", "instance": supervisor.get_chain() },
    { "name": "ProductHelp", "instance": help_node },
    { "name": "ProjectHelp", "instance": project_node },
    { "name": "Multiplier", "instance": multiply_node }
]


agent_system = AgentSystem(nodes, supervisor.get_members())
graph = agent_system.compile()

for s in graph.stream(
    {
        "messages": [
            HumanMessage(content="How many robots are currently set up in the project?")
        ]
    }
):
    if "__end__" not in s:
        print(s)
        print("----")
perm/start.sh
ADDED
@@ -0,0 +1,5 @@
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
export LANGCHAIN_API_KEY=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
export LANGCHAIN_PROJECT=TestProject
pip install -U "huggingface_hub[cli]"
perm/tasks.json
ADDED
@@ -0,0 +1,10 @@
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Env & Login",
            "type": "shell",
            "command": "bash ./start.sh"
        }
    ]
}
perm/tools/.ipynb_checkpoints/multiply_tool-checkpoint.py
ADDED
@@ -0,0 +1,7 @@
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    print("Multiply used")
    return a * b
perm/tools/.ipynb_checkpoints/robot_information-checkpoint.py
ADDED
@@ -0,0 +1,14 @@
from langchain_core.tools import tool
import json

@tool
def robot_information(project_name: str) -> str:
    """Retrieves detailed information about the robots in the named project in real-time in JSON format"""
    print("retrieved robot")
    data = {
        "information": [
            {"name": "Robot1", "battery": 90, "type": "heavy"},
            {"name": "Robot2", "battery": 34, "type": "medium"}
        ]
    }
    return json.dumps(data)
perm/tools/__pycache__/multiply_tool.cpython-39.pyc
ADDED
Binary file (370 Bytes)
perm/tools/__pycache__/robot_information.cpython-39.pyc
ADDED
Binary file (612 Bytes)
perm/tools/multiply_tool.py
ADDED
@@ -0,0 +1,7 @@
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    print("Multiply used")
    return a * b
perm/tools/robot_information.py
ADDED
@@ -0,0 +1,14 @@
from langchain_core.tools import tool
import json

@tool
def robot_information(project_name: str) -> str:
    """Retrieves detailed information about the robots in the named project in real-time in JSON format"""
    print("retrieved robot")
    data = {
        "information": [
            {"name": "Robot1", "battery": 90, "type": "heavy"},
            {"name": "Robot2", "battery": 34, "type": "medium"}
        ]
    }
    return json.dumps(data)
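Illustrative call of the tool on its own (LangChain tools take their arguments as a dict keyed by parameter name):

    from tools.robot_information import robot_information

    print(robot_information.invoke({"project_name": "largeProject"}))
    # prints the JSON string with the two stub robots defined above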