acecalisto3 committed on
Commit ac54317 · verified · 1 Parent(s): 45d3f2d

Update app.py

Files changed (1):
  1. app.py +435 -256

app.py CHANGED
@@ -1,262 +1,441 @@
  import os
- import json
  import time
- from typing import Dict, List, Tuple

- import gradio as gr
- import streamlit as st
- from huggingface_hub import InferenceClient
- from transformers import AutoModelForCausalLM, AutoTokenizer
- from rich import print as rprint
- from rich.panel import Panel
- from rich.progress import track
- from rich.table import Table
-
- # --- Constants ---
- MODEL_NAME = "bigscience/bloom-1b7"  # Choose a suitable model
- MAX_NEW_TOKENS = 1024
- TEMPERATURE = 0.7
- TOP_P = 0.95
- REPETITION_PENALTY = 1.2
-
- # --- Model & Tokenizer ---
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-
- # --- Agents ---
- agents = {
-     "WEB_DEV": {
-         "description": "Expert in web development technologies and frameworks.",
-         "skills": ["HTML", "CSS", "JavaScript", "React", "Vue.js", "Flask", "Django", "Node.js", "Express.js"],
-         "system_prompt": "You are a web development expert. Your goal is to assist the user in building and deploying web applications. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "AI_SYSTEM_PROMPT": {
-         "description": "Expert in designing and implementing AI systems.",
-         "skills": ["Machine Learning", "Deep Learning", "Natural Language Processing", "Computer Vision", "Reinforcement Learning"],
-         "system_prompt": "You are an AI system expert. Your goal is to assist the user in designing and implementing AI systems. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "PYTHON_CODE_DEV": {
-         "description": "Expert in Python programming and development.",
-         "skills": ["Python", "Data Structures", "Algorithms", "Object-Oriented Programming", "Functional Programming"],
-         "system_prompt": "You are a Python code development expert. Your goal is to assist the user in writing and debugging Python code. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "CODE_REVIEW_ASSISTANT": {
-         "description": "Expert in code review and quality assurance.",
-         "skills": ["Code Style", "Best Practices", "Security", "Performance", "Maintainability"],
-         "system_prompt": "You are a code review assistant. Your goal is to assist the user in reviewing code for quality and efficiency. Provide feedback on code style, best practices, security, performance, and maintainability.",
-     },
-     "CONTENT_WRITER_EDITOR": {
-         "description": "Expert in content writing and editing.",
-         "skills": ["Grammar", "Style", "Clarity", "Conciseness", "SEO"],
-         "system_prompt": "You are a content writer and editor. Your goal is to assist the user in creating high-quality content. Provide suggestions on grammar, style, clarity, conciseness, and SEO.",
-     },
-     "QUESTION_GENERATOR": {
-         "description": "Expert in generating questions for learning and assessment.",
-         "skills": ["Question Types", "Cognitive Levels", "Assessment Design"],
-         "system_prompt": "You are a question generator. Your goal is to assist the user in generating questions for learning and assessment. Provide questions that are relevant to the topic and aligned with the cognitive levels.",
-     },
-     "HUGGINGFACE_FILE_DEV": {
-         "description": "Expert in developing Hugging Face files for machine learning models.",
-         "skills": ["Transformers", "Datasets", "Model Training", "Model Deployment"],
-         "system_prompt": "You are a Hugging Face file development expert. Your goal is to assist the user in creating and deploying Hugging Face files for machine learning models. Provide code snippets, explanations, and guidance on best practices.",
-     },
- }
-
- # --- Session State ---
- if "workspace_projects" not in st.session_state:
-     st.session_state.workspace_projects = {}
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "active_agent" not in st.session_state:
-     st.session_state.active_agent = None
- if "selected_agents" not in st.session_state:
-     st.session_state.selected_agents = []
-
- # --- Functions ---
- def format_prompt(message: str, history: List[Tuple[str, str]], agent_prompt: str) -> str:
-     """Formats the prompt for the language model."""
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {agent_prompt}, {message} [/INST]"
-     return prompt
-
- def generate_response(prompt: str, agent_name: str) -> str:
-     """Generates a response from the language model."""
-     agent = agents[agent_name]
-     system_prompt = agent["system_prompt"]
-     generate_kwargs = dict(
-         temperature=TEMPERATURE,
-         max_new_tokens=MAX_NEW_TOKENS,
-         top_p=TOP_P,
-         repetition_penalty=REPETITION_PENALTY,
-         do_sample=True,
-     )
-     input_ids = tokenizer.encode(prompt, return_tensors="pt")
-     output = model.generate(input_ids, **generate_kwargs)
-     response = tokenizer.decode(output[0], skip_special_tokens=True)
-     return response
-
- def chat_interface(chat_input: str, agent_names: List[str]) -> str:
-     """Handles chat interactions with the selected agents."""
-     if agent_names:
-         responses = []
-         for agent_name in agent_names:
-             prompt = format_prompt(chat_input, st.session_state.chat_history, agents[agent_name]["system_prompt"])
-             response = generate_response(prompt, agent_name)
-             responses.append(f"{agent_name}: {response}")
-         return "\n".join(responses)
-     else:
-         return "Please select at least one agent."
-
- def terminal_interface(command: str, project_name: str) -> str:
-     """Executes a command within the specified project directory."""
-     try:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name)
-         return result.stdout if result.returncode == 0 else result.stderr
-     except Exception as e:
-         return str(e)
-
- def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
-     """Adds code to a workspace project."""
-     project_path = os.path.join(os.getcwd(), project_name)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-     file_path = os.path.join(project_path, file_name)
-     with open(file_path, 'w') as file:
-         file.write(code)
-     if project_name not in st.session_state.workspace_projects:
-         st.session_state.workspace_projects[project_name] = {'files': []}
-     st.session_state.workspace_projects[project_name]['files'].append(file_name)
-     return f"Added {file_name} to {project_name}"
-
- def display_workspace_projects():
-     """Displays a table of workspace projects."""
-     table = Table(title="Workspace Projects")
-     table.add_column("Project Name", style="cyan", no_wrap=True)
-     table.add_column("Files", style="magenta")
-     for project_name, details in st.session_state.workspace_projects.items():
-         table.add_row(project_name, ", ".join(details['files']))
-     rprint(Panel(table, title="[bold blue]Workspace Projects[/bold blue]"))
-
- def display_chat_history():
-     """Displays the chat history in a formatted way."""
-     table = Table(title="Chat History")
-     table.add_column("User", style="cyan", no_wrap=True)
-     table.add_column("Agent", style="magenta")
-     for user_prompt, bot_response in st.session_state.chat_history:
-         table.add_row(user_prompt, bot_response)
-     rprint(Panel(table, title="[bold blue]Chat History[/bold blue]"))
-
- def display_agent_info(agent_name: str):
-     """Displays information about the selected agent."""
-     agent = agents[agent_name]
-     table = Table(title=f"{agent_name} - Agent Information")
-     table.add_column("Description", style="cyan", no_wrap=True)
-     table.add_column("Skills", style="magenta")
-     table.add_row(agent["description"], ", ".join(agent["skills"]))
-     rprint(Panel(table, title=f"[bold blue]{agent_name} - Agent Information[/bold blue]"))
-
- def run_autonomous_build(agent_names: List[str], project_name: str):
-     """Runs the autonomous build process."""
-     for agent_name in agent_names:
-         agent = agents[agent_name]
-         chat_history = st.session_state.chat_history
-         workspace_projects = st.session_state.workspace_projects
-         summary, next_step = agent.autonomous_build(chat_history, workspace_projects)
-         rprint(Panel(summary, title="[bold blue]Current State[/bold blue]"))
-         rprint(Panel(next_step, title="[bold blue]Next Step[/bold blue]"))
-         # Implement logic for autonomous build based on the current state
-         # ...
-
- # --- Streamlit UI ---
- st.title("DevToolKit: AI-Powered Development Environment")
-
- # --- Project Management ---
- st.header("Project Management")
- project_name = st.text_input("Enter project name:")
- if st.button("Create Project"):
-     if project_name not in st.session_state.workspace_projects:
-         st.session_state.workspace_projects[project_name] = {'files': []}
-         st.success(f"Created project: {project_name}")
-     else:
-         st.warning(f"Project {project_name} already exists")
-
- # --- Code Addition ---
- st.subheader("Add Code to Workspace")
- code_to_add = st.text_area("Enter code to add to workspace:")
- file_name = st.text_input("Enter file name (e.g. 'app.py'):")
- if st.button("Add Code"):
-     add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
-     st.success(add_code_status)
-
- # --- Terminal Interface ---
- st.subheader("Terminal (Workspace Context)")
- terminal_input = st.text_input("Enter a command within the workspace:")
- if st.button("Run Command"):
-     terminal_output = terminal_interface(terminal_input, project_name)
-     st.code(terminal_output, language="bash")
-
- # --- Chat Interface ---
- st.subheader("Chat with AI Agents")
- selected_agents = st.multiselect("Select AI agents", list(agents.keys()), key="agent_select")
- st.session_state.selected_agents = selected_agents
- agent_chat_input = st.text_area("Enter your message for the agents:", key="agent_input")
- if st.button("Send to Agents", key="agent_send"):
-     agent_chat_response = chat_interface(agent_chat_input, selected_agents)
-     st.write(agent_chat_response)
-
- # --- Agent Control ---
- st.subheader("Agent Control")
- for agent_name in agents:
-     agent = agents[agent_name]
-     with st.expander(f"{agent_name} ({agent['description']})"):
-         if st.button(f"Activate {agent_name}", key=f"activate_{agent_name}"):
-             st.session_state.active_agent = agent_name
-             st.success(f"{agent_name} activated.")
-         if st.button(f"Deactivate {agent_name}", key=f"deactivate_{agent_name}"):
-             st.session_state.active_agent = None
-             st.success(f"{agent_name} deactivated.")
-
- # --- Automate Build Process ---
- st.subheader("Automate Build Process")
- if st.button("Automate"):
-     if st.session_state.selected_agents:
-         run_autonomous_build(st.session_state.selected_agents, project_name)
-     else:
-         st.warning("Please select at least one agent.")
-
- # --- Display Information ---
- st.sidebar.subheader("Current State")
- st.sidebar.json(st.session_state.current_state)
- if st.session_state.active_agent:
-     display_agent_info(st.session_state.active_agent)
- display_workspace_projects()
- display_chat_history()
-
- # --- Gradio Interface ---
- additional_inputs = [
-     gr.Dropdown(label="Agents", choices=[s for s in agents.keys()], value=list(agents.keys())[0], interactive=True),
-     gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
-     gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
-     gr.Slider(label="Max new tokens", value=MAX_NEW_TOKENS, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
-     gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
-     gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
  ]
-
- examples = [
-     ["Create a simple web application using Flask", "WEB_DEV"],
-     ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV"],
-     ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT"],
-     # Add more examples as needed
  ]

- gr.ChatInterface(
-     fn=chat_interface,
-     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-     additional_inputs=additional_inputs,
-     title="DevToolKit AI Assistant",
-     examples=examples,
-     concurrency_limit=20,
- ).launch(show_api=True)

  import os
+ import subprocess
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+ import random
  import time
+ from typing import List, Dict

+ # Simulated agent and tool libraries
+ AGENT_TYPES = [
+     "Task Executor",
+     "Information Retriever",
+     "Decision Maker",
+     "Data Analyzer",
  ]
+ TOOL_TYPES = [
+     "Web Scraper",
+     "Database Connector",
+     "API Caller",
+     "File Handler",
+     "Text Processor",
  ]

+ # Initialize Hugging Face client
+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ VERBOSE = False
+ MAX_HISTORY = 100
+ MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+ # Import necessary prompts and functions from the existing code
+ from .prompts import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PREFIX,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
+ )
+ from .utils import parse_action, parse_file_content, read_python_module_structure
+ from flask import Flask, request, jsonify
+
+
+ class Agent:
+     def __init__(self, name: str, agent_type: str, complexity: int):
+         self.name = name
+         self.type = agent_type
+         self.complexity = complexity
+         self.tools = []
+
+     def add_tool(self, tool):
+         self.tools.append(tool)
+
+     def __str__(self):
+         return f"{self.name} ({self.type}) - Complexity: {self.complexity}"
+
+
+ class Tool:
+     def __init__(self, name: str, tool_type: str):
+         self.name = name
+         self.type = tool_type
+
+     def __str__(self):
+         return f"{self.name} ({self.type})"
+
+
+ # Orchestrator: holds the registered agents/tools and drives the LLM-backed action loop.
+ class Pypelyne:
+     def __init__(self):
+         self.agents: List[Agent] = []
+         self.tools: List[Tool] = []
+         self.history = ""
+         self.task = None
+         self.purpose = None
+         self.directory = None
+
+     def add_agent(self, agent: Agent):
+         self.agents.append(agent)
+
+     def add_tool(self, tool: Tool):
+         self.tools.append(tool)
+
+     def generate_chat_app(self):
+         time.sleep(2)  # Simulate processing time
+         return f"Chat app generated with {len(self.agents)} agents and {len(self.tools)} tools."
+
+     def run_gpt(self, prompt_template, stop_tokens, max_tokens, **prompt_kwargs):
+         content = PREFIX.format(
+             module_summary=read_python_module_structure(self.directory)[0],
+             purpose=self.purpose,
+         ) + prompt_template.format(**prompt_kwargs)
+
+         if VERBOSE:
+             print(LOG_PROMPT.format(content))
+
+         stream = client.text_generation(
+             prompt=content,
+             max_new_tokens=max_tokens,
+             stop_sequences=stop_tokens if stop_tokens else None,
+             do_sample=True,
+             temperature=0.7,
+         )
+
+         resp = "".join(token for token in stream)
+
+         if VERBOSE:
+             print(LOG_RESPONSE.format(resp))
+         return resp
+
+     def compress_history(self):
+         resp = self.run_gpt(
+             COMPRESS_HISTORY_PROMPT,
+             stop_tokens=["observation:", "task:", "action:", "thought:"],
+             max_tokens=512,
+             task=self.task,
+             history=self.history,
+         )
+         self.history = f"observation: {resp}\n"
+
+     def run_action(self, action_name, action_input):
+         if action_name == "COMPLETE":
+             return "Task completed."
+
+         if len(self.history.split("\n")) > MAX_HISTORY:
+             if VERBOSE:
+                 print("COMPRESSING HISTORY")
+             self.compress_history()
+
+         action_funcs = {
+             "MAIN": self.call_main,
+             "UPDATE-TASK": self.call_set_task,
+             "MODIFY-FILE": self.call_modify,
+             "READ-FILE": self.call_read,
+             "ADD-FILE": self.call_add,
+             "TEST": self.call_test,
+         }
+
+         if action_name not in action_funcs:
+             return f"Unknown action: {action_name}"
+
+         print(f"RUN: {action_name} {action_input}")
+         return action_funcs[action_name](action_input)
+
+     def call_main(self, action_input):
+         resp = self.run_gpt(
+             ACTION_PROMPT,
+             stop_tokens=["observation:", "task:"],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+         )
+         lines = resp.strip().strip("\n").split("\n")
+         for line in lines:
+             if line == "":
+                 continue
+             if line.startswith("thought: "):
+                 self.history += f"{line}\n"
+             elif line.startswith("action: "):
+                 action_name, action_input = parse_action(line)
+                 self.history += f"{line}\n"
+                 return self.run_action(action_name, action_input)
+         return "No valid action found."
+
+     def call_set_task(self, action_input):
+         self.task = self.run_gpt(
+             TASK_PROMPT,
+             stop_tokens=[],
+             max_tokens=64,
+             task=self.task,
+             history=self.history,
+         ).strip("\n")
+         self.history += f"observation: task has been updated to: {self.task}\n"
+         return f"Task updated: {self.task}"
+
+     def call_modify(self, action_input):
+         if not os.path.exists(action_input):
+             self.history += "observation: file does not exist\n"
+             return "File does not exist."
+
+         content = read_python_module_structure(self.directory)[1]
+         f_content = (
+             content[action_input] if content[action_input] else "< document is empty >"
+         )
+
+         resp = self.run_gpt(
+             MODIFY_PROMPT,
+             stop_tokens=["action:", "thought:", "observation:"],
+             max_tokens=2048,
+             task=self.task,
+             history=self.history,
+             file_path=action_input,
+             file_contents=f_content,
+         )
+         new_contents, description = parse_file_content(resp)
+         if new_contents is None:
+             self.history += "observation: failed to modify file\n"
+             return "Failed to modify file."
+
+         with open(action_input, "w") as f:
+             f.write(new_contents)
+
+         self.history += "observation: file successfully modified\n"
+         self.history += f"observation: {description}\n"
+         return f"File modified: {action_input}"
+
+     def call_read(self, action_input):
+         if not os.path.exists(action_input):
+             self.history += "observation: file does not exist\n"
+             return "File does not exist."
+
+         content = read_python_module_structure(self.directory)[1]
+         f_content = (
+             content[action_input] if content[action_input] else "< document is empty >"
+         )
+
+         resp = self.run_gpt(
+             READ_PROMPT,
+             stop_tokens=[],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+             file_path=action_input,
+             file_contents=f_content,
+         ).strip("\n")
+         self.history += f"observation: {resp}\n"
+         return f"File read: {action_input}"
+
+     def call_add(self, action_input):
+         d = os.path.dirname(action_input)
+         if not d.startswith(self.directory):
+             self.history += (
+                 f"observation: files must be under directory {self.directory}\n"
+             )
+             return f"Invalid directory: {d}"
+         elif not action_input.endswith(".py"):
+             self.history += "observation: can only write .py files\n"
+             return "Only .py files are allowed."
+         else:
+             if d and not os.path.exists(d):
+                 os.makedirs(d)
+             if not os.path.exists(action_input):
+                 resp = self.run_gpt(
+                     ADD_PROMPT,
+                     stop_tokens=["action:", "thought:", "observation:"],
+                     max_tokens=2048,
+                     task=self.task,
+                     history=self.history,
+                     file_path=action_input,
+                 )
+                 new_contents, description = parse_file_content(resp)
+                 if new_contents is None:
+                     self.history += "observation: failed to write file\n"
+                     return "Failed to write file."
+
+                 with open(action_input, "w") as f:
+                     f.write(new_contents)
+
+                 self.history += "observation: file successfully written\n"
+                 self.history += f"observation: {description}\n"
+                 return f"File added: {action_input}"
+             else:
+                 self.history += "observation: file already exists\n"
+                 return "File already exists."
+
+     def call_test(self, action_input):
+         result = subprocess.run(
+             ["python", "-m", "pytest", "--collect-only", self.directory],
+             capture_output=True,
+             text=True,
+         )
+         if result.returncode != 0:
+             self.history += f"observation: there are no tests! Tests should be written in a test folder under {self.directory}\n"
+             return "No tests found."
+         result = subprocess.run(
+             ["python", "-m", "pytest", self.directory], capture_output=True, text=True
+         )
+         if result.returncode == 0:
+             self.history += "observation: tests pass\n"
+             return "All tests passed."
+
+         resp = self.run_gpt(
+             UNDERSTAND_TEST_RESULTS_PROMPT,
+             stop_tokens=[],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+             stdout=result.stdout[:5000],
+             stderr=result.stderr[:5000],
+         )
+         self.history += f"observation: tests failed: {resp}\n"
+         return f"Tests failed: {resp}"
+
+
+ pypelyne = Pypelyne()
+
+
+ def create_agent(name: str, agent_type: str, complexity: int) -> str:
+     agent = Agent(name, agent_type, complexity)
+     pypelyne.add_agent(agent)
+     return f"Agent created: {agent}"
+
+
+ def create_tool(name: str, tool_type: str) -> str:
+     tool = Tool(name, tool_type)
+     pypelyne.add_tool(tool)
+     return f"Tool created: {tool}"
+
+
+ def assign_tool(agent_name: str, tool_name: str) -> str:
+     agent = next((a for a in pypelyne.agents if a.name == agent_name), None)
+     tool = next((t for t in pypelyne.tools if t.name == tool_name), None)
+
+     if agent and tool:
+         agent.add_tool(tool)
+         return f"Tool '{tool.name}' assigned to agent '{agent.name}'"
+     else:
+         return "Agent or tool not found."
+
+
+ def generate_chat_app() -> str:
+     return pypelyne.generate_chat_app()
+
+
+ def list_agents() -> str:
+     return (
+         "\n".join(str(agent) for agent in pypelyne.agents) or "No agents created yet."
+     )
+
+
+ def list_tools() -> str:
+     return "\n".join(str(tool) for tool in pypelyne.tools) or "No tools created yet."
+
+
+ def chat_with_pypelyne(message: str) -> str:
+     return pypelyne.run_action("MAIN", message)
+
+
+ def set_purpose_and_directory(purpose: str, directory: str) -> str:
+     pypelyne.purpose = purpose
+     pypelyne.directory = directory
+     return f"Purpose set to: {purpose}\nWorking directory set to: {directory}"
+
+
+ with gr.Blocks() as app:
+     gr.Markdown("# Welcome to Pypelyne")
+     gr.Markdown("Create your custom pipeline with agents and tools, then chat with it!")
+
+     with gr.Tab("Setup"):
+         purpose_input = gr.Textbox(label="Set Purpose")
+         directory_input = gr.Textbox(label="Set Working Directory")
+         setup_btn = gr.Button("Set Purpose and Directory")
+         setup_output = gr.Textbox(label="Setup Output")
+         setup_btn.click(
+             set_purpose_and_directory,
+             inputs=[purpose_input, directory_input],
+             outputs=setup_output,
+         )
+
+     with gr.Tab("Create Agents"):
+         agent_name = gr.Textbox(label="Agent Name")
+         agent_type = gr.Dropdown(choices=AGENT_TYPES, label="Agent Type")
+         agent_complexity = gr.Slider(
+             minimum=1, maximum=10, step=1, label="Agent Complexity"
+         )
+         create_agent_btn = gr.Button("Create Agent")
+         agent_output = gr.Textbox(label="Output")
+         create_agent_btn.click(
+             create_agent,
+             inputs=[agent_name, agent_type, agent_complexity],
+             outputs=agent_output,
+         )
+
+     with gr.Tab("Create Tools"):
+         tool_name = gr.Textbox(label="Tool Name")
+         tool_type = gr.Dropdown(choices=TOOL_TYPES, label="Tool Type")
+         create_tool_btn = gr.Button("Create Tool")
+         tool_output = gr.Textbox(label="Output")
+         create_tool_btn.click(
+             create_tool, inputs=[tool_name, tool_type], outputs=tool_output
+         )
+
+     with gr.Tab("Assign Tools"):
+         agent_select = gr.Dropdown(choices=[], label="Select Agent")
+         tool_select = gr.Dropdown(choices=[], label="Select Tool")
+         assign_tool_btn = gr.Button("Assign Tool")
+         assign_output = gr.Textbox(label="Output")
+         assign_tool_btn.click(
+             assign_tool, inputs=[agent_select, tool_select], outputs=assign_output
+         )
+
+     with gr.Tab("Generate Chat App"):
+         generate_btn = gr.Button("Generate Chat App")
+         generate_output = gr.Textbox(label="Output")
+         generate_btn.click(generate_chat_app, outputs=generate_output)
+
+     with gr.Tab("Chat with Pypelyne"):
+         chat_input = gr.Textbox(label="Your Message")
+         chat_output = gr.Textbox(label="Pypelyne's Response")
+         chat_btn = gr.Button("Send")
+         chat_btn.click(chat_with_pypelyne, inputs=chat_input, outputs=chat_output)
+
+     with gr.Tab("View Pypelyne"):
+         view_agents_btn = gr.Button("View Agents")
+         view_tools_btn = gr.Button("View Tools")
+         view_output = gr.Textbox(label="Pypelyne Components")
+         view_agents_btn.click(list_agents, outputs=view_output)
+         view_tools_btn.click(list_tools, outputs=view_output)
+
+     def update_dropdowns():
+         return gr.Dropdown.update(
+             choices=[agent.name for agent in pypelyne.agents]
+         ), gr.Dropdown.update(choices=[tool.name for tool in pypelyne.tools])
+
+     create_agent_btn.click(update_dropdowns, outputs=[agent_select, tool_select])
+     create_tool_btn.click(update_dropdowns, outputs=[agent_select, tool_select])
+
+ # Flask API exposing the same Pypelyne helpers over HTTP. A distinct name is
+ # used so this instance does not shadow the Gradio Blocks object bound to
+ # `app` above.
+ flask_app = Flask(__name__)
+
+ @flask_app.route("/chat", methods=["POST"])
+ def chat():
+     message = request.json["message"]
+     response = chat_with_pypelyne(message)
+     return jsonify({"response": response})
+
+ @flask_app.route("/agents", methods=["GET"])
+ def get_agents():
+     agents = list_agents()
+     return jsonify({"agents": agents})
+
+ @flask_app.route("/tools", methods=["GET"])
+ def get_tools():
+     tools = list_tools()
+     return jsonify({"tools": tools})
+
+ if __name__ == "__main__":
+     # app.launch() blocks, so start the HTTP API with flask_app.run() instead
+     # when the REST endpoints are needed rather than the Gradio UI.
+     app.launch()
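
For reference, a minimal client-side sketch of how the new HTTP endpoints could be exercised once the Flask side of this commit is running (for example via flask_app.run()). The base URL assumes Flask's default development server on localhost:5000, and the requests package is an external dependency, not part of this commit:

import requests  # external dependency, assumed to be installed

BASE_URL = "http://127.0.0.1:5000"  # Flask development-server default

# /chat expects JSON of the form {"message": ...} and returns {"response": ...}
reply = requests.post(f"{BASE_URL}/chat", json={"message": "List the project files"})
print(reply.json()["response"])

# /agents and /tools return the plain-text listings wrapped in JSON
print(requests.get(f"{BASE_URL}/agents").json()["agents"])
print(requests.get(f"{BASE_URL}/tools").json()["tools"])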