Spaces:
Running
Running
Update app2.py
Browse files
app2.py
CHANGED
@@ -1,41 +1,15 @@
|
|
1 |
-
import asyncio
|
2 |
-
import gradio as gr
|
3 |
-
from sqlalchemy.exc import SQLAlchemyError
|
4 |
-
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
|
5 |
-
from sqlalchemy.future import select # Correct async query API
|
6 |
-
from sqlalchemy.orm import sessionmaker
|
7 |
-
import logging
|
8 |
-
import os
|
9 |
-
import sys
|
10 |
-
import subprocess
|
11 |
-
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
|
12 |
-
import openai
|
13 |
import streamlit as st
|
14 |
-
from
|
15 |
-
from rich import print as rprint
|
16 |
-
from rich.panel import Panel
|
17 |
-
from rich.progress import track
|
18 |
-
from rich.table import Table
|
19 |
-
import git
|
20 |
-
from langchain.llms import HuggingFaceHub
|
21 |
from langchain.chains import ConversationChain
|
22 |
from langchain.memory import ConversationBufferMemory
|
|
|
|
|
23 |
|
24 |
-
#
|
25 |
-
|
26 |
-
MAX_NEW_TOKENS = 2048
|
27 |
-
TEMPERATURE = 0.7
|
28 |
-
TOP_P = 0.95
|
29 |
-
REPETITION_PENALTY = 1.2
|
30 |
-
|
31 |
-
# Load Model and Tokenizer
|
32 |
-
@st.cache_resource
|
33 |
-
def load_model_and_tokenizer():
|
34 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, device_map="auto")
|
35 |
-
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
36 |
-
return model, tokenizer
|
37 |
|
38 |
-
|
|
|
39 |
|
40 |
# Agents
|
41 |
agents = {
|
@@ -82,14 +56,20 @@ def add_code_to_workspace(project_name: str, code: str, file_name: str):
|
|
82 |
return f"Project {project_name} does not exist"
|
83 |
|
84 |
def terminal_interface(command: str, project_name: str):
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
|
91 |
def get_agent_response(message: str, system_prompt: str):
|
92 |
-
llm =
|
93 |
memory = ConversationBufferMemory()
|
94 |
conversation = ConversationChain(llm=llm, memory=memory)
|
95 |
response = conversation.run(system_prompt + "\n" + message)
|
@@ -114,7 +94,7 @@ def display_chat_history():
|
|
114 |
for message in st.session_state.chat_history:
|
115 |
st.text(message)
|
116 |
|
117 |
-
def run_autonomous_build(selected_agents:
|
118 |
st.info("Starting autonomous build process...")
|
119 |
for agent in selected_agents:
|
120 |
st.write(f"Agent {agent} is working on the project...")
|
@@ -123,7 +103,7 @@ def run_autonomous_build(selected_agents: List[str], project_name: str):
|
|
123 |
st.write(f"Agent {agent} has completed its task.")
|
124 |
st.success("Autonomous build process completed!")
|
125 |
|
126 |
-
def collaborative_agent_example(selected_agents:
|
127 |
st.info(f"Starting collaborative task: {task}")
|
128 |
responses = {}
|
129 |
for agent in selected_agents:
|
@@ -135,7 +115,7 @@ def collaborative_agent_example(selected_agents: List[str], project_name: str, t
|
|
135 |
st.success("Collaborative task completed!")
|
136 |
st.write(combined_response)
|
137 |
|
138 |
-
def combine_and_process_responses(responses:
|
139 |
combined = "\n\n".join([f"{agent}: {response}" for agent, response in responses.items()])
|
140 |
return f"Combined response for task '{task}':\n\n{combined}"
|
141 |
|
@@ -163,7 +143,7 @@ if st.session_state.workspace_projects:
|
|
163 |
selected_file = st.selectbox("Select file to edit", files) if files else None
|
164 |
if selected_file:
|
165 |
file_content = next((file['code'] for file in st.session_state.workspace_projects[selected_project]['files'] if file['file_name'] == selected_file), "")
|
166 |
-
edited_code =
|
167 |
if st.button("Save Changes"):
|
168 |
for file in st.session_state.workspace_projects[selected_project]['files']:
|
169 |
if file['file_name'] == selected_file:
|
@@ -173,7 +153,7 @@ if st.session_state.workspace_projects:
|
|
173 |
else:
|
174 |
st.info("No files in the project. Use the chat interface to generate code.")
|
175 |
else:
|
176 |
-
st.info("No projects created yet. Create a project to start coding."
|
177 |
|
178 |
# Terminal Interface
|
179 |
st.subheader("Terminal (Workspace Context)")
|
@@ -246,4 +226,4 @@ if st.button("Start Collaborative Task"):
|
|
246 |
if collab_agents and collab_project and collab_task:
|
247 |
collaborative_agent_example(collab_agents, collab_project, collab_task)
|
248 |
else:
|
249 |
-
st.warning("Please select agents, enter a project name, and a task.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import logging
import subprocess

import git
import streamlit as st
from langchain.chains import ConversationChain
from langchain.llms import GoogleSearchAPIWrapper
from langchain.memory import ConversationBufferMemory
|
7 |
|
8 |
+
# Replace with your actual Google Search API key
|
9 |
+
GOOGLE_API_KEY = "YOUR_API_KEY"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
+
# Initialize Google Search API Wrapper
|
12 |
+
search = GoogleSearchAPIWrapper(google_api_key=GOOGLE_API_KEY)
|
13 |
|
14 |
# Agents
|
15 |
agents = {
|
|
|
56 |
return f"Project {project_name} does not exist"
|
57 |
|
58 |
def terminal_interface(command: str, project_name: str):
    """Run a shell command inside the named workspace project's directory.

    Args:
        command: Raw shell command string entered by the user.
        project_name: Key into ``st.session_state.workspace_projects``; also
            used directly as the working directory — NOTE(review): assumes a
            directory with this name exists on disk, TODO confirm.

    Returns:
        The command's combined stdout+stderr on success, otherwise a
        human-readable error string (this function never raises).
    """
    try:
        if project_name in st.session_state.workspace_projects:
            # SECURITY: shell=True executes an arbitrary user-supplied string.
            # Intentional for a terminal UI, but never expose this path to
            # untrusted callers without sandboxing.
            result = subprocess.run(command, cwd=project_name, shell=True, capture_output=True, text=True)
            return result.stdout + result.stderr
        else:
            return f"Project {project_name} does not exist"
    except FileNotFoundError:
        # Raised when the command binary (or the cwd directory) is missing.
        return f"Error: Command not found. Please check your command."
    except Exception as e:
        # Catch-all so a failed command can't crash the Streamlit app;
        # `logging` is imported at module level with the other imports.
        logging.error(f"An error occurred: {e}")
        return f"An unexpected error occurred while running the command."
|
70 |
|
71 |
def get_agent_response(message: str, system_prompt: str):
|
72 |
+
llm = GoogleSearchAPIWrapper(google_api_key=GOOGLE_API_KEY)
|
73 |
memory = ConversationBufferMemory()
|
74 |
conversation = ConversationChain(llm=llm, memory=memory)
|
75 |
response = conversation.run(system_prompt + "\n" + message)
|
|
|
94 |
for message in st.session_state.chat_history:
|
95 |
st.text(message)
|
96 |
|
97 |
+
def run_autonomous_build(selected_agents: list[str], project_name: str):
|
98 |
st.info("Starting autonomous build process...")
|
99 |
for agent in selected_agents:
|
100 |
st.write(f"Agent {agent} is working on the project...")
|
|
|
103 |
st.write(f"Agent {agent} has completed its task.")
|
104 |
st.success("Autonomous build process completed!")
|
105 |
|
106 |
+
def collaborative_agent_example(selected_agents: list[str], project_name: str, task: str):
|
107 |
st.info(f"Starting collaborative task: {task}")
|
108 |
responses = {}
|
109 |
for agent in selected_agents:
|
|
|
115 |
st.success("Collaborative task completed!")
|
116 |
st.write(combined_response)
|
117 |
|
118 |
+
def combine_and_process_responses(responses: dict[str, str], task: str) -> str:
    """Merge per-agent replies into one labelled report for *task*.

    Each entry is rendered as ``agent: reply`` and the sections are separated
    by blank lines, preserving the dict's insertion order.
    """
    sections = (f"{agent}: {reply}" for agent, reply in responses.items())
    merged = "\n\n".join(sections)
    return f"Combined response for task '{task}':\n\n{merged}"
|
121 |
|
|
|
143 |
selected_file = st.selectbox("Select file to edit", files) if files else None
|
144 |
if selected_file:
|
145 |
file_content = next((file['code'] for file in st.session_state.workspace_projects[selected_project]['files'] if file['file_name'] == selected_file), "")
|
146 |
+
edited_code = st.text_area("Edit code", value=file_content, height=300) # Using st.text_area for now
|
147 |
if st.button("Save Changes"):
|
148 |
for file in st.session_state.workspace_projects[selected_project]['files']:
|
149 |
if file['file_name'] == selected_file:
|
|
|
153 |
else:
|
154 |
st.info("No files in the project. Use the chat interface to generate code.")
|
155 |
else:
|
156 |
+
st.info( "No projects created yet. Create a project to start coding."
|
157 |
|
158 |
# Terminal Interface
|
159 |
st.subheader("Terminal (Workspace Context)")
|
|
|
226 |
if collab_agents and collab_project and collab_task:
|
227 |
collaborative_agent_example(collab_agents, collab_project, collab_task)
|
228 |
else:
|
229 |
+
st.warning("Please select agents, enter a project name, and a task.")
|