jaeson commited on
Commit
1b6fe24
·
1 Parent(s): ab8e90d

added chat

Browse files
Files changed (4) hide show
  1. .gitignore +5 -0
  2. app.py +49 -29
  3. smol.py +74 -0
  4. tool.py +248 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ .env
2
+ .env.local
3
+ .env.development
4
+ /__pycache__/
5
+
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import json
3
  import os
 
4
 
5
  GLOBAL_STATE_FILE = "global_state.json"
6
  if os.path.exists(GLOBAL_STATE_FILE):
@@ -9,25 +10,36 @@ if os.path.exists(GLOBAL_STATE_FILE):
9
  else:
10
  global_game_state = {} # initialize an empty state
11
 
12
- # TODO: Function to handle chat updates.
13
- def process_chat(message, local_state):
14
- # Initialize local state if None
15
- local_state = local_state or {}
16
- # Update local state with the latest message
17
- local_state["last_message"] = message
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- # Check for a command to update the global game state
20
- if message.strip().lower() == "update global":
21
- # Update the global state
22
- global global_game_state
23
- global_game_state = local_state.copy()
24
- # Optionally persist the global state to a file
25
- with open(GLOBAL_STATE_FILE, "w") as f:
26
- json.dump(global_game_state, f)
27
- response = "Global game updated!"
28
- else:
29
- response = f"Local game updated with: {message}"
30
- return "", local_state, response
31
 
32
  with gr.Blocks() as demo:
33
  gr.Markdown("## Live Game & Chat Interface")
@@ -45,17 +57,25 @@ with gr.Blocks() as demo:
45
  # Right Column: Chat interface (smaller column)
46
  with gr.Column(scale=1):
47
  gr.Markdown("### Chat")
48
- chat_output = gr.Textbox(label="Chat Output", interactive=False)
49
- chat_input = gr.Textbox(
50
- placeholder="Type your message here...",
51
- label="Your Message"
52
- )
53
- local_state = gr.State({})
 
 
 
 
 
 
 
 
54
 
55
- chat_input.submit(
56
- process_chat,
57
- inputs=[chat_input, local_state],
58
- outputs=[chat_input, local_state, chat_output],
59
- )
60
 
61
  demo.launch()
 
1
  import gradio as gr
2
  import json
3
  import os
4
+ from smol import ChatAgent
5
 
6
  GLOBAL_STATE_FILE = "global_state.json"
7
  if os.path.exists(GLOBAL_STATE_FILE):
 
10
  else:
11
  global_game_state = {} # initialize an empty state
12
 
13
def process_chat(message, chat_history):
    """Append one chat turn (user message + agent reply) to the history.

    Args:
        message: Raw text submitted by the user.
        chat_history: Existing conversation history, or None on first call.

    Returns:
        ("", updated_history) — the empty string clears the input textbox,
        the history feeds the Chatbot component.
    """
    if chat_history is None:
        chat_history = []

    # gr.Chatbot(type="messages") expects {"role": ..., "content": ...} dicts;
    # the previous ("User", message) tuple format only works with the legacy
    # tuples mode and would break the messages-mode Chatbot used in this UI.
    chat_history.append({"role": "user", "content": message})

    response = ChatAgent(message)

    chat_history.append({"role": "assistant", "content": response})

    return "", chat_history
24
+
25
def chat_function(user_prompt, history):
    """Drive a single chat turn: record the prompt, query the agent, record the reply.

    Args:
        user_prompt: Text typed by the user.
        history: Conversation so far as a list of role/content dicts (or None).

    Returns:
        ("", history) so the submit handler clears the textbox and refreshes
        the Chatbot component with the updated message list.
    """
    if history is None:
        history = []

    # Record the user's side of the turn in messages format.
    history.append({"role": "user", "content": user_prompt})

    # Ask the agent for a reply to this prompt.
    ai_response = ChatAgent(user_prompt)

    # Debug traces retained from the original implementation.
    print(f"AI RESPONSE: {ai_response}")
    print("HISTORY: ", history)

    # Record the assistant's side of the turn.
    history.append({"role": "assistant", "content": ai_response})

    return "", history
42
+
 
43
 
44
  with gr.Blocks() as demo:
45
  gr.Markdown("## Live Game & Chat Interface")
 
57
  # Right Column: Chat interface (smaller column)
58
  with gr.Column(scale=1):
59
  gr.Markdown("### Chat")
60
+ chatbot = gr.Chatbot(type="messages",label="Conversation")
61
+ # Textbox to receive user prompt
62
+ txt_input = gr.Textbox(placeholder="Type your prompt here...", label="Your Message")
63
+ # State to hold the conversation history
64
+ state = gr.State([])
65
+
66
+ # When the user submits a message, update the chat
67
+ txt_input.submit(chat_function, inputs=[txt_input, state], outputs=[txt_input, chatbot])
68
+ # chat_output = gr.Chatbot(label="Chat Output", type="messages", interactive=False)
69
+ # chat_input = gr.Textbox(
70
+ # placeholder="Type your message here...",
71
+ # label="Your Message"
72
+ # )
73
+ # chat_history = gr.State([])
74
 
75
+ # chat_input.submit(
76
+ # process_chat,
77
+ # inputs=[chat_input, chat_history],
78
+ # outputs=[chat_input, chat_output],
79
+ # )
80
 
81
  demo.launch()
smol.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+
4
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool, HfApiModel
5
+ from dotenv import load_dotenv, dotenv_values
6
+ from tool import FindFilesTool, GitPushTool, FileReplaceTool, ProcessFlowIdentifierTool, GetImageDimensionsTool, FileModifyTool
7
+
8
load_dotenv()  # pull secrets (HF_TOKEN, git identity, ...) from a local .env file

# Hugging Face Hub token used to authenticate HfApiModel calls.
HF_TOKEN = os.getenv("HF_TOKEN")

# Image generation is delegated to the hosted FLUX.1-schnell Space.
image_generation_tool = Tool.from_space(
    "black-forest-labs/FLUX.1-schnell",
    name="image_generator",
    description="Generate an image from a prompt"
)

# Single shared LLM backing every agent created in this module.
model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
"""
Todo:
- prompt cleaning
- ensure github upload pathways
==============
step 1: receive prompt
(deferred for now) step 2: analyze prompt for specific task (asset change, script change, etc)
step 3: crawl files to search for specific file that matches task and save file location
step 4: run appropriate tool to accomplish task
step 5: upload changes to github
"""

# Tool instances shared by ChatAgent below (constructed once at import time).
find_files_tool = FindFilesTool()
file_replace_tool = FileReplaceTool()
process_identifier_tool= ProcessFlowIdentifierTool()
get_image_dimensions_tool= GetImageDimensionsTool()
file_modify_tool = FileModifyTool()
36
+
37
def ChatAgent(userPrompt):
    """Route a user prompt to the appropriate agent workflow and return its reply.

    A first, tool-less "prompt cleaner" agent classifies the prompt as one of
    asset_change / script_update / conversation. Conversational prompts get a
    plain friendly reply; anything else is handed to a tool-equipped agent
    together with the app description so it can locate and modify files.

    Args:
        userPrompt: Raw text typed by the user in the chat UI.

    Returns:
        The response string produced by the agent run.
    """
    # Step 1: classify the intent of the prompt.
    promptCleanerAgent = CodeAgent(tools=[], model=model)
    instructions = promptCleanerAgent.run(f"""
    determine the purpose of the following string "{userPrompt}" if it is one of the following: [asset_change, script_update, conversation].
    asset_change: The user wants to change an asset in the game.
    script_update: The user wants to update the game script.
    conversation: The user wants to have a conversation with the AI by asking general questions or greetings.
    """)

    appDescription = """
    This is a 2d platformer game where the player controls a ball that bounces off platforms falling down. This app
    uses typescript and sandpack. The folder components/sandpack-examples.tsx file contains the game logic and scripts.
    """

    # Step 2: build the worker agent once; both branches use the same toolset.
    agent = CodeAgent(tools=[find_files_tool, process_identifier_tool, image_generation_tool, file_modify_tool, get_image_dimensions_tool, file_replace_tool], model=model)

    # The original code tested `instructions == "conversation"` twice in two
    # separate if/else blocks; the branches are merged here so context prompt
    # construction and the agent run stay together.
    # NOTE(review): `instructions` is free-form LLM output, so an exact string
    # match on "conversation" may rarely hit — confirm the classifier's output
    # format.
    if instructions == "conversation":
        contextPrompt = f"""
        User prompt '{userPrompt}'
        1) Reply to the user as a friendly ai agent.
        2) Do not use any tools to modify files.
        3) End process after replying to the user.
        """
        response = agent.run(contextPrompt)
    else:
        contextPrompt = f'using process_identifier_tool look for the appropriate instructions for "{instructions}" and apply it to the user prompt after this'
        response = agent.run(f"{appDescription} {contextPrompt} {userPrompt} ")

    print(f"Response made: {response}")
    return response
70
+ # # Run the agent to generate an image based on a prompt
71
+ # # Check if the image path exists
72
+ # update_git_tool = GitPushTool()
73
+ # agent = CodeAgent(tools=[update_git_tool], model=model)
74
+ # agent.run("commit to new branch and push to repo", additional_args={'branch_name': 'image-replace-tool-5'})
tool.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool, HfApiModel
2
+ import os
3
+ from dotenv import load_dotenv
4
class FileReaderTool(Tool):
    # Agent tool: read a file from disk and return its full text content.
    name = "file_reader_tool"
    description = """
    This tool will be used by the LLM Agent to read files to help analyze files for its task.
    """
    inputs = {
        "file_location": {
            "type": "string",
            "description": "The location of the file that will be read/analyzed"
        }
    }
    output_type = "string"

    def forward(self,file_location ) -> str:
        """Return the text content of the file at *file_location*."""
        # Text-mode read; raises OSError if the path is missing or unreadable.
        with open(file_location, "r") as file:
            return file.read()
20
+
21
class FileWriteTool(Tool):
    # Agent tool: create or fully overwrite a file with the supplied content.
    name = "file_write_tool"
    description = """
    This tool will be used by the LLM Agent to overwrite files if needed for task.
    """
    inputs = {
        "file_location": {
            "type": "string",
            "description": "The location of the file that will be read/analyzed"
        },
        "new_code": {
            "type": "string",
            "description": "This is the code that will overwrite the contents of a file. If file does not exist, it is the new content."
        }
    }
    output_type = "string"

    def forward(self, file_location, new_code) -> str:
        """Overwrite (or create) *file_location* with *new_code*.

        Returns:
            A human-readable status string. (The original returned the raw
            int from file.write(), contradicting output_type = "string".)
        """
        with open(file_location, "w") as file:
            written = file.write(new_code)
        return f"Wrote {written} characters to {file_location}"
41
+
42
class FileModifyTool(Tool):
    # Agent tool: read a file, let an LLM rewrite it per a prompt, write it back.
    name = "file_modify_tool"
    description = """
    This tool will be used by the LLM Agent to modify files if needed for task.
    """
    inputs = {
        "file_location": {
            "type": "string",
            "description": "The location of the file that will be read/analyzed"
        },
        "prompt": {
            "type": "string",
            "description": "This is the prompt that the LLM will use to decide how to modify the code."
        }
    }
    output_type = "string"

    def forward(self,file_location, prompt) -> str:
        """Modify the file at *file_location* according to *prompt*.

        Returns:
            A status string describing success or the failure reason. (The
            original ended with a bare `return`, yielding None despite
            output_type = "string".)
        """
        load_dotenv()

        HF_TOKEN = os.getenv("HF_TOKEN")

        file_reader_tool = FileReaderTool()
        file_write_tool = FileWriteTool()

        model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
        coderAgent = CodeAgent(tools=[], model=model)

        file_content = file_reader_tool.forward(file_location)

        if not file_content:
            return "Error: File could not be read."

        modified_code = coderAgent.run(f'Based on the prompt "{prompt}" \n Modify the following code:\n{file_content}\n')
        # NOTE(review): assumes the agent flags failures by embedding "ERROR"
        # in its output — confirm against the agent's actual failure modes.
        if "ERROR" in modified_code:
            return "Modification failed, please refine your request."
        write_result = file_write_tool.forward(file_location, modified_code)
        return f"File {file_location} modified successfully. {write_result}"
80
+
81
+
82
+
83
class FileReplaceTool(Tool):
    # Agent tool: replace one file wholesale with another (no content merging).
    name = "file_replace_tool"
    description ="""
    This tool will be used to replace the file in a given location with the provided new file location. This is not used to update files.
    """
    inputs = {
        "target_file_location": {
            "type": "string",
            "description": "the location of the file that will be replaced"
        },
        "new_file_location": {
            "type": "string",
            "description": "the location of the new file to replace target file location"
        }
    }
    output_type = "string"
    def forward(self, target_file_location, new_file_location) -> str:
        """Copy *new_file_location* over *target_file_location*.

        Returns:
            A status string. (The original did `return print(...)`, which
            returns None and contradicts output_type = "string".)
        """
        import os
        import shutil

        if os.path.exists(new_file_location):
            # Overwrite the target with the freshly generated file.
            shutil.copy(new_file_location, target_file_location)
            return f"Image saved to {target_file_location}"
        return "Failed to generate an image or the file does not exist."
112
+
113
class GetImageDimensionsTool(Tool):
    # Agent tool: report an image file's pixel dimensions.
    name = "get_image_dimensions_tool"
    description= """
    This tool is used to get the width and height of a webp file.
    """
    inputs = {
        "file_location": {
            "type": "string",
            "description": "The location in which the webp file can be located"
        }
    }
    output_type = "object"
    def forward(self, file_location) -> dict:
        """Return {"width": ..., "height": ...} for the image at *file_location*."""
        # PIL is imported lazily so the module still imports without Pillow.
        from PIL import Image

        with Image.open(file_location) as image:
            dimensions = image.size

        return {"width": dimensions[0], "height": dimensions[1]}
132
+
133
+ class ProcessFlowIdentifierTool(Tool):
134
+ name = "process_flow_identifier_tool"
135
+ description = """
136
+ This tool will be used to give a set of instructions depending on the purpose of the prompt. This is to aid the LLM in its decision making process.
137
+ """
138
+ inputs = {
139
+ "prompt_objective": {
140
+ "type": "string",
141
+ "description": "This is the objective of the user's original prompt to help identify the steps needed for the llm to take."
142
+ }
143
+ }
144
+ output_type = "string"
145
+ def forward(self, prompt_objective) -> str:
146
+ match prompt_objective:
147
+ case "asset_change":
148
+ instructions = """
149
+ 1) use the find files tool to get a list of files containing tsx, and find the sandpack-examples.tsx file and copy its path.
150
+ 2) Use the file_modify_tool to analyze and update the file.
151
+ 3) End process after sucessfully modifying the file
152
+ """
153
+ return instructions
154
+ case "script_change":
155
+ instructions = """
156
+ 1) use the find files tool to get a list of files containing tsx, and find the sandpack-examples.tsx file and copy its path.
157
+ 2) Use the file_modify_tool to analyze and update the file.
158
+ 3) End process after sucessfully modifying the file
159
+ """
160
+ return instructions
161
+ case "conversation":
162
+ """
163
+ 1) Reply to the user with the last message they sent as a friendly ai agent.
164
+ 2) Do not use any tools to modify files.
165
+ 3) End process after replying to the user.
166
+ """
167
+ case _:
168
+ instructions = """
169
+ inform user that the instructions where unclear
170
+ """
171
+ return instructions
172
+
173
+
174
class GitPushTool(Tool):
    # Agent tool: branch, commit everything, and push to the remote repository.
    name = "git_push_tool"
    description = """
    This tool will be triggered to create a new branch and push new changes to the repository.
    """
    inputs = {
        "branch_name": {
            "type": "string",
            "description": "the target branch that will be pushed, new or existing."
        }
    }
    output_type = "string"

    def forward(self, branch_name) -> str:
        """Create *branch_name*, commit all staged changes, and push it to origin.

        Returns:
            A status string. (The original did `return print(...)` on both
            paths, returning None and contradicting output_type = "string".)
        """
        import os
        import subprocess
        try:
            # Git identity taken from the environment (.env).
            gitUsername = os.getenv("GIT_USERNAME")
            gitEmail = os.getenv("GIT_EMAIL")
            # Step 1: Ensure we are in a Git repository
            subprocess.run(["git", "status"], check=True)

            # Step 2: Create and switch to a new branch.
            # NOTE(review): "-b" fails if the branch already exists, although
            # the input description promises "new or existing" — confirm the
            # intended behavior before relying on existing branches.
            subprocess.run(["git", "checkout", "-b", branch_name], check=True)
            print(f"Checked out to new branch: {branch_name}")

            # Step 3: Add the changes ("*" is expanded by git as a pathspec).
            subprocess.run(["git", "add", "*"], check=True)
            print("Changes added to staging.")
            # Step 4: Add credentials
            subprocess.run(["git", "config", "--global", "user.email", gitEmail], check=True)
            print("Updated git email.")
            subprocess.run(["git", "config", "--global", "user.name", gitUsername], check=True)
            print("Updated git user name.")

            # Step 5: Commit the changes
            commit_message = "Add generated image to repository"
            subprocess.run(["git", "commit", "-m", commit_message], check=True)
            print("Changes committed.")

            # Step 6: Push the branch to the remote repository
            subprocess.run(["git", "push", "--set-upstream", "origin", branch_name], check=True)
            return f"Branch '{branch_name}' pushed to remote repository."
        except subprocess.CalledProcessError as e:
            return f"An error occurred while performing Git operations: {e}"
220
+
221
class FindFilesTool(Tool):
    # Agent tool: locate files by extension under the current working directory.
    name = "find_files"
    description = "Find files with a given extension in a directory and its subdirectories"
    # Fixed input description: it previously read "the place from which you
    # start your ride" (a copy-paste from an unrelated example), which would
    # mislead the agent when choosing arguments.
    inputs = {"extension":{"type":"string","description":"The file extension to look for (e.g., '.tsx')"}}

    output_type = "string"

    def forward(self, extension: str) -> str:
        """
        Recursively search for files with a given extension in a directory and its subdirectories.

        Args:
            extension: The file extension to look for (e.g., '.txt')

        Returns:
            A newline-separated string of absolute paths. (The original
            returned a list, contradicting both the `-> str` annotation and
            output_type = "string".)
        """
        import os

        root_dir = "./"
        found_files = []

        # Walk through the directory tree
        for dirpath, dirnames, filenames in os.walk(root_dir):
            for filename in filenames:
                if filename.endswith(extension):
                    filepath = os.path.join(dirpath, filename)
                    found_files.append(os.path.abspath(filepath))

        return "\n".join(found_files)