acecalisto3 committed on
Commit 1939266 · verified · 1 Parent(s): 1e1ada9

Update app.py

Files changed (1)
  1. app.py +463 -42
app.py CHANGED
@@ -1,53 +1,474 @@
- import gradio as gr
- from transformers import pipeline
-
- def model_inference(model_name, task, input_data):
      try:
-         # Load the model pipeline dynamically based on user selection
-         model_pipeline = pipeline(task, model=model_name)
-         # Perform the inference
-         result = model_pipeline(input_data, max_length=100)
-         # Handle different output formats
-         if isinstance(result, list):
-             return result[0]['generated_text'] if 'generated_text' in result[0] else str(result)
-         return result
      except Exception as e:
-         # Return error message to the user interface
-         return f"An error occurred: {str(e)}"
-
- def setup_interface():
-     # Define the available models and tasks
-     models = {
-         "Text Generation": ["gpt2", "EleutherAI/gpt-neo-2.7B"],
-         "Text Classification": ["bert-base-uncased", "roberta-base"],
-         "Token Classification": ["dbmdz/bert-large-cased-finetuned-conll03-english"]
-     }
-
-     tasks = {
-         "Text Generation": "text-generation",
-         "Text Classification": "text-classification",
-         "Token Classification": "token-classification"
      }
-
-     with gr.Blocks() as demo:
-         gr.Markdown("### Hugging Face Model Playground")
-         with gr.Row():
-             selected_task = gr.Dropdown(label="Select Task", choices=list(models.keys()), value="Text Generation")
-             model_name = gr.Dropdown(label="Select Model", choices=models[selected_task.value])
-         input_data = gr.Textbox(label="Input", placeholder="Type here...")
-         output = gr.Textbox(label="Output", placeholder="Results will appear here...")
-
-         # Update the model dropdown based on task selection
-         def update_models(task):
-             return gr.Dropdown.update(choices=models[task])
-
-         selected_task.change(fn=update_models, inputs=selected_task, outputs=model_name)
-
-         # Run model inference when input data changes
-         input_data.change(fn=model_inference, inputs=[model_name, selected_task, input_data], outputs=output)
-
-     return demo
-
- if __name__ == "__main__":
-     interface = setup_interface()
-     interface.launch()
+ import os
+ import sys
+ import subprocess
+ import tempfile
+ import base64
+ import json
+ from io import StringIO
+ from typing import Dict, List, Optional, Tuple
+
+ import black
+ import requests
+ import streamlit as st
+ import torch
+ from huggingface_hub import HfApi, Repository
+ from transformers import (pipeline, AutoModelForCausalLM, AutoTokenizer,
+                           RagRetriever, RagTokenizer, RagTokenForGeneration)
+ from pylint import epylint as lint
+
+ # Add your Hugging Face API token here
+ hf_token = st.secrets["huggingface"]
+
+ # Global state to manage communication between Tool Box and Workspace Chat App
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "terminal_history" not in st.session_state:
+     st.session_state.terminal_history = []
+ if "workspace_projects" not in st.session_state:
+     st.session_state.workspace_projects = {}
+ if "available_agents" not in st.session_state:
+     st.session_state.available_agents = []
+
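+ # Assumed configuration constants: the draft references these names without
+ # defining them, so the paths and model list below are illustrative placeholders.
+ PROJECT_ROOT = "projects"
+ AGENT_DIRECTORY = "agents"
+ AVAILABLE_CODE_GENERATIVE_MODELS = ["bigcode/starcoder", "Salesforce/codegen-350M-mono"]
+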
+ # Load the pre-trained RAG components. "retrieval-question-answering" is not
+ # a built-in transformers pipeline task, so the model is loaded directly;
+ # the dummy index keeps this example self-contained.
+ rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
+ rag_retriever = RagRetriever.from_pretrained(
+     "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True)
+ rag_model = RagTokenForGeneration.from_pretrained(
+     "facebook/rag-token-base", retriever=rag_retriever)
+
+ # Load the pre-trained chat model (DialoGPT is a causal LM, not seq2seq)
+ chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+
+ # Load the matching tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
+ def process_input(user_input: str) -> str:
+     # Input pipeline: tokenize and preprocess the user input
+     inputs = rag_tokenizer(user_input, return_tensors="pt")
+
+     # RAG model: retrieve supporting passages and generate a response
+     with torch.no_grad():
+         generated_ids = rag_model.generate(input_ids=inputs["input_ids"])
+     response = rag_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     # Chat model: refine the response
+     chat_inputs = tokenizer(response, return_tensors="pt")
+     with torch.no_grad():
+         chat_ids = chat_model.generate(
+             **chat_inputs, max_length=100, pad_token_id=tokenizer.eos_token_id)
+     refined_response = tokenizer.decode(chat_ids[0], skip_special_tokens=True)
+
+     # Output pipeline: return the final response
+     return refined_response
+
+ class AIAgent:
+     def __init__(self, name: str, description: str, skills: List[str], hf_api=None):
+         self.name = name
+         self.description = description
+         self.skills = skills
+         self._hf_api = hf_api
+         self._hf_token = hf_token
+
+     @property
+     def hf_api(self):
+         if not self._hf_api and self.has_valid_hf_token():
+             self._hf_api = HfApi(token=self._hf_token)
+         return self._hf_api
+
+     def has_valid_hf_token(self):
+         return bool(self._hf_token)
+
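+     def create_agent_prompt(self) -> str:
+         # Assumed helper (referenced by save_agent_to_file below but not
+         # defined in the draft): a minimal prompt built from the agent's metadata.
+         skills_list = "\n".join(f"- {skill}" for skill in self.skills)
+         return f"You are {self.name}. {self.description}\nSkills:\n{skills_list}"
+
+     def deploy_built_space_to_hf(self):
+         # Assumed hook (called from the Automate flow but not defined in the
+         # draft); the actual upload is handled by create_space_on_hugging_face below.
+         pass
+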
+     def autonomous_build(self, chat_history: List[tuple], workspace_projects: Dict[str, Dict], project_name: str, selected_model: str):
+         # Summarize the current state of the conversation and workspace
+         summary = "Chat History:\n" + "\n".join(
+             f"User: {user}\nAssistant: {response}" for user, response in chat_history)
+         summary += "\n\nWorkspace Projects:\n" + "\n".join(
+             f"{project}: {details}" for project, details in workspace_projects.items())
+
+         # Analyze chat history and workspace projects to suggest actions.
+         # Examples of requests to check for:
+         # - creating a new file
+         # - installing a package
+         # - running a command
+         # - generating code
+         # - translating code
+         # - summarizing text
+         # - analyzing sentiment
+
+         # Generate a response based on the analysis
+         next_step = "Based on the current state, the next logical step is to implement the main application logic."
+
+         # Ensure the project folder exists
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         if not os.path.exists(project_path):
+             os.makedirs(project_path)
+
+         # Create requirements.txt if it doesn't exist
+         requirements_file = os.path.join(project_path, "requirements.txt")
+         if not os.path.exists(requirements_file):
+             with open(requirements_file, "w") as f:
+                 f.write("# Add your project's dependencies here\n")
+
+         # Create app.py if it doesn't exist
+         app_file = os.path.join(project_path, "app.py")
+         if not os.path.exists(app_file):
+             with open(app_file, "w") as f:
+                 f.write("# Your project's main application logic goes here\n")
+
+         # Generate GUI code for app.py if requested
+         if "create a gui" in summary.lower():
+             gui_code = generate_code(
+                 "Create a simple GUI for this application", selected_model)
+             with open(app_file, "a") as f:
+                 f.write(gui_code)
+
+         # Run the default build process
+         build_command = "pip install -r requirements.txt && python app.py"
+         try:
+             result = subprocess.run(
+                 build_command, shell=True, capture_output=True, text=True, cwd=project_path)
+             st.write(f"Build Output:\n{result.stdout}")
+             if result.stderr:
+                 st.error(f"Build Errors:\n{result.stderr}")
+         except Exception as e:
+             st.error(f"Build Error: {e}")
+
+         return summary, next_step
+
+ def get_built_space_files() -> Dict[str, str]:
+     # Replace with your logic to gather the files you want to deploy
+     return {
+         "app.py": "# Your Streamlit app code here",
+         "requirements.txt": "streamlit\ntransformers"
+         # Add other files as needed
+     }
+
+ def save_agent_to_file(agent: AIAgent):
+     """Saves the agent's prompt to a file."""
+     if not os.path.exists(AGENT_DIRECTORY):
+         os.makedirs(AGENT_DIRECTORY)
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
+     with open(file_path, "w") as file:
+         file.write(agent.create_agent_prompt())
+     st.session_state.available_agents.append(agent.name)
+
+ def load_agent_prompt(agent_name: str) -> Optional[str]:
+     """Loads an agent prompt from a file."""
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
+     if os.path.exists(file_path):
+         with open(file_path, "r") as file:
+             agent_prompt = file.read()
+         return agent_prompt
+     else:
+         return None
+
+ def create_agent_from_text(name: str, text: str) -> str:
+     skills = text.split("\n")
+     agent = AIAgent(name, "AI agent created from text input.", skills)
+     save_agent_to_file(agent)
+     return agent.create_agent_prompt()
+
+ def chat_interface_with_agent(input_text: str, agent_name: str) -> str:
+     agent_prompt = load_agent_prompt(agent_name)
+     if agent_prompt is None:
+         return f"Agent {agent_name} not found."
+
+     model_name = "MaziyarPanahi/Codestral-22B-v0.1-GGUF"
      try:
+         generator = pipeline("text-generation", model=model_name)
+         generator.tokenizer.pad_token = generator.tokenizer.eos_token
+         # max_new_tokens bounds the reply without counting the prompt length
+         generated_response = generator(
+             f"{agent_prompt}\n\nUser: {input_text}\nAgent:",
+             max_new_tokens=100, do_sample=True, top_k=50)[0]["generated_text"]
+         return generated_response
      except Exception as e:
+         return f"Error loading model: {e}"
+
+ def terminal_interface(command: str, project_name: str = None) -> str:
+     if project_name:
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         if not os.path.exists(project_path):
+             return f"Project {project_name} does not exist."
+         result = subprocess.run(
+             command, shell=True, capture_output=True, text=True, cwd=project_path)
+     else:
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
+     return result.stdout
+
+ def code_editor_interface(code: str) -> Tuple[str, str]:
+     # Format the code with black; NothingChanged means it was already clean
+     try:
+         formatted_code = black.format_str(code, mode=black.FileMode())
+     except black.NothingChanged:
+         formatted_code = code
+
+     # pylint lints files rather than strings, so write the code to a
+     # temporary file before running it through epylint
+     with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as tmp:
+         tmp.write(code)
+         tmp_path = tmp.name
+     (pylint_stdout, pylint_stderr) = lint.py_run(tmp_path, return_std=True)
+     os.remove(tmp_path)
+
+     lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
+
+     return formatted_code, lint_message
+
+ def summarize_text(text: str) -> str:
+     summarizer = pipeline("summarization")
+     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
+     return summary[0]['summary_text']
+
+ def sentiment_analysis(text: str) -> str:
+     analyzer = pipeline("sentiment-analysis")
+     result = analyzer(text)
+     return result[0]['label']
+
+ def translate_code(code: str, source_language: str, target_language: str) -> str:
+     # Use a Hugging Face translation model instead of OpenAI. Note that
+     # "translation" pipelines target natural languages, and GGUF checkpoints
+     # such as this one are not loadable through transformers pipelines, so
+     # treat this model name as a placeholder.
+     translator = pipeline(
+         "translation", model="bartowski/Codestral-22B-v0.1-GGUF")
+     translated_code = translator(
+         code, src_lang=source_language, tgt_lang=target_language)[0]['translation_text']
+     return translated_code
+
+ def generate_code(code_idea: str, model_name: str) -> str:
+     """Generates code using the selected model."""
+     try:
+         generator = pipeline('text-generation', model=model_name)
+         generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
+         return generated_code
+     except Exception as e:
+         return f"Error generating code: {e}"
+
+ def chat_interface(input_text: str) -> str:
+     """Handles general chat interactions with the user."""
+     # Use a Hugging Face chatbot model or your own logic
+     chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
+     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
+     return response
+
+ def workspace_interface(project_name: str) -> str:
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         os.makedirs(project_path)
+         st.session_state.workspace_projects[project_name] = {'files': []}
+         return f"Project '{project_name}' created successfully."
+     else:
+         return f"Project '{project_name}' already exists."
+
+ def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         return f"Project '{project_name}' does not exist."
+
+     file_path = os.path.join(project_path, file_name)
+     with open(file_path, "w") as file:
+         file.write(code)
+     # setdefault guards against projects created in an earlier session
+     st.session_state.workspace_projects.setdefault(
+         project_name, {'files': []})['files'].append(file_name)
+     return f"Code added to '{file_name}' in project '{project_name}'."
+
+ def create_space_on_hugging_face(api, name, description, public, files, entrypoint="launch.py"):
+     # Note: this posts to an assumed, unofficial Spaces endpoint; the
+     # supported route is huggingface_hub's create_repo/upload_file API.
+     url = f"https://huggingface.co/api/spaces/{name}/prepare-repo"
+     headers = {"Authorization": f"Bearer {api.token}"}
+     payload = {
+         "public": public,
+         "gitignore_template": "web",
+         "default_branch": "main",
+         "archived": False,
+         "files": []
      }
+     for filename, contents in files.items():
+         data = {
+             "content": contents,
+             "path": filename,
+             "encoding": "utf-8",
+             "mode": "overwrite"
+         }
+         payload["files"].append(data)
+     response = requests.post(url, json=payload, headers=headers)
+     response.raise_for_status()
+     location = response.headers.get("Location")
+     # wait_for_processing(location, api) # You might need to implement this if it's not already defined
+
+     # Repository wraps a local git clone of the (assumed) newly created Space
+     return Repository(local_dir=name, clone_from=f"https://huggingface.co/spaces/{name}")
+
+ # Streamlit App
+ st.title("AI Agent Creator")
+
+ # Sidebar navigation
+ st.sidebar.title("Navigation")
+ app_mode = st.sidebar.selectbox(
+     "Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+ if app_mode == "AI Agent Creator":
+     # AI Agent Creator
+     st.header("Create an AI Agent from Text")
+
+     st.subheader("From Text")
+     agent_name = st.text_input("Enter agent name:")
+     text_input = st.text_area("Enter skills (one per line):")
+     if st.button("Create Agent"):
+         # save_agent_to_file (called inside create_agent_from_text) already
+         # registers the agent in st.session_state.available_agents
+         agent_prompt = create_agent_from_text(agent_name, text_input)
+         st.success(f"Agent '{agent_name}' created and saved successfully.")
+
+ elif app_mode == "Tool Box":
+     # Tool Box
+     st.header("AI-Powered Tools")
+
+     # Chat Interface
+     st.subheader("Chat with CodeCraft")
+     chat_input = st.text_area("Enter your message:")
+     if st.button("Send"):
+         chat_response = chat_interface(chat_input)
+         st.session_state.chat_history.append((chat_input, chat_response))
+         st.write(f"CodeCraft: {chat_response}")
+
+     # Terminal Interface
+     st.subheader("Terminal")
+     terminal_input = st.text_input("Enter a command:")
+     if st.button("Run"):
+         terminal_output = terminal_interface(terminal_input)
+         st.session_state.terminal_history.append(
+             (terminal_input, terminal_output))
+         st.code(terminal_output, language="bash")
+
+     # Code Editor Interface
+     st.subheader("Code Editor")
+     code_editor = st.text_area("Write your code:", height=300)
+     if st.button("Format & Lint"):
+         formatted_code, lint_message = code_editor_interface(code_editor)
+         st.code(formatted_code, language="python")
+         st.info(lint_message)
+
+     # Text Summarization Tool
+     st.subheader("Summarize Text")
+     text_to_summarize = st.text_area("Enter text to summarize:")
+     if st.button("Summarize"):
+         summary = summarize_text(text_to_summarize)
+         st.write(f"Summary: {summary}")
+
+     # Sentiment Analysis Tool
+     st.subheader("Sentiment Analysis")
+     sentiment_text = st.text_area("Enter text for sentiment analysis:")
+     if st.button("Analyze Sentiment"):
+         sentiment = sentiment_analysis(sentiment_text)
+         st.write(f"Sentiment: {sentiment}")
+
+     # Text Translation Tool (Code Translation)
+     st.subheader("Translate Code")
+     code_to_translate = st.text_area("Enter code to translate:")
+     source_language = st.text_input("Enter source language (e.g., 'Python'):")
+     target_language = st.text_input(
+         "Enter target language (e.g., 'JavaScript'):")
+     if st.button("Translate Code"):
+         translated_code = translate_code(
+             code_to_translate, source_language, target_language)
+         st.code(translated_code, language=target_language.lower())
+
+     # Code Generation
+     st.subheader("Code Generation")
+     code_idea = st.text_input("Enter your code idea:")
+     selected_model = st.selectbox(
+         "Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
+     if st.button("Generate Code"):
+         generated_code = generate_code(code_idea, selected_model)
+         st.code(generated_code, language="python")
+
+ elif app_mode == "Workspace Chat App":
+     # Workspace Chat App
+     st.header("Workspace Chat App")
+
+     # Project Workspace Creation
+     st.subheader("Create a New Project")
+     project_name = st.text_input("Enter project name:")
+     if st.button("Create Project"):
+         workspace_status = workspace_interface(project_name)
+         st.success(workspace_status)
+
+         # Automatically create requirements.txt and app.py
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         requirements_file = os.path.join(project_path, "requirements.txt")
+         if not os.path.exists(requirements_file):
+             with open(requirements_file, "w") as f:
+                 f.write("# Add your project's dependencies here\n")
+
+         app_file = os.path.join(project_path, "app.py")
+         if not os.path.exists(app_file):
+             with open(app_file, "w") as f:
+                 f.write("# Your project's main application logic goes here\n")
+
+     # Add Code to Workspace
+     st.subheader("Add Code to Workspace")
+     code_to_add = st.text_area("Enter code to add to workspace:")
+     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
+     if st.button("Add Code"):
+         add_code_status = add_code_to_workspace(
+             project_name, code_to_add, file_name)
+         st.session_state.terminal_history.append(
+             (f"Add Code: {code_to_add}", add_code_status))
+         st.success(add_code_status)
+
+     # Terminal Interface with Project Context
+     st.subheader("Terminal (Workspace Context)")
+     terminal_input = st.text_input("Enter a command within the workspace:")
+     if st.button("Run Command"):
+         terminal_output = terminal_interface(terminal_input, project_name)
+         st.session_state.terminal_history.append(
+             (terminal_input, terminal_output))
+         st.code(terminal_output, language="bash")
+
+     # Chat Interface for Guidance
+     st.subheader("Chat with CodeCraft for Guidance")
+     chat_input = st.text_area("Enter your message for guidance:")
+     if st.button("Get Guidance"):
+         chat_response = chat_interface(chat_input)
+         st.session_state.chat_history.append((chat_input, chat_response))
+         st.write(f"CodeCraft: {chat_response}")
+
+     # Display Chat History
+     st.subheader("Chat History")
+     for user_input, response in st.session_state.chat_history:
+         st.write(f"User: {user_input}")
+         st.write(f"CodeCraft: {response}")
+
+     # Display Terminal History
+     st.subheader("Terminal History")
+     for command, output in st.session_state.terminal_history:
+         st.write(f"Command: {command}")
+         st.code(output, language="bash")
+
+     # Display Projects and Files
+     st.subheader("Workspace Projects")
+     for project, details in st.session_state.workspace_projects.items():
+         st.write(f"Project: {project}")
+         for file in details['files']:
+             st.write(f"  - {file}")
+
+     # Chat with AI Agents
+     st.subheader("Chat with AI Agents")
+     selected_agent = st.selectbox(
+         "Select an AI agent", st.session_state.available_agents)
+     agent_chat_input = st.text_area("Enter your message for the agent:")
+     if st.button("Send to Agent"):
+         agent_chat_response = chat_interface_with_agent(
+             agent_chat_input, selected_agent)
+         st.session_state.chat_history.append(
+             (agent_chat_input, agent_chat_response))
+         st.write(f"{selected_agent}: {agent_chat_response}")
+
+     # Code Generation
+     st.subheader("Code Generation")
+     code_idea = st.text_input("Enter your code idea:")
+
+     # Model Selection Menu
+     selected_model = st.selectbox(
+         "Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
+
+     if st.button("Generate Code"):
+         generated_code = generate_code(code_idea, selected_model)
+         st.code(generated_code, language="python")
+
+     # Automate Build Process
+     st.subheader("Automate Build Process")
+     if st.button("Automate"):
+         # Load the agent without skills for now
+         agent = AIAgent(selected_agent, "", [])
+         summary, next_step = agent.autonomous_build(
+             st.session_state.chat_history, st.session_state.workspace_projects,
+             project_name, selected_model)
+         st.write("Autonomous Build Summary:")
+         st.write(summary)
+         st.write("Next Step:")
+         st.write(next_step)
+
+         # If everything went well, proceed to deploy the Space
+         if agent.hf_api and agent.has_valid_hf_token():
+             agent.deploy_built_space_to_hf()
+             # Use hf_token to interact with the Hugging Face API and
+             # create a Space on Hugging Face
+             api = HfApi(token=hf_token)
+             create_space_on_hugging_face(
+                 api, agent.name, agent.description, True, get_built_space_files())