acecalisto3 committed
Commit b3ef9b6 · verified · 1 Parent(s): c45a610

Update app.py

Files changed (1):
  app.py  +323 -227
app.py CHANGED
--- a/app.py (before)
@@ -7,31 +7,43 @@ from datetime import datetime
 import logging
 
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-from huggingface_hub import InferenceClient, cached_download, Repository, HfApi
-from IPython.display import display, HTML
 
 # --- Configuration ---
-VERBOSE = True
-MAX_HISTORY = 5
-MAX_TOKENS = 2048
-TEMPERATURE = 0.7
-TOP_P = 0.8
-REPETITION_PENALTY = 1.5
-DEFAULT_PROJECT_PATH = "./my-hf-project"  # Default project directory
 
 # --- Logging Setup ---
 logging.basicConfig(
-    filename="app.log",
-    level=logging.INFO,
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
 # --- Prompts ---
 PREFIX = """
 {date_time_str}
 Purpose: {purpose}
-Agent: {agent_name}
 """
 
 LOG_PROMPT = """
@@ -42,254 +54,338 @@ LOG_RESPONSE = """
 RESPONSE: {resp}
 """
 
-# --- Global Variables ---
-current_model = None  # Store the currently loaded model
-repo = None  # Store the Hugging Face Repository object
-model_descriptions = {}  # Store model descriptions
 
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
-    prompt = ""
     for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
-    prompt += f"Human: {message}\nAssistant:"
     return prompt
 
-def generate_response(
     prompt: str,
     history: List[Tuple[str, str]],
-    agent_name: str = "Generic Agent",
     sys_prompt: str = "",
     temperature: float = TEMPERATURE,
     max_new_tokens: int = MAX_TOKENS,
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
-    global current_model
-    if current_model is None:
-        return "Error: Please load a model first."
-
-    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    full_prompt = PREFIX.format(
         date_time_str=date_time_str,
-        purpose=sys_prompt,
-        agent_name=agent_name
-    ) + format_prompt(prompt, history)
-
     if VERBOSE:
-        logging.info(LOG_PROMPT.format(content=full_prompt))
-
-    response = current_model(
-        full_prompt,
-        max_new_tokens=max_new_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True
-    )[0]['generated_text']
 
-    assistant_response = response.split("Assistant:")[-1].strip()
 
     if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp=assistant_response))
-
-    return assistant_response
-
-def load_hf_model(model_name: str):
-    """Loads a language model and fetches its description."""
-    global current_model, model_descriptions
-    try:
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-        current_model = pipeline(
-            "text-generation",
-            model=model_name,
-            tokenizer=tokenizer,
-            model_kwargs={"load_in_8bit": True}
-        )
-
-        # Fetch and store the model description
-        api = HfApi()
-        model_info = api.model_info(model_name)
-        model_descriptions[model_name] = model_info.pipeline_tag
-        return f"Successfully loaded model: {model_name}"
-    except Exception as e:
-        return f"Error loading model: {str(e)}"
-
-def execute_command(command: str, project_path: str = None) -> str:
-    """Executes a shell command and returns the output."""
     try:
-        if project_path:
-            process = subprocess.Popen(command, shell=True, cwd=project_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         else:
-            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        output, error = process.communicate()
-        if error:
-            return f"Error: {error.decode('utf-8')}"
-        return output.decode("utf-8")
     except Exception as e:
-        return f"Error executing command: {str(e)}"
-
-def create_hf_project(project_name: str, project_path: str = DEFAULT_PROJECT_PATH):
-    """Creates a new Hugging Face project."""
-    global repo
     try:
-        if os.path.exists(project_path):
-            return f"Error: Directory '{project_path}' already exists!"
-        # Create the repository
-        repo = Repository(local_dir=project_path, clone_from=None)
-        repo.git_init()
-
-        # Add basic files (optional, you can customize this)
-        with open(os.path.join(project_path, "README.md"), "w") as f:
-            f.write(f"# {project_name}\n\nA new Hugging Face project.")
-
-        # Stage all changes
-        repo.git_add(pattern="*")
-        repo.git_commit(commit_message="Initial commit")
-
-        return f"Hugging Face project '{project_name}' created successfully at '{project_path}'"
     except Exception as e:
-        return f"Error creating Hugging Face project: {str(e)}"
 
-def list_project_files(project_path: str = DEFAULT_PROJECT_PATH) -> str:
-    """Lists files in the project directory."""
-    try:
-        files = os.listdir(project_path)
-        if not files:
-            return "Project directory is empty."
-        return "\n".join(files)
-    except Exception as e:
-        return f"Error listing project files: {str(e)}"
 
-def read_file_content(file_path: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
-    """Reads and returns the content of a file in the project."""
-    try:
-        full_path = os.path.join(project_path, file_path)
-        with open(full_path, "r") as f:
-            content = f.read()
-        return content
-    except Exception as e:
-        return f"Error reading file: {str(e)}"
 
-def write_to_file(file_path: str, content: str, project_path: str = DEFAULT_PROJECT_PATH) -> str:
-    """Writes content to a file in the project."""
-    try:
-        full_path = os.path.join(project_path, file_path)
-        with open(full_path, "w") as f:
-            f.write(content)
-        return f"Successfully wrote to '{file_path}'"
-    except Exception as e:
-        return f"Error writing to file: {str(e)}"
 
-def preview_project(project_path: str = DEFAULT_PROJECT_PATH):
-    """Provides a preview of the project, if applicable."""
-    # Assuming a simple HTML preview for now
     try:
-        index_html_path = os.path.join(project_path, "index.html")
-        if os.path.exists(index_html_path):
-            with open(index_html_path, "r") as f:
-                html_content = f.read()
-            display(HTML(html_content))
-            return "Previewing 'index.html'"
-        else:
-            return "No 'index.html' found for preview."
     except Exception as e:
-        return f"Error previewing project: {str(e)}"
 
-def main():
     with gr.Blocks() as demo:
-        gr.Markdown("## FragMixt: Your Hugging Face No-Code App Builder")
-
-        # --- Model Selection ---
-        with gr.Tab("Model"):
-            # --- Model Dropdown with Categories ---
-            model_categories = gr.Dropdown(
-                choices=["Text Generation", "Text Summarization", "Code Generation", "Translation", "Question Answering"],
-                label="Model Category",
-                value="Text Generation"
-            )
-            model_name = gr.Dropdown(
-                choices=[],  # Initially empty, will be populated based on category
-                label="Hugging Face Model Name",
-            )
-            load_button = gr.Button("Load Model")
-            load_output = gr.Textbox(label="Output")
-            model_description = gr.Markdown(label="Model Description")
-
-            # --- Function to populate model names based on category ---
-            def update_model_dropdown(category):
-                models = []
-                api = HfApi()
-                for model in api.list_models():
-                    if model.pipeline_tag == category:
-                        models.append(model.modelId)
-                return gr.Dropdown.update(choices=models)
-
-            # --- Event handler for category dropdown ---
-            model_categories.change(
-                fn=update_model_dropdown,
-                inputs=model_categories,
-                outputs=model_name,
-            )
-
-            # --- Event handler to display model description ---
-            def display_model_description(model_name):
-                global model_descriptions
-                if model_name in model_descriptions:
-                    return model_descriptions[model_name]
-                else:
-                    return "Model description not available."
-
-            model_name.change(
-                fn=display_model_description,
-                inputs=model_name,
-                outputs=model_description,
-            )
-
-            load_button.click(load_hf_model, inputs=model_name, outputs=load_output)
-
-        # --- Chat Interface ---
-        with gr.Tab("Chat"):
-            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True)
-            message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
-            purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-            agent_name = gr.Dropdown(label="Agents", choices=["Generic Agent"], value="Generic Agent", interactive=True)
-            sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-            temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-            max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum numbers of new tokens")
-            top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-            repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-            submit_button = gr.Button(value="Send")
             history = gr.State([])
-
-            def run_chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
-                response = generate_response(message, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
-                history.append((message, response))
-                return history, history
-
-            submit_button.click(run_chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
-
-        # --- Project Management ---
-        with gr.Tab("Project"):
-            project_name = gr.Textbox(label="Project Name", placeholder="MyHuggingFaceApp")
-            create_project_button = gr.Button("Create Hugging Face Project")
-            project_output = gr.Textbox(label="Output", lines=5)
-            file_content = gr.Code(label="File Content", language="python", lines=20)
-            file_path = gr.Textbox(label="File Path (relative to project)", placeholder="src/main.py")
-            read_button = gr.Button("Read File")
-            write_button = gr.Button("Write to File")
-            command_input = gr.Textbox(label="Terminal Command", placeholder="pip install -r requirements.txt")
-            command_output = gr.Textbox(label="Command Output", lines=5)
-            run_command_button = gr.Button("Run Command")
-            preview_button = gr.Button("Preview Project")
-
-            create_project_button.click(create_hf_project, inputs=[project_name], outputs=project_output)
-            read_button.click(read_file_content, inputs=file_path, outputs=file_content)
-            write_button.click(write_to_file, inputs=[file_path, file_content], outputs=project_output)
-            run_command_button.click(execute_command, inputs=command_input, outputs=command_output)
-            preview_button.click(preview_project, outputs=project_output)
 
     demo.launch()
 
 if __name__ == "__main__":
-    main()
 
+++ b/app.py (after)
@@ -7,31 +7,43 @@ from datetime import datetime
 import logging
 
 import gradio as gr
+from huggingface_hub import InferenceClient, cached_download
+from safe_search import safe_search
+from i_search import google, i_search as i_s
 
 # --- Configuration ---
+VERBOSE = True  # Enable verbose logging
+MAX_HISTORY = 5  # Maximum history turns to keep
+MAX_TOKENS = 2048  # Maximum tokens for LLM responses
+TEMPERATURE = 0.7  # Temperature for LLM responses
+TOP_P = 0.8  # Top-p (nucleus sampling) for LLM responses
+REPETITION_PENALTY = 1.5  # Repetition penalty for LLM responses
+MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Name of the LLM model
+
+import os
+API_KEY = os.getenv("HUGGINGFACE_API_KEY")  # Ensure you set the HUGGINGFACE_API_KEY environment variable
 
 # --- Logging Setup ---
 logging.basicConfig(
+    filename="app.log",  # Name of the log file
+    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
+# --- Agents ---
+agents = [
+    "WEB_DEV",
+    "AI_SYSTEM_PROMPT",
+    "PYTHON_CODE_DEV",
+    "DATA_SCIENCE",
+    "UI_UX_DESIGN",
+]
+
 # --- Prompts ---
 PREFIX = """
 {date_time_str}
 Purpose: {purpose}
+Safe Search: {safe_search}
 """
 
 LOG_PROMPT = """
@@ -42,254 +54,338 @@ LOG_RESPONSE = """
 RESPONSE: {resp}
 """
 
+COMPRESS_HISTORY_PROMPT = """
+You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
+History:
+{history}
+"""
+
+ACTION_PROMPT = """
+You are a helpful AI assistant. You are working on the task: {task}
+Your current history is:
+{history}
+What is your next thought?
+thought:
+What is your next action?
+action:
+"""
+
+TASK_PROMPT = """
+You are a helpful AI assistant. Your current history is:
+{history}
+What is the next task?
+task:
+"""
+
+UNDERSTAND_TEST_RESULTS_PROMPT = """
+You are a helpful AI assistant. The test results are:
+{test_results}
+What do you want to know about the test results?
+thought:
+"""
 
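+# The prompt templates above implement a simple thought/action protocol: the
+# model is asked to emit "thought:" and "action:" lines, which run_action()
+# below parses and dispatches to the call_* handlers.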
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
+    """Formats the prompt for the LLM, including the message and relevant history."""
+    prompt = ""
+    # Keep only the last 'max_history_turns' turns
     for user_prompt, bot_response in history[-max_history_turns:]:
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
+    prompt += f"[INST] {message} [/INST]"
     return prompt
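+
+# The [INST] ... [/INST] markers follow the Mistral/Mixtral instruction
+# template that the configured MODEL_NAME expects.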
 
+def run_llm(
+    prompt_template: str,
+    stop_tokens: List[str],
+    purpose: str,
+    **prompt_kwargs,
+) -> str:
+    """Runs the LLM with the given prompt and parameters."""
+    seed = random.randint(1, 1111111111111111)
+    logging.info(f"Seed: {seed}")  # Log the seed
+
+    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    content = PREFIX.format(
+        date_time_str=date_time_str,
+        purpose=purpose,
+        safe_search=safe_search,
+    ) + prompt_template.format(**prompt_kwargs)
+    if VERBOSE:
+        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt
+
+    resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
+    if VERBOSE:
+        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
+    return resp
+
+def generate(
     prompt: str,
     history: List[Tuple[str, str]],
+    agent_name: str = agents[0],
     sys_prompt: str = "",
     temperature: float = TEMPERATURE,
     max_new_tokens: int = MAX_TOKENS,
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
+    """Generates text using the LLM."""
+    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    content = PREFIX.format(
         date_time_str=date_time_str,
+        purpose=sys_prompt,
+        safe_search=safe_search,
+    ) + prompt
     if VERBOSE:
+        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt
 
+    stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
 
     if VERBOSE:
+        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
+    return resp
+
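+# Note: generate() above streams from InferenceClient.text_generation with
+# stream=True and details=True, so each chunk exposes the token text via
+# response.token.text.
+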
+def compress_history(purpose: str, task: str, history: List[Tuple[str, str]], directory: str) -> List[Tuple[str, str]]:
+    """Compresses the history into a shorter summary."""
+    resp = run_llm(
+        COMPRESS_HISTORY_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        purpose=purpose,
+        task=task,
+        history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+    )
+    # Replace the full history with a single summary observation, keeping the
+    # list-of-(user, bot)-tuples shape that the callers expect.
+    history = [("observation: {}".format(resp), "")]
+    return history
+
+def call_search(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+    """Performs a search based on the action input."""
+    logging.info(f"CALLING SEARCH: {action_input}")
     try:
+        if "http" in action_input:
+            if "<" in action_input:
+                action_input = action_input.strip("<")
+            if ">" in action_input:
+                action_input = action_input.strip(">")
+
+            response = i_s(action_input)
+            logging.info(f"Search Result: {response}")
+            history.append(("observation: search result is: {}".format(response), ""))
         else:
+            history.append(("observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n", ""))
     except Exception as e:
+        history.append(("observation: {}\n".format(e), ""))
+    return "MAIN", None, history, task
+
+def call_main(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+    """Handles the main agent interaction loop."""
+    logging.info(f"CALLING MAIN: {action_input}")
+    resp = run_llm(
+        ACTION_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        purpose=purpose,
+        task=task,
+        history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+    )
+    lines = resp.strip().strip("\n").split("\n")
+    for line in lines:
+        if line == "":
+            continue
+        if line.startswith("thought: "):
+            history.append((line, ""))
+            logging.info(f"Thought: {line}")
+        elif line.startswith("action: "):
+            action_name, action_input = parse_action(line)
+            logging.info(f"Action: {action_name} - {action_input}")
+            history.append((line, ""))
+            if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                task = "END"
+            return action_name, action_input, history, task
+        else:
+            history.append((line, ""))
+            logging.info(f"Other Output: {line}")
+    return "MAIN", None, history, task
+
+def call_set_task(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+    """Sets a new task for the agent."""
+    logging.info(f"CALLING SET_TASK: {action_input}")
+    task = run_llm(
+        TASK_PROMPT,
+        stop_tokens=[],
+        purpose=purpose,
+        task=task,
+        history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+    ).strip("\n")
+    history.append(("observation: task has been updated to: {}".format(task), ""))
+    return "MAIN", None, history, task
+
+def end_fn(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+    """Ends the agent interaction."""
+    logging.info(f"CALLING END_FN: {action_input}")
+    task = "END"
+    return "COMPLETE", "COMPLETE", history, task
+
+NAME_TO_FUNC: Dict[str, callable] = {
+    "MAIN": call_main,
+    "UPDATE-TASK": call_set_task,
+    "SEARCH": call_search,
+    "COMPLETE": end_fn,
+}
+
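+# NAME_TO_FUNC maps the action name parsed from the model's "action:" line to
+# its handler; run_action() below falls back to "MAIN" for unknown or empty
+# names.
+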
237
+ def run_action(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_name: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
238
+ """Executes the specified action."""
239
+ logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
240
  try:
241
+ if "RESPONSE" in action_name or "COMPLETE" in action_name:
242
+ action_name = "COMPLETE"
243
+ task = "END"
244
+ return action_name, "COMPLETE", history, task
245
+
246
+ # compress the history when it is long
247
+ if len(history) > MAX_HISTORY:
248
+ logging.info("COMPRESSING HISTORY")
249
+ history = compress_history(purpose, task, history, directory)
250
+ if not action_name in NAME_TO_FUNC:
251
+ action_name = "MAIN"
252
+ if action_name == "" or action_name is None:
253
+ action_name = "MAIN"
254
+ assert action_name in NAME_TO_FUNC
255
+
256
+ logging.info(f"RUN: {action_name} - {action_input}")
257
+ return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
258
  except Exception as e:
259
+ history.append(("observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n", ""))
260
+ logging.error(f"Error in run_action: {e}")
261
+ return "MAIN", None, history, task
262
+
+def run(purpose: str, history: List[Tuple[str, str]]):
+    """Main agent interaction loop; yields the updated history after each action."""
+    task = None
+    directory = "./"
+    history = history or []
+
+    action_name = "UPDATE-TASK" if task is None else "MAIN"
+    action_input = None
+    while True:
+        logging.info("---")
+        logging.info(f"Purpose: {purpose}")
+        logging.info(f"Task: {task}")
+        logging.info("---")
+        logging.info(f"History: {history}")
+        logging.info("---")
+
+        action_name, action_input, history, task = run_action(
+            purpose,
+            task,
+            history,
+            directory,
+            action_name,
+            action_input,
+        )
+        yield history
+        if task == "END":
+            return
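+
+# run() is a generator: each loop iteration executes one action and yields the
+# updated history, e.g.
+#     for history in run("build a landing page", []):
+#         ...  # refresh the chat UI with the latest history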
 
+################################################
+
+# Note: this second definition (with a longer default history window) overrides
+# the format_prompt defined earlier in the file.
+def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 5) -> str:
+    """Formats the prompt for the LLM, including the message and relevant history."""
+    prompt = ""
+    # Keep only the last 'max_history_turns' turns
+    for user_prompt, bot_response in history[-max_history_turns:]:
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def parse_action(line: str) -> Tuple[str, str]:
+    """Parses the action line to get the action name and input."""
+    parts = line.split(":", 1)
+    action_name = parts[0].replace("action", "").strip()
+    action_input = parts[1].strip() if len(parts) == 2 else ""
+    return action_name, action_input
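+
+# Caveat: because parse_action() splits on the first ":", a line shaped like
+# "action: SEARCH action_input=https://URL" yields an empty action name, and
+# run_action() then falls back to "MAIN".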
 
+def main():
+    """Main function to run the Gradio interface."""
+    global client
+    # Initialize the LLM client with the API key from the environment
     try:
+        client = InferenceClient(
+            MODEL_NAME,
+            token=API_KEY,
+        )
     except Exception as e:
+        logging.error(f"Error initializing LLM client: {e}")
+        print("Error initializing LLM client. Please check your API key.")
+        return
 
     with gr.Blocks() as demo:
+        gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
+        gr.Markdown("### Your AI-Powered Development Companion")
+
+        # Chat Interface
+        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+
+        # Input Components
+        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+        agent_name = gr.Dropdown(label="Agents", choices=agents, value=agents[0], interactive=True)
+        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+        temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+        max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum number of new tokens")
+        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+        repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+
+        # Button to submit the message
+        submit_button = gr.Button(value="Send")
+
+        # Project Explorer Tab
+        with gr.Tab("Project Explorer"):
+            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
+            explore_button = gr.Button(value="Explore")
+            project_output = gr.Textbox(label="File Tree", lines=20)
+
+        # Chat App Logic Tab
+        with gr.Tab("Chat App"):
             history = gr.State([])
+            # Example prompts (defined for reference; not currently wired to the UI)
+            examples = [
+                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
+                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
+                ["Generate a simple HTML page with a heading and a paragraph.", "```html\n<!DOCTYPE html>\n<html>\n<head>\n<title>My Simple Page</title>\n</head>\n<body>\n<h1>Welcome to my page!</h1>\n<p>This is a simple paragraph.</p>\n</body>\n</html>\n```"],
+                ["Create a basic SQL query to select all data from a table named 'users'.", "```sql\nSELECT * FROM users;\n```"],
+                ["Design a user interface for a mobile app that allows users to track their daily expenses.", "Here's a basic UI design for a mobile expense tracker app:\n\n**Screen 1: Home**\n- Top: App Name and Balance Display\n- Middle: List of Recent Transactions (Date, Description, Amount)\n- Bottom: Buttons for Add Expense, Add Income, View Categories\n\n**Screen 2: Add Expense**\n- Input fields for Date, Category, Description, Amount\n- Buttons for Save, Cancel\n\n**Screen 3: Expense Categories**\n- List of expense categories (e.g., Food, Transportation, Entertainment)\n- Option to add/edit categories\n\n**Screen 4: Reports**\n- Charts and graphs to visualize spending by category, date range, etc.\n- Filters to customize the reports"],
+            ]
+
+            def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+                """Handles the chat interaction."""
+                prompt = format_prompt(message, history)
+                response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+                history.append((message, response))
+                return history, history
+
+            submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+            # Project Explorer Logic
+            def explore_project(project_path: str) -> str:
+                """Explores the project directory and returns a file tree."""
+                # Requires the external "tree" command to be available on the host.
+                try:
+                    tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
+                    return tree
+                except Exception as e:
+                    return f"Error exploring project: {e}"
+
+            explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])
 
     demo.launch()
 
 if __name__ == "__main__":
+    main()