acecalisto3 committed on
Commit
465ccfc
·
1 Parent(s): 6c97e89
Files changed (2)
  1. app.py +1 -1449
  2. def.py +1262 -0
app.py CHANGED
@@ -1,1453 +1,5 @@
1
- import platform
2
- import psutil
3
- from typing import List, Dict, Optional, Any, Tuple
4
- from dataclasses import dataclass
5
- from enum import Enum
6
- import logging
7
- import time
8
- import ast
9
- import pylint.lint
10
- import radon.complexity
11
- import radon.metrics
12
- from pylint.lint import Run
13
- from pylint.reporters import JSONReporter
14
- from coverage import Coverage
15
- import bandit
16
- from bandit.core import manager
17
- from datetime import datetime
18
- import os
19
- import sys
20
  import streamlit as st
21
- import requests
22
- import asyncio
23
- import statistics
24
- import json
25
- import traceback
26
- from typing import Dict, Any
27
- from datetime import datetime
28
- from pathlib import Path
29
-
30
- class AutonomousAgentApp:
31
- """Main application class for the Autonomous Agent System"""
32
-
33
- def __init__(self):
34
- self.workspace_manager = self.WorkspaceManager(workspace_dir='workspace') # Use self.WorkspaceManager
35
- self.pipeline = self._initialize_pipeline()
36
- self.refinement_loop = self.RefinementLoop(pipeline=self.pipeline) # Use self.RefinementLoop
37
- self.interface = self.StreamlitInterface(self) # Use self.StreamlitInterface
38
-
39
- def _initialize_pipeline(self) -> 'AutonomousAgentApp.DevelopmentPipeline':
40
- """Initialize the development pipeline"""
41
- return self.DevelopmentPipeline(
42
- workspace_manager=self.workspace_manager,
43
- tool_manager=self._setup_tool_manager()
44
- )
45
-
46
- def _setup_tool_manager(self):
47
- """Setup tool manager with configuration"""
48
- return self.ToolManager() # Use self.ToolManager
49
-
50
- def run(self):
51
- """Main application entry point"""
52
- try:
53
- logging.info("Starting Autonomous Agent Application")
54
- self.interface.render_main_interface()
55
- except Exception as e:
56
- logging.error(f"Application error: {str(e)}")
57
- st.error("An error occurred while starting the application. Please check the logs.")
58
- raise
59
-
60
- class WorkspaceManager:
61
- """Manages workspace files and directories."""
62
-
63
- def __init__(self, workspace_dir: str = "workspace"):
64
- self.workspace_dir = workspace_dir
65
- self._ensure_workspace_exists()
66
-
67
- def _ensure_workspace_exists(self):
68
- """Ensure the workspace directory exists."""
69
- os.makedirs(self.workspace_dir, exist_ok=True)
70
-
71
- def create_file(self, filename: str, content: str) -> str:
72
- """Create a file in the workspace with the given content."""
73
- file_path = os.path.join(self.workspace_dir, filename)
74
- with open(file_path, "w") as f:
75
- f.write(content)
76
- return f"File '{filename}' created at '{file_path}'."
77
-
78
- def delete_file(self, filename: str) -> str:
79
- """Delete a file from the workspace."""
80
- file_path = os.path.join(self.workspace_dir, filename)
81
- if os.path.exists(file_path):
82
- os.remove(file_path)
83
- return f"File '{filename}' deleted."
84
- return f"File '{filename}' not found."
85
-
86
- def list_files(self) -> List[str]:
87
- """List all files in the workspace."""
88
- return [
89
- os.path.join(root, file)
90
- for root, _, files in os.walk(self.workspace_dir)
91
- for file in files
92
- ]
93
-
94
- def read_file(self, filename: str) -> str:
95
- """Read the content of a file in the workspace."""
96
- file_path = os.path.join(self.workspace_dir, filename)
97
- if os.path.exists(file_path):
98
- with open(file_path, "r") as f:
99
- return f.read()
100
- return f"File '{filename}' not found."
101
-
102
- def get_workspace_tree(self) -> Dict[str, Any]:
103
- """Get the workspace directory structure as a nested dictionary."""
104
- workspace_path = Path(self.workspace_dir)
105
- return self._build_tree(workspace_path)
106
-
107
- def _build_tree(self, path: Path) -> Dict[str, Any]:
108
- """Recursively build a directory tree."""
109
- if path.is_file():
110
- return {"type": "file", "name": path.name}
111
- elif path.is_dir():
112
- return {
113
- "type": "directory",
114
- "name": path.name,
115
- "children": [self._build_tree(child) for child in path.iterdir()],
116
- }
117
-
118
- class AutonomousAgent:
119
- """Autonomous agent that builds tools and agents based on tasks."""
120
-
121
- def __init__(self, workspace_manager: 'AutonomousAgentApp.WorkspaceManager'): # Use fully qualified name
122
- self.workspace_manager = workspace_manager
123
- self.tools_dir = Path(self.workspace_manager.workspace_dir) / "tools"
124
- self.agents_dir = Path(self.workspace_manager.workspace_dir) / "agents"
125
- self.tools_dir.mkdir(exist_ok=True) # Ensure the tools directory exists
126
- self.agents_dir.mkdir(exist_ok=True) # Ensure the agents directory exists
127
- self.running = True # Flag to control the running state
128
-
129
- async def run(self):
130
- """Run the autonomous agent, continuously processing tasks."""
131
- while self.running:
132
- # Default task execution
133
- await self.default_task()
134
- await asyncio.sleep(1) # Prevent busy waiting
135
-
136
- async def default_task(self):
137
- """Perform the default task of analyzing and generating tools/agents."""
138
- logging.info("Running default task...")
139
- # Simulate task processing
140
- await asyncio.sleep(2) # Simulate time taken for the task
141
-
142
- async def pause(self):
143
- """Pause the current operation to accept user input."""
144
- self.running = False
145
- logging.info("Paused. Waiting for user input...")
146
-
147
- async def accept_user_input(self, user_input: str):
148
- """Process user input and execute commands."""
149
- logging.info(f"User input received: {user_input}")
150
- commands = self.extract_commands(user_input)
151
-
152
- for command in commands:
153
- if command.startswith("generate tool"):
154
- await self.generate_tool(command)
155
- elif command.startswith("generate agent"):
156
- await self.generate_agent(command)
157
- # Add more command handling as needed
158
-
159
- def extract_commands(self, user_input: str) -> List[str]:
160
- """Extract commands from user input."""
161
- # Simple command extraction logic (can be improved with NLP)
162
- return user_input.split(';') # Assume commands are separated by semicolons
163
-
164
- async def run_refinement_cycle(self, task: str) -> Dict[str, Any]:
165
- """Run a refinement cycle for the given task."""
166
- # Step 1: Analyze the task
167
- task_analysis = await self._analyze_task(task)
168
-
169
- # Step 2: Search for relevant approaches/methods
170
- search_results = await self._web_search(task)
171
-
172
- # Step 3: Build tools/agents based on the task
173
- tools_built = await self._build_tools(task_analysis, search_results)
174
-
175
- # Step 4: Execute the tools/agents
176
- execution_results = await self._execute_tools(tools_built)
177
-
178
- return {
179
- "task_analysis": task_analysis,
180
- "search_results": search_results,
181
- "tools_built": tools_built,
182
- "execution_results": execution_results,
183
- }
184
-
185
- async def _analyze_task(self, task: str) -> Dict[str, Any]:
186
- """Analyze the task to determine requirements."""
187
- keywords = self._extract_keywords(task)
188
- requirements = self._generate_requirements(keywords)
189
-
190
- return {
191
- "task": task,
192
- "keywords": keywords,
193
- "requirements": requirements,
194
- }
195
-
196
- def _extract_keywords(self, text: str) -> List[str]:
197
- """Extract keywords from the task text."""
198
- stop_words = {"the", "and", "of", "to", "in", "a", "is", "for", "on", "with"}
199
- words = [word.lower() for word in text.split() if word.lower() not in stop_words]
200
- return list(set(words)) # Remove duplicates
201
-
202
- def _generate_requirements(self, keywords: List[str]) -> List[str]:
203
- """Generate requirements based on extracted keywords."""
204
- requirement_map = {
205
- "data": ["data collection", "data processing", "data visualization"],
206
- "web": ["web scraping", "API integration", "web development"],
207
- "ai": ["machine learning", "natural language processing", "computer vision"],
208
- "automation": ["task automation", "workflow optimization", "scripting"],
209
- }
210
-
211
- requirements = []
212
- for keyword in keywords:
213
- if keyword in requirement_map:
214
- requirements.extend(requirement_map[keyword])
215
-
216
- return requirements
217
-
218
- async def _web_search(self, query: str) -> List[Dict[str, Any]]:
219
- """Perform a web search for relevant approaches/methods."""
220
- try:
221
- response = requests.get(
222
- "https://api.example.com/search",
223
- params={"q": query, "limit": 5}
224
- )
225
- response.raise_for_status()
226
- return response.json().get("results", [])
227
- except Exception as e:
228
- logging.error(f"Web search failed: {e}")
229
- return [{"title": "Example Approach", "url": "https://example.com"}]
230
-
231
- async def _build_tools(self, task_analysis: Dict[str, Any], search_results: List[Dict[str, Any]]) -> List[str]:
232
- """Build tools/agents based on the task and search results."""
233
- tools = []
234
- for requirement in task_analysis["requirements"]:
235
- tool_name = f"tool_for_{requirement.replace(' ', '_')}.py"
236
- tool_path = self.tools_dir / tool_name
237
-
238
- # Generate a simple Python script for the tool
239
- tool_code = self._generate_tool_code(requirement, search_results)
240
- with open(tool_path, "w") as f:
241
- f.write(tool_code)
242
-
243
- tools.append(tool_name)
244
-
245
- return tools
246
-
247
- def _generate_tool_code(self, requirement: str, search_results: List[Dict[str, Any]]) -> str:
248
- """Generate Python code for a tool based on the requirement."""
249
- example_code = ""
250
- for result in search_results:
251
- if requirement.lower() in result["title"].lower():
252
- example_code = f"# Example code based on: {result['title']}\n"
253
- example_code += f"# Source: {result['url']}\n"
254
- break
255
-
256
- tool_code = f"""
257
- {example_code}
258
- def {requirement.replace(' ', '_')}():
259
- print("Executing {requirement}...")
260
- # Add your implementation here
261
- if __name__ == "__main__":
262
- {requirement.replace(' ', '_')}()
263
- """
264
- return tool_code.strip()
265
-
266
- async def _execute_tools(self, tools: List[str]) -> Dict[str, Any]:
267
- """Execute the built tools/agents."""
268
- execution_results = {}
269
- for tool in tools:
270
- tool_path = self.tools_dir / tool
271
- try:
272
- process = await asyncio.create_subprocess_exec(
273
- "python", str(tool_path),
274
- stdout=asyncio.subprocess.PIPE,
275
- stderr=asyncio.subprocess.PIPE
276
- )
277
- stdout, stderr = await process.communicate()
278
-
279
- execution_results[tool] = {
280
- "status": "success" if process.returncode == 0 else "failed",
281
- "stdout": stdout.decode(),
282
- "stderr": stderr.decode(),
283
- }
284
- except Exception as e:
285
- execution_results[tool] = {
286
- "status": "error",
287
- "error": str(e),
288
- }
289
-
290
- return execution_results
291
-
292
- async def generate_tool(self, command: str):
293
- """Generate a tool based on the command."""
294
- tool_name = command.split(" ")[-1] # Extract tool name from command
295
- tool_code = f"# Tool: {tool_name}\n\ndef {tool_name}():\n pass\n" # Placeholder code
296
- tool_path = self.tools_dir / f"{tool_name}.py"
297
-
298
- with open(tool_path, "w") as f:
299
- f.write(tool_code)
300
-
301
- logging.info(f"Generated tool: {tool_name}")
302
-
303
- async def generate_agent(self, command: str):
304
- """Generate an agent based on the command."""
305
- agent_name = command.split(" ")[-1] # Extract agent name from command
306
- agent_code = f"# Agent: {agent_name}\n\ndef {agent_name}():\n pass\n" # Placeholder code
307
- agent_path = self.agents_dir / f"{agent_name}.py"
308
-
309
- with open(agent_path, "w") as f:
310
- f.write(agent_code)
311
-
312
- logging.info(f"Generated agent: {agent_name}")
313
-
314
- def stop(self):
315
- """Stop the autonomous agent."""
316
- self.running = False
317
- logging.info("Autonomous agent stopped.")
318
-
319
- class ToolManager:
320
- """Manages various tools used in the development pipeline."""
321
-
322
- def __init__(self):
323
- self.tools = {
324
- "requirements_analyzer": self._requirements_analyzer,
325
- "task_breakdown": self._task_breakdown,
326
- "code_generator": self._code_generator,
327
- "code_quality_checker": self._code_quality_checker,
328
- "test_generator": self._test_generator,
329
- "test_runner": self._test_runner,
330
- "coverage_analyzer": self._coverage_analyzer,
331
- }
332
-
333
- async def execute_tool(self, tool_name: str, input_data: Any) -> Dict[str, Any]:
334
- """Execute a tool with the given input data."""
335
- if tool_name in self.tools:
336
- return await self.tools[tool_name](input_data)
337
- else:
338
- raise ValueError(f"Tool '{tool_name}' not found.")
339
-
340
- async def _requirements_analyzer(self, requirements: str) -> Dict[str, Any]:
341
- """Analyze requirements and return a structured result."""
342
- # Placeholder implementation
343
- return {"status": "success", "result": {"requirements": requirements}}
344
-
345
- async def _task_breakdown(self, requirements: Dict[str, Any]) -> Dict[str, Any]:
346
- """Break down requirements into tasks."""
347
- # Placeholder implementation
348
- return {"status": "success", "result": ["task1", "task2", "task3"]}
349
-
350
- async def _code_generator(self, tasks: List[str]) -> Dict[str, Any]:
351
- """Generate code based on tasks."""
352
- # Placeholder implementation
353
- return {"status": "success", "result": "generated_code"}
354
-
355
- async def _code_quality_checker(self, code: str) -> Dict[str, Any]:
356
- """Check the quality of the generated code."""
357
- # Placeholder implementation
358
- return {"status": "success", "result": {"quality_score": 0.9}}
359
-
360
- async def _test_generator(self, code: str) -> Dict[str, Any]:
361
- """Generate tests for the code."""
362
- # Placeholder implementation
363
- return {"status": "success", "result": ["test1", "test2", "test3"]}
364
-
365
- async def _test_runner(self, tests: List[str]) -> Dict[str, Any]:
366
- """Run the generated tests."""
367
- # Placeholder implementation
368
- return {"status": "success", "result": {"passed": 3, "failed": 0}}
369
-
370
- async def _coverage_analyzer(self, test_results: Dict[str, Any]) -> Dict[str, Any]:
371
- """Analyze test coverage."""
372
- # Placeholder implementation
373
- return {"status": "success", "result": {"coverage": 0.95}}
374
-
375
- class DevelopmentPipeline:
376
- """Advanced development pipeline with stage management and monitoring"""
377
-
378
- class PipelineStage(Enum):
379
- PLANNING = "planning"
380
- DEVELOPMENT = "development"
381
- TESTING = "testing"
382
- DEPLOYMENT = "deployment"
383
- MAINTENANCE = "maintenance"
384
- ROLLBACK = "rollback"
385
-
386
- def __init__(self, workspace_manager, tool_manager):
387
- self.workspace_manager = workspace_manager
388
- self.tool_manager = tool_manager
389
- self.current_stage = None
390
- self.stage_history = []
391
- self.active_processes = {}
392
- self.stage_metrics = {}
393
- self.logger = self._setup_logger()
394
-
395
- def _setup_logger(self) -> logging.Logger:
396
- logger = logging.getLogger("DevelopmentPipeline")
397
- logger.setLevel(logging.DEBUG)
398
- handler = logging.StreamHandler()
399
- formatter = logging.Formatter(
400
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
401
- )
402
- handler.setFormatter(formatter)
403
- logger.addHandler(handler)
404
- return logger
405
-
406
- async def execute_stage(self, stage: PipelineStage, context: Dict[str, Any]) -> Dict[str, Any]:
407
- """Execute a pipeline stage with monitoring and error handling"""
408
- self.logger.info(f"Starting stage: {stage.value}")
409
- start_time = time.time()
410
-
411
- try:
412
- # Record stage start
413
- self.current_stage = stage
414
- self._record_stage_start(stage, context)
415
-
416
- # Execute stage-specific logic
417
- result = await self._execute_stage_logic(stage, context)
418
-
419
- # Validate stage output
420
- self._validate_stage_output(stage, result)
421
-
422
- # Update metrics
423
- execution_time = time.time() - start_time
424
- self._update_stage_metrics(stage, execution_time, result)
425
-
426
- # Record stage completion
427
- self._record_stage_completion(stage, result)
428
-
429
- return {
430
- "status": "success",
431
- "stage": stage.value,
432
- "result": result,
433
- "execution_time": execution_time,
434
- "metrics": self.stage_metrics.get(stage, {})
435
- }
436
-
437
- except Exception as e:
438
- error_msg = f"Error in stage {stage.value}: {str(e)}"
439
- self.logger.error(error_msg)
440
-
441
- # Handle stage failure
442
- await self._handle_stage_failure(stage, context, e)
443
-
444
- return {
445
- "status": "error",
446
- "stage": stage.value,
447
- "error": error_msg,
448
- "execution_time": time.time() - start_time
449
- }
450
-
451
- async def _execute_stage_logic(self, stage: PipelineStage, context: Dict[str, Any]) -> Dict[str, Any]:
452
- """Execute stage-specific logic with appropriate tools and enhanced error handling."""
453
- """Execute stage-specific logic with appropriate tools"""
454
- if stage == self.PipelineStage.PLANNING:
455
- return await self._execute_planning_stage(context)
456
- elif stage == self.PipelineStage.DEVELOPMENT:
457
- return await self._execute_development_stage(context)
458
- elif stage == self.PipelineStage.TESTING:
459
- return await self._execute_testing_stage(context)
460
- elif stage == self.PipelineStage.DEPLOYMENT:
461
- return await self._execute_deployment_stage(context)
462
- elif stage == self.PipelineStage.MAINTENANCE:
463
- return await self._execute_maintenance_stage(context)
464
- elif stage == self.PipelineStage.ROLLBACK:
465
- return await self._execute_rollback_stage(context)
466
- else:
467
- raise ValueError(f"Unknown pipeline stage: {stage}")
468
-
469
- async def _execute_planning_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
470
- """Execute planning stage with requirement analysis and task breakdown"""
471
- try:
472
- # Analyze requirements
473
- requirements = await self.tool_manager.execute_tool(
474
- "requirements_analyzer",
475
- context.get("requirements", "")
476
- )
477
-
478
- # Generate task breakdown
479
- tasks = await self.tool_manager.execute_tool(
480
- "task_breakdown",
481
- requirements["result"]
482
- )
483
-
484
- # Create project structure
485
- project_structure = await self.workspace_manager.create_project_structure(
486
- context["project_name"],
487
- tasks["result"]
488
- )
489
-
490
- return {
491
- "requirements": requirements["result"],
492
- "tasks": tasks["result"],
493
- "project_structure": project_structure
494
- }
495
- except Exception as e:
496
- raise Exception(f"Planning stage failed: {str(e)}")
497
-
498
- async def _execute_development_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
499
- """Execute development stage with code generation and quality checks"""
500
- try:
501
- # Generate code
502
- code_generation = await self.tool_manager.execute_tool(
503
- "code_generator",
504
- context.get("tasks", [])
505
- )
506
-
507
- # Run initial quality checks
508
- quality_check = await self.tool_manager.execute_tool(
509
- "code_quality_checker",
510
- code_generation["result"]
511
- )
512
-
513
- # Save generated code
514
- saved_files = await self.workspace_manager.save_generated_code(
515
- context["project_name"],
516
- code_generation["result"]
517
- )
518
-
519
- return {
520
- "generated_code": code_generation["result"],
521
- "quality_check": quality_check["result"],
522
- "saved_files": saved_files
523
- }
524
- except Exception as e:
525
- raise Exception(f"Development stage failed: {str(e)}")
526
-
527
- async def _execute_testing_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
528
- """Execute testing stage with comprehensive test suite"""
529
- try:
530
- # Generate tests
531
- test_generation = await self.tool_manager.execute_tool(
532
- "test_generator",
533
- context.get("generated_code", "")
534
- )
535
-
536
- # Run tests
537
- test_results = await self.tool_manager.execute_tool(
538
- "test_runner",
539
- test_generation["result"]
540
- )
541
-
542
- # Generate coverage report
543
- coverage_report = await self.tool_manager.execute_tool(
544
- "coverage_analyzer",
545
- test_results["result"]
546
- )
547
-
548
- return {
549
- "test_cases": test_generation["result"],
550
- "test_results": test_results["result"],
551
- "coverage_report": coverage_report["result"]
552
- }
553
- except Exception as e:
554
- raise Exception(f"Testing stage failed: {str(e)}")
555
-
556
- def _validate_stage_output(self, stage: PipelineStage, result: Dict[str, Any]):
557
- """Validate stage output against defined criteria"""
558
- validation_rules = self._get_validation_rules(stage)
559
- validation_errors = []
560
-
561
- for rule in validation_rules:
562
- if not rule.validate(result):
563
- validation_errors.append(rule.get_error_message())
564
-
565
- if validation_errors:
566
- raise ValueError(f"Stage validation failed: {'; '.join(validation_errors)}")
567
-
568
- def _update_stage_metrics(self, stage: PipelineStage, execution_time: float, result: Dict[str, Any]):
569
- """Update metrics for the stage"""
570
- if stage not in self.stage_metrics:
571
- self.stage_metrics[stage] = {
572
- "total_executions": 0,
573
- "successful_executions": 0,
574
- "failed_executions": 0,
575
- "average_execution_time": 0,
576
- "last_execution_time": None,
577
- "error_rate": 0
578
- }
579
-
580
- metrics = self.stage_metrics[stage]
581
- metrics["total_executions"] += 1
582
- metrics["last_execution_time"] = execution_time
583
-
584
- if result.get("status") == "success":
585
- metrics["successful_executions"] += 1
586
- else:
587
- metrics["failed_executions"] += 1
588
-
589
- metrics["error_rate"] = metrics["failed_executions"] / metrics["total_executions"]
590
- metrics["average_execution_time"] = (
591
- (metrics["average_execution_time"] * (metrics["total_executions"] - 1) + execution_time)
592
- / metrics["total_executions"]
593
- )
594
-
595
- async def _handle_stage_failure(self, stage: PipelineStage, context: Dict[str, Any], error: Exception):
596
- """Handle stage failure with rollback and recovery options"""
597
- self.logger.error(f"Handling failure in stage {stage.value}: {str(error)}")
598
-
599
- # Record failure
600
- self._record_stage_failure(stage, error)
601
-
602
- # Determine if rollback is needed
603
- if self._should_rollback(stage, error):
604
- await self._execute_rollback(stage, context)
605
-
606
- # Attempt recovery
607
- await self._attempt_recovery(stage, context, error)
608
-
609
- def _should_rollback(self, stage: PipelineStage, error: Exception) -> bool:
610
- """Determine if a rollback is needed based on error severity"""
611
- critical_errors = [
612
- "DatabaseError",
613
- "DeploymentError",
614
- "SecurityViolation"
615
- ]
616
- return any(err in str(error) for err in critical_errors)
617
-
618
- async def _execute_rollback(self, stage: PipelineStage, context: Dict[str, Any]):
619
- """Execute rollback procedure for a failed stage"""
620
- self.logger.info(f"Executing rollback for stage {stage.value}")
621
-
622
- try:
623
- # Get rollback point
624
- rollback_point = self._get_rollback_point(stage)
625
-
626
- # Execute rollback
627
- await self.execute_stage(
628
- self.PipelineStage.ROLLBACK,
629
- {
630
- **context,
631
- "rollback_point": rollback_point,
632
- "failed_stage": stage
633
- }
634
- )
635
-
636
- except Exception as e:
637
- self.logger.error(f"Rollback failed: {str(e)}")
638
- # Implement emergency shutdown if rollback fails
639
- self._emergency_shutdown(stage, e)
640
-
641
- def _emergency_shutdown(self, stage: PipelineStage, error: Exception):
642
- """Handle emergency shutdown when rollback fails"""
643
- self.logger.critical(f"Emergency shutdown initiated for stage {stage.value}")
644
- # Implement emergency shutdown procedures
645
- pass
646
-
647
- class CodeMetricsAnalyzer:
648
- """Analyzes code metrics using various tools"""
649
-
650
- def __init__(self):
651
- self.metrics_history = []
652
-
653
- def analyze_code_quality(self, file_path: str) -> Dict[str, Any]:
654
- """Analyzes code quality using multiple metrics"""
655
- try:
656
- # Pylint analysis
657
- pylint_score = self._run_pylint(file_path)
658
-
659
- # Complexity analysis
660
- complexity_score = self._analyze_complexity(file_path)
661
-
662
- # Test coverage analysis
663
- coverage_score = self._analyze_test_coverage(file_path)
664
-
665
- # Security analysis
666
- security_score = self._analyze_security(file_path)
667
-
668
- # Calculate overall quality score
669
- quality_score = self._calculate_overall_score(
670
- pylint_score,
671
- complexity_score,
672
- coverage_score,
673
- security_score
674
- )
675
-
676
- metrics = {
677
- "quality_score": quality_score,
678
- "pylint_score": pylint_score,
679
- "complexity_score": complexity_score,
680
- "coverage_score": coverage_score,
681
- "security_score": security_score,
682
- "timestamp": datetime.now()
683
- }
684
-
685
- self.metrics_history.append(metrics)
686
- return metrics
687
-
688
- except Exception as e:
689
- logging.error(f"Error analyzing code metrics: {str(e)}")
690
- return {
691
- "error": str(e),
692
- "quality_score": 0.0,
693
- "timestamp": datetime.now()
694
- }
695
-
696
- def _run_pylint(self, file_path: str) -> float:
697
- """Runs pylint analysis"""
698
- try:
699
- reporter = JSONReporter()
700
- Run([file_path], reporter=reporter, do_exit=False)
701
- score = reporter.data.get('score', 0.0)
702
- return float(score) / 10.0 # Normalize to 0-1 scale
703
- except Exception as e:
704
- logging.error(f"Pylint analysis error: {str(e)}")
705
- return 0.0
706
-
707
- def _analyze_complexity(self, file_path: str) -> float:
708
- """Analyzes code complexity"""
709
- try:
710
- with open(file_path, 'r') as file:
711
- code = file.read()
712
-
713
- # Calculate cyclomatic complexity
714
- complexity = radon.complexity.cc_visit(code)
715
- avg_complexity = sum(item.complexity for item in complexity) / len(complexity) if complexity else 0
716
-
717
- # Normalize complexity score (0-1 scale, lower is better)
718
- normalized_score = 1.0 - min(avg_complexity / 10.0, 1.0)
719
- return normalized_score
720
-
721
- except Exception as e:
722
- logging.error(f"Complexity analysis error: {str(e)}")
723
- return 0.0
724
-
725
- async def _analyze_current_state(self, project_name: str) -> Dict[str, Any]:
726
- """Analyze current project state with detailed metrics."""
727
- try:
728
- self.logger.info(f"Analyzing current state for project: {project_name}")
729
-
730
- # Collect code metrics
731
- code_metrics = await self._collect_code_metrics(project_name)
732
- self.logger.info("Code metrics collected successfully.")
733
-
734
- # Analyze test coverage
735
- test_coverage = await self._analyze_test_coverage(project_name)
736
- self.logger.info("Test coverage analysis completed.")
737
-
738
- # Check security vulnerabilities
739
- security_analysis = await self._analyze_security(project_name)
740
- self.logger.info("Security analysis completed.")
741
-
742
- # Measure performance metrics
743
- performance_metrics = await self._measure_performance(project_name)
744
- self.logger.info("Performance metrics measured.")
745
-
746
- # Determine if requirements are met
747
- meets_requirements = await self._check_requirements(
748
- code_metrics,
749
- test_coverage,
750
- security_analysis,
751
- performance_metrics
752
- )
753
- self.logger.info("Requirements check completed.")
754
-
755
- return {
756
- "code_metrics": code_metrics,
757
- "test_coverage": test_coverage,
758
- "security_analysis": security_analysis,
759
- "performance_metrics": performance_metrics,
760
- "meets_requirements": meets_requirements,
761
- "timestamp": datetime.now()
762
- }
763
-
764
- except Exception as e:
765
- self.logger.error(f"Error analyzing current state: {str(e)}")
766
- raise
767
-
768
- def _analyze_security(self, file_path: str) -> float:
769
- """Analyzes code security using bandit"""
770
- try:
771
- conf = manager.BanditManager()
772
- conf.discover_files([file_path])
773
- conf.run_tests()
774
-
775
- # Calculate security score based on findings
776
- total_issues = len(conf.get_issue_list())
777
- max_severity = max((issue.severity for issue in conf.get_issue_list()), default=0)
778
-
779
- # Normalize security score (0-1 scale, higher is better)
780
- security_score = 1.0 - (total_issues * max_severity) / 10.0
781
- return max(0.0, min(1.0, security_score))
782
-
783
- except Exception as e:
784
- logging.error(f"Security analysis error: {str(e)}")
785
- return 0.0
786
-
787
- def _calculate_overall_score(self, pylint_score: float, complexity_score: float,
788
- coverage_score: float, security_score: float) -> float:
789
- """Calculates overall code quality score"""
790
- weights = {
791
- 'pylint': 0.3,
792
- 'complexity': 0.2,
793
- 'coverage': 0.25,
794
- 'security': 0.25
795
- }
796
-
797
- overall_score = (
798
- weights['pylint'] * pylint_score +
799
- weights['complexity'] * complexity_score +
800
- weights['coverage'] * coverage_score +
801
- weights['security'] * security_score
802
- )
803
-
804
- return max(0.0, min(1.0, overall_score))
805
-
806
- def get_metrics_history(self) -> List[Dict[str, Any]]:
807
- """Returns the history of metrics measurements"""
808
- return self.metrics_history
809
-
810
- def get_trend_analysis(self) -> Dict[str, Any]:
811
- """Analyzes trends in metrics over time"""
812
- if not self.metrics_history:
813
- return {"status": "No metrics history available"}
814
-
815
- trends = {
816
- "quality_score": self._calculate_trend([m["quality_score"] for m in self.metrics_history]),
817
- "coverage_score": self._calculate_trend([m["coverage_score"] for m in self.metrics_history]),
818
- "security_score": self._calculate_trend([m["security_score"] for m in self.metrics_history])
819
- }
820
-
821
- return trends
822
-
823
- def _calculate_trend(self, values: List[float]) -> Dict[str, Any]:
824
- """Calculates trend statistics for a metric"""
825
- if not values:
826
- return {"trend": "unknown", "change": 0.0}
827
-
828
- recent_values = values[-3:] # Look at last 3 measurements
829
- if len(recent_values) < 2:
830
- return {"trend": "insufficient data", "change": 0.0}
831
-
832
- change = recent_values[-1] - recent_values[0]
833
- trend = "improving" if change > 0 else "declining" if change < 0 else "stable"
834
-
835
- return {
836
- "trend": trend,
837
- "change": change,
838
- "current": recent_values[-1],
839
- "previous": recent_values[0]
840
- }
841
-
842
- class CodeMetricsAnalyzer:
843
- """Analyzes code metrics using various tools"""
844
-
845
- def __init__(self):
846
- self.metrics_history = []
847
-
848
- def analyze_code_quality(self, file_path: str) -> Dict[str, Any]:
849
- """Analyzes code quality using multiple metrics"""
850
- try:
851
- # Pylint analysis
852
- pylint_score = self._run_pylint(file_path)
853
-
854
- # Complexity analysis
855
- complexity_score = self._analyze_complexity(file_path)
856
-
857
- # Test coverage analysis
858
- coverage_score = self._analyze_test_coverage(file_path)
859
-
860
- # Security analysis
861
- security_score = self._analyze_security(file_path)
862
-
863
- # Calculate overall quality score
864
- quality_score = self._calculate_overall_score(
865
- pylint_score,
866
- complexity_score,
867
- coverage_score,
868
- security_score
869
- )
870
-
871
- metrics = {
872
- "quality_score": quality_score,
873
- "pylint_score": pylint_score,
874
- "complexity_score": complexity_score,
875
- "coverage_score": coverage_score,
876
- "security_score": security_score,
877
- "timestamp": datetime.now()
878
- }
879
-
880
- self.metrics_history.append(metrics)
881
- return metrics
882
-
883
- except Exception as e:
884
- logging.error(f"Error analyzing code metrics: {str(e)}")
885
- return {
886
- "error": str(e),
887
- "quality_score": 0.0,
888
- "timestamp": datetime.now()
889
- }
890
-
891
- def _run_pylint(self, file_path: str) -> float:
892
- """Runs pylint analysis"""
893
- try:
894
- reporter = JSONReporter()
895
- Run([file_path], reporter=reporter, do_exit=False)
896
- score = reporter.data.get('score', 0.0)
897
- return float(score) / 10.0 # Normalize to 0-1 scale
898
- except Exception as e:
899
- logging.error(f"Pylint analysis error: {str(e)}")
900
- return 0.0
901
-
902
- def _analyze_complexity(self, file_path: str) -> float:
903
- """Analyzes code complexity"""
904
- try:
905
- with open(file_path, 'r') as file:
906
- code = file.read()
907
-
908
- # Calculate cyclomatic complexity
909
- complexity = radon.complexity.cc_visit(code)
910
- avg_complexity = sum(item.complexity for item in complexity) / len(complexity) if complexity else 0
911
-
912
- # Normalize complexity score (0-1 scale, lower is better)
913
- normalized_score = 1.0 - min(avg_complexity / 10.0, 1.0)
914
- return normalized_score
915
-
916
- except Exception as e:
917
- logging.error(f"Complexity analysis error: {str(e)}")
918
- return 0.0
919
-
920
- async def _analyze_current_state(self, project_name: str) -> Dict[str, Any]:
921
- """Analyze current project state with detailed metrics."""
922
- try:
923
- self.logger.info(f"Analyzing current state for project: {project_name}")
924
-
925
- # Collect code metrics
926
- code_metrics = await self._collect_code_metrics(project_name)
927
- self.logger.info("Code metrics collected successfully.")
928
-
929
- # Analyze test coverage
930
- test_coverage = await self._analyze_test_coverage(project_name)
931
- self.logger.info("Test coverage analysis completed.")
932
-
933
- # Check security vulnerabilities
934
- security_analysis = await self._analyze_security(project_name)
935
- self.logger.info("Security analysis completed.")
936
-
937
- # Measure performance metrics
938
- performance_metrics = await self._measure_performance(project_name)
939
- self.logger.info("Performance metrics measured.")
940
-
941
- # Determine if requirements are met
942
- meets_requirements = await self._check_requirements(
943
- code_metrics,
944
- test_coverage,
945
- security_analysis,
946
- performance_metrics
947
- )
948
- self.logger.info("Requirements check completed.")
949
-
950
- return {
951
- "code_metrics": code_metrics,
952
- "test_coverage": test_coverage,
953
- "security_analysis": security_analysis,
954
- "performance_metrics": performance_metrics,
955
- "meets_requirements": meets_requirements,
956
- "timestamp": datetime.now()
957
- }
958
-
959
- except Exception as e:
960
- self.logger.error(f"Error analyzing current state: {str(e)}")
961
- raise
962
-
963
- def _analyze_security(self, file_path: str) -> float:
964
- """Analyzes code security using bandit"""
965
- try:
966
- conf = manager.BanditManager()
967
- conf.discover_files([file_path])
968
- conf.run_tests()
969
-
970
- # Calculate security score based on findings
971
- total_issues = len(conf.get_issue_list())
972
- max_severity = max((issue.severity for issue in conf.get_issue_list()), default=0)
973
-
974
- # Normalize security score (0-1 scale, higher is better)
975
- security_score = 1.0 - (total_issues * max_severity) / 10.0
976
- return max(0.0, min(1.0, security_score))
977
-
978
- except Exception as e:
979
- logging.error(f"Security analysis error: {str(e)}")
980
- return 0.0
981
-
982
- def _calculate_overall_score(self, pylint_score: float, complexity_score: float,
983
- coverage_score: float, security_score: float) -> float:
984
- """Calculates overall code quality score"""
985
- weights = {
986
- 'pylint': 0.3,
987
- 'complexity': 0.2,
988
- 'coverage': 0.25,
989
- 'security': 0.25
990
- }
991
-
992
- overall_score = (
993
- weights['pylint'] * pylint_score +
994
- weights['complexity'] * complexity_score +
995
- weights['coverage'] * coverage_score +
996
- weights['security'] * security_score
997
- )
998
-
999
- return max(0.0, min(1.0, overall_score))
1000
-
1001
- def get_metrics_history(self) -> List[Dict[str, Any]]:
1002
- """Returns the history of metrics measurements"""
1003
- return self.metrics_history
1004
-
1005
- def get_trend_analysis(self) -> Dict[str, Any]:
1006
- """Analyzes trends in metrics over time"""
1007
- if not self.metrics_history:
1008
- return {"status": "No metrics history available"}
1009
-
1010
- trends = {
1011
- "quality_score": self._calculate_trend([m["quality_score"] for m in self.metrics_history]),
1012
- "coverage_score": self._calculate_trend([m["coverage_score"] for m in self.metrics_history]),
1013
- "security_score": self._calculate_trend([m["security_score"] for m in self.metrics_history])
1014
- }
1015
-
1016
- return trends
1017
-
1018
- def _calculate_trend(self, values: List[float]) -> Dict[str, Any]:
1019
- """Calculates trend statistics for a metric"""
1020
- if not values:
1021
- return {"trend": "unknown", "change": 0.0}
1022
-
1023
- recent_values = values[-3:] # Look at last 3 measurements
1024
- if len(recent_values) < 2:
1025
- return {"trend": "insufficient data", "change": 0.0}
1026
-
1027
- change = recent_values[-1] - recent_values[0]
1028
- trend = "improving" if change > 0 else "declining" if change < 0 else "stable"
1029
-
1030
- return {
1031
- "trend": trend,
1032
- "change": change,
1033
- "current": recent_values[-1],
1034
- "previous": recent_values[0]
1035
- }
1036
-
1037
- @dataclass
1038
- class QualityMetrics:
1039
- """Advanced quality metrics tracking and analysis"""
1040
- code_quality_score: float = 0.0
1041
- test_coverage: float = 0.0
1042
- security_score: str = "unknown"
1043
- performance_score: float = 0.0
1044
- metrics_analyzer: CodeMetricsAnalyzer = None
1045
-
1046
- def __post_init__(self):
1047
- self.metrics_analyzer = CodeMetricsAnalyzer()
1048
- self.history = []
1049
- self.thresholds = {
1050
- "code_quality": 0.85,
1051
- "test_coverage": 0.90,
1052
- "security": 0.85,
1053
- "performance": 0.80
1054
- }
1055
-
1056
- def analyze_code(self, project_name: str) -> Dict[str, Any]:
1057
- """Comprehensive code analysis"""
1058
- try:
1059
- # Get all Python files in the project
1060
- project_files = self._get_project_files(project_name)
1061
-
1062
- aggregated_metrics = {
1063
- "code_quality": 0.0,
1064
- "test_coverage": 0.0,
1065
- "security": 0.0,
1066
- "performance": 0.0,
1067
- "files_analyzed": len(project_files),
1068
- "detailed_metrics": []
1069
- }
1070
-
1071
- for file_path in project_files:
1072
- metrics = self.metrics_analyzer.analyze_code_quality(file_path)
1073
- aggregated_metrics["detailed_metrics"].append({
1074
- "file": file_path,
1075
- "metrics": metrics
1076
- })
1077
-
1078
- # Update aggregated scores
1079
- aggregated_metrics["code_quality"] += metrics["quality_score"]
1080
- aggregated_metrics["test_coverage"] += metrics["coverage_score"]
1081
- aggregated_metrics["security"] += metrics["security_score"]
1082
-
1083
- # Calculate averages
1084
- if project_files:
1085
- for key in ["code_quality", "test_coverage", "security"]:
1086
- aggregated_metrics[key] /= len(project_files)
1087
-
1088
- # Update instance variables
1089
- self.code_quality_score = aggregated_metrics["code_quality"]
1090
- self.test_coverage = aggregated_metrics["test_coverage"]
1091
- self.security_score = str(aggregated_metrics["security"])
1092
-
1093
- # Add to history
1094
- self.history.append({
1095
- "timestamp": datetime.now(),
1096
- "metrics": aggregated_metrics
1097
- })
1098
-
1099
- return aggregated_metrics
1100
-
1101
- except Exception as e:
1102
- logging.error(f"Error in code analysis: {str(e)}")
1103
- return {
1104
- "error": str(e),
1105
- "code_quality": 0.0,
1106
- "test_coverage": 0.0,
1107
- "security": "error",
1108
- "performance": 0.0
1109
- }
1110
-
1111
- def _get_project_files(self, project_name: str) -> List[str]:
1112
- """Get all Python files in the project"""
1113
- project_dir = os.path.join(os.getcwd(), project_name)
1114
- python_files = []
1115
-
1116
- for root, _, files in os.walk(project_dir):
1117
- for file in files:
1118
- if file.endswith('.py'):
1119
- python_files.append(os.path.join(root, file))
1120
-
1121
- return python_files
1122
-
1123
- def get_improvement_suggestions(self) -> List[str]:
1124
- """Generate improvement suggestions based on metrics"""
1125
- suggestions = []
1126
- latest_metrics = self.history[-1]["metrics"] if self.history else None
1127
-
1128
- if not latest_metrics:
1129
- return ["No metrics available for analysis"]
1130
-
1131
- if latest_metrics["code_quality"] < self.thresholds["code_quality"]:
1132
- suggestions.append(
1133
- f"Code quality score ({latest_metrics['code_quality']:.2f}) is below threshold "
1134
- f"({self.thresholds['code_quality']}). Consider refactoring complex methods."
1135
- )
1136
-
1137
- if latest_metrics["test_coverage"] < self.thresholds["test_coverage"]:
1138
- suggestions.append(
1139
- f"Test coverage ({latest_metrics['test_coverage']:.2f}) is below threshold "
1140
- f"({self.thresholds['test_coverage']}). Add more unit tests."
1141
- )
1142
-
1143
- if float(latest_metrics["security"]) < self.thresholds["security"]:
1144
- suggestions.append(
1145
- f"Security score ({latest_metrics['security']}) is below threshold "
1146
- f"({self.thresholds['security']}). Address security vulnerabilities."
1147
- )
1148
-
1149
- return suggestions
1150
-
1151
- class ErrorTracker:
1152
- """Enhanced error tracking and analysis"""
1153
- def __init__(self):
1154
- self.errors: List[Dict[str, Any]] = []
1155
- self.error_patterns: Dict[str, int] = {}
1156
- self.critical_errors: List[Dict[str, Any]] = []
1157
-
1158
- def add_error(self, error_type: str, message: str, severity: str = "normal"):
1159
- """Add an error with enhanced tracking"""
1160
- error_entry = {
1161
- "type": error_type,
1162
- "message": message,
1163
- "severity": severity,
1164
- "timestamp": datetime.now(),
1165
- "stack_trace": traceback.format_exc()
1166
- }
1167
-
1168
- self.errors.append(error_entry)
1169
-
1170
- # Track error patterns
1171
- if error_type in self.error_patterns:
1172
- self.error_patterns[error_type] += 1
1173
- else:
1174
- self.error_patterns[error_type] = 1
1175
-
1176
- # Track critical errors
1177
- if severity == "critical":
1178
- self.critical_errors.append(error_entry)
1179
- self._notify_critical_error(error_entry)
1180
-
1181
- def _notify_critical_error(self, error: Dict[str, Any]):
1182
- """Handle critical error notification"""
1183
- logging.critical(f"Critical error detected: {error['message']}")
1184
- # Implement notification system here (e.g., email, Slack)
1185
-
1186
- def get_error_analysis(self) -> Dict[str, Any]:
1187
- """Generate comprehensive error analysis"""
1188
- return {
1189
- "total_errors": len(self.errors),
1190
- "error_patterns": self.error_patterns,
1191
- "critical_errors": len(self.critical_errors),
1192
- "most_common_error": max(self.error_patterns.items(), key=lambda x: x[1]) if self.error_patterns else None,
1193
- "error_trend": self._analyze_error_trend()
1194
- }
1195
-
1196
- def _analyze_error_trend(self) -> Dict[str, Any]:
1197
- """Analyze error trends over time"""
1198
- if not self.errors:
1199
- return {"trend": "no errors"}
1200
-
1201
- # Group errors by hour
1202
- error_timeline = {}
1203
- for error in self.errors:
1204
- hour = error["timestamp"].replace(minute=0, second=0, microsecond=0)
1205
- if hour in error_timeline:
1206
- error_timeline[hour] += 1
1207
- else:
1208
- error_timeline[hour] = 1
1209
-
1210
- # Calculate trend
1211
- timeline_values = list(error_timeline.values())
1212
- if len(timeline_values) < 2:
1213
- return {"trend": "insufficient data"}
1214
-
1215
- trend = "increasing" if timeline_values[-1] > timeline_values[0] else "decreasing"
1216
- return {
1217
- "trend": trend,
1218
- "current_rate": timeline_values[-1],
1219
- "initial_rate": timeline_values[0]
1220
- }
1221
-
1222
- class ProjectAnalytics:
1223
- """Enhanced project analytics and reporting"""
1224
- """Enhanced project analytics and reporting"""
1225
- def __init__(self, workspace_manager):
1226
- self.workspace_manager = workspace_manager
1227
- self.metrics_analyzer = CodeMetricsAnalyzer()
1228
- self.analysis_history = []
1229
-
1230
- def generate_project_report(self, project_name: str) -> Dict[str, Any]:
1231
- """Generate comprehensive project report"""
1232
- try:
1233
- current_analysis = {
1234
- "timestamp": datetime.now(),
1235
- "basic_metrics": self._get_basic_metrics(project_name),
1236
- "code_quality": self._get_code_quality_metrics(project_name),
1237
- "performance": self._get_performance_metrics(project_name),
1238
- "security": self._get_security_metrics(project_name),
1239
- "dependencies": self._analyze_dependencies(project_name)
1240
- }
1241
-
1242
- self.analysis_history.append(current_analysis)
1243
-
1244
- return {
1245
- "current_analysis": current_analysis,
1246
- "historical_trends": self._analyze_trends(),
1247
- "recommendations": self._generate_recommendations(current_analysis)
1248
- }
1249
-
1250
- except Exception as e:
1251
- logging.error(f"Error generating project report: {str(e)}")
1252
- return {"error": str(e)}
1253
-
1254
- class StreamlitInterface:
1255
- """Streamlit UI integration for the Autonomous Agent system."""
1256
-
1257
- def __init__(self, app: AutonomousAgentApp):
1258
- self.app = app
1259
-
1260
- def render_main_interface(self):
1261
- """Render the main Streamlit interface."""
1262
- st.title("Autonomous Agent System")
1263
-
1264
- # Create tabs for different functionalities
1265
- tab_names = ["Autonomous Agent", "Workspace Management", "Settings"]
1266
- selected_tab = st.selectbox("Select a Tab", tab_names)
1267
-
1268
- if selected_tab == "Autonomous Agent":
1269
- self.render_autonomous_agent_tab()
1270
- elif selected_tab == "Workspace Management":
1271
- self.render_workspace_management_tab()
1272
- elif selected_tab == "Settings":
1273
- self.render_settings_tab()
1274
-
1275
- def render_autonomous_agent_tab(self):
1276
- """Render the Autonomous Agent tab."""
1277
- st.header("Autonomous Agent")
1278
- task = st.text_area("Enter a task for the autonomous agent:")
1279
-
1280
- if st.button("Run Autonomous Agent"):
1281
- if task:
1282
- # Run the autonomous agent with the provided task
1283
- try:
1284
- result = asyncio.run(self.app.refinement_loop.run_refinement_cycle(task))
1285
- st.success(f"Result: {result}")
1286
- except Exception as e:
1287
- st.error(f"An error occurred: {str(e)}")
1288
-
1289
- def render_workspace_management_tab(self):
1290
- """Render the Workspace Management tab with a workspace explorer."""
1291
- st.header("Workspace Management")
1292
-
1293
- # Workspace Explorer
1294
- st.subheader("Workspace Explorer")
1295
- workspace_tree = self.app.workspace_manager.get_workspace_tree()
1296
- self._render_tree(workspace_tree)
1297
-
1298
- # File creation
1299
- st.subheader("Create a File")
1300
- new_filename = st.text_input("Enter filename:")
1301
- new_file_content = st.text_area("Enter file content:")
1302
- if st.button("Create File"):
1303
- if new_filename and new_file_content:
1304
- result = self.app.workspace_manager.create_file(new_filename, new_file_content)
1305
- st.success(result)
1306
- else:
1307
- st.error("Filename and content are required.")
1308
-
1309
- # File deletion
1310
- st.subheader("Delete a File")
1311
- delete_filename = st.text_input("Enter filename to delete:")
1312
- if st.button("Delete File"):
1313
- if delete_filename:
1314
- result = self.app.workspace_manager.delete_file(delete_filename)
1315
- st.success(result)
1316
- else:
1317
- st.error("Filename is required.")
1318
-
1319
- def _render_tree(self, tree: Dict[str, Any], level: int = 0):
1320
- """Recursively render the workspace directory tree."""
1321
- if tree["type"] == "file":
1322
- st.write(" " * level + f"📄 {tree['name']}")
1323
- elif tree["type"] == "directory":
1324
- st.write(" " * level + f"📁 {tree['name']}")
1325
- for child in tree["children"]:
1326
- self._render_tree(child, level + 1)
1327
-
1328
- def render_settings_tab(self):
1329
- """Render the Settings tab."""
1330
- st.header("Application Settings")
1331
-
1332
- # Section 1: Refinement Process Configuration
1333
- st.subheader("Refinement Process Settings")
1334
-
1335
- # Adjust maximum refinement iterations
1336
- current_max_iter = self.app.refinement_loop.max_iterations
1337
- new_max_iter = st.number_input(
1338
- "Maximum Refinement Iterations",
1339
- min_value=1,
1340
- max_value=20,
1341
- value=current_max_iter,
1342
- help="Maximum number of refinement cycles to perform"
1343
- )
1344
- if new_max_iter != current_max_iter:
1345
- self.app.refinement_loop.max_iterations = new_max_iter
1346
- st.success(f"Updated maximum iterations to {new_max_iter}")
1347
-
1348
- # Section 2: Quality Threshold Configuration
1349
- st.subheader("Quality Thresholds")
1350
-
1351
- # Get current thresholds
1352
- thresholds = self.app.refinement_loop.quality_metrics.thresholds
1353
-
1354
- col1, col2, col3 = st.columns(3)
1355
- with col1:
1356
- new_code_quality = st.slider(
1357
- "Code Quality Threshold",
1358
- 0.0, 1.0, thresholds["code_quality"],
1359
- help="Minimum acceptable code quality score"
1360
- )
1361
- with col2:
1362
- new_test_coverage = st.slider(
1363
- "Test Coverage Threshold",
1364
- 0.0, 1.0, thresholds["test_coverage"],
1365
- help="Minimum required test coverage"
1366
- )
1367
- with col3:
1368
- new_security = st.slider(
1369
- "Security Threshold",
1370
- 0.0, 1.0, thresholds["security"],
1371
- help="Minimum acceptable security score"
1372
- )
1373
-
1374
- if st.button("Update Quality Thresholds"):
1375
- self.app.refinement_loop.quality_metrics.thresholds.update({
1376
- "code_quality": new_code_quality,
1377
- "test_coverage": new_test_coverage,
1378
- "security": new_security
1379
- })
1380
- st.success("Quality thresholds updated!")
1381
-
1382
- # Section 3: Performance Configuration
1383
- st.subheader("Performance Settings")
1384
-
1385
- # Concurrency settings
1386
- concurrency_level = st.selectbox(
1387
- "Max Concurrency",
1388
- options=[1, 2, 4, 8],
1389
- index=2,
1390
- help="Maximum parallel tasks for code analysis"
1391
- )
1392
-
1393
- # Resource limits
1394
- mem_limit = st.slider(
1395
- "Memory Limit (GB)",
1396
- 1, 16, 4,
1397
- help="Maximum memory allocation for pipeline operations"
1398
- )
1399
-
1400
- # Section 4: Security Settings
1401
- st.subheader("Security Configuration")
1402
-
1403
- # Security rules toggle
1404
- enable_security_scan = st.checkbox(
1405
- "Enable Real-time Security Scanning",
1406
- value=True,
1407
- help="Perform continuous security analysis during development"
1408
- )
1409
-
1410
- # Severity level filtering
1411
- security_level = st.selectbox(
1412
- "Minimum Security Severity Level",
1413
- ["Low", "Medium", "High", "Critical"],
1414
- index=1,
1415
- help="Minimum severity level to trigger security alerts"
1416
- )
1417
-
1418
- # Section 5: Workspace Configuration
1419
- st.subheader("Workspace Settings")
1420
- current_workspace = self.app.workspace_manager.workspace_dir
1421
- st.write(f"Current Workspace: `{current_workspace}`")
1422
-
1423
- # Workspace actions
1424
- if st.button("Clear Workspace Cache"):
1425
- self.app.workspace_manager.clean_cache()
1426
- st.success("Workspace cache cleared!")
1427
-
1428
- # Section 6: Diagnostic Settings
1429
- st.subheader("Diagnostics")
1430
-
1431
- # Logging controls
1432
- log_level = st.selectbox(
1433
- "Logging Level",
1434
- ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
1435
- index=1
1436
- )
1437
- logging.getLogger().setLevel(log_level)
1438
-
1439
- # Debug mode toggle
1440
- debug_mode = st.checkbox("Enable Debug Mode")
1441
- if debug_mode:
1442
- self.app.refinement_loop.logger.setLevel(logging.DEBUG)
1443
- else:
1444
- self.app.refinement_loop.logger.setLevel(logging.INFO)
1445
-
1446
- # Section 7: System Information
1447
- st.subheader("System Info")
1448
- st.write(f"Python Version: {sys.version}")
1449
- st.write(f"Platform: {platform.platform()}")
1450
- st.write(f"Available Memory: {psutil.virtual_memory().available / (1024**3):.1f} GB free")
1451
 
1452
  def main():
1453
  app = AutonomousAgentApp()

1
  import streamlit as st
2
+ from definitions import AutonomousAgentApp
3
 
4
  def main():
5
  app = AutonomousAgentApp()
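
The new app.py shown above ends after constructing the application object. For reference only, a minimal, hypothetical sketch of a complete entry point for this structure is given below; it assumes AutonomousAgentApp is importable from a module named "definitions" (matching the new import line; the added file def.py cannot be imported directly, since def is a reserved word in Python) and that its run() method renders the Streamlit interface, as in the code removed above.

# Hypothetical sketch only: assumes a module named "definitions" exporting AutonomousAgentApp.
from definitions import AutonomousAgentApp

def main():
    # Build the application and hand control to its Streamlit-based interface.
    app = AutonomousAgentApp()
    app.run()

if __name__ == "__main__":
    main()
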
def.py ADDED
@@ -0,0 +1,1262 @@
1
+ import platform
2
+ import psutil
3
+ from typing import List, Dict, Optional, Any, Tuple
4
+ from dataclasses import dataclass
5
+ from enum import Enum
6
+ import logging
7
+ import time
8
+ import ast
9
+ import pylint.lint
10
+ import radon.complexity
11
+ import radon.metrics
12
+ from pylint.lint import Run
13
+ from pylint.reporters import JSONReporter
14
+ from coverage import Coverage
15
+ import bandit
16
+ from bandit.core import manager
17
+ from datetime import datetime
18
+ import os
19
+ import sys
20
+ import streamlit as st # needed for the st.* UI calls used throughout this module
+ import requests
21
+ import asyncio
22
+ import statistics
23
+ import json
24
+ import traceback
+ from pathlib import Path
28
+
29
+
30
+ class AutonomousAgentApp:
31
+ """Main application class for the Autonomous Agent System"""
32
+
33
+ def __init__(self):
34
+ self.workspace_manager = self.WorkspaceManager(workspace_dir='workspace') # Use self.WorkspaceManager
35
+ self.pipeline = self._initialize_pipeline()
36
+ self.refinement_loop = self.RefinementLoop(pipeline=self.pipeline) # Use self.RefinementLoop
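+ # NOTE: RefinementLoop is not defined in this file; it must be provided as a nested class before this line can run.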
37
+ self.interface = self.StreamlitInterface(self) # Use self.StreamlitInterface
38
+
39
+ def _initialize_pipeline(self) -> 'AutonomousAgentApp.DevelopmentPipeline':
40
+ """Initialize the development pipeline"""
41
+ return self.DevelopmentPipeline(
42
+ workspace_manager=self.workspace_manager,
43
+ tool_manager=self._setup_tool_manager()
44
+ )
45
+
46
+ def _setup_tool_manager(self):
47
+ """Setup tool manager with configuration"""
48
+ return self.ToolManager() # Use self.ToolManager
49
+
50
+ def run(self):
51
+ """Main application entry point"""
52
+ try:
53
+ logging.info("Starting Autonomous Agent Application")
54
+ self.interface.render_main_interface()
55
+ except Exception as e:
56
+ logging.error(f"Application error: {str(e)}")
57
+ st.error("An error occurred while starting the application. Please check the logs.")
58
+ raise
59
+
60
+ class WorkspaceManager:
61
+ """Manages workspace files and directories."""
62
+
63
+ def __init__(self, workspace_dir: str = "workspace"):
64
+ self.workspace_dir = workspace_dir
65
+ self._ensure_workspace_exists()
66
+
67
+ def _ensure_workspace_exists(self):
68
+ """Ensure the workspace directory exists."""
69
+ os.makedirs(self.workspace_dir, exist_ok=True)
70
+
71
+ def create_file(self, filename: str, content: str) -> str:
72
+ """Create a file in the workspace with the given content."""
73
+ file_path = os.path.join(self.workspace_dir, filename)
74
+ with open(file_path, "w") as f:
75
+ f.write(content)
76
+ return f"File '{filename}' created at '{file_path}'."
77
+
78
+ def delete_file(self, filename: str) -> str:
79
+ """Delete a file from the workspace."""
80
+ file_path = os.path.join(self.workspace_dir, filename)
81
+ if os.path.exists(file_path):
82
+ os.remove(file_path)
83
+ return f"File '{filename}' deleted."
84
+ return f"File '{filename}' not found."
85
+
86
+ def list_files(self) -> List[str]:
87
+ """List all files in the workspace."""
88
+ return [
89
+ os.path.join(root, file)
90
+ for root, _, files in os.walk(self.workspace_dir)
91
+ for file in files
92
+ ]
93
+
94
+ def read_file(self, filename: str) -> str:
95
+ """Read the content of a file in the workspace."""
96
+ file_path = os.path.join(self.workspace_dir, filename)
97
+ if os.path.exists(file_path):
98
+ with open(file_path, "r") as f:
99
+ return f.read()
100
+ return f"File '{filename}' not found."
101
+
102
+ def get_workspace_tree(self) -> Dict[str, Any]:
103
+ """Get the workspace directory structure as a nested dictionary."""
104
+ workspace_path = Path(self.workspace_dir)
105
+ return self._build_tree(workspace_path)
106
+
107
+ def _build_tree(self, path: Path) -> Dict[str, Any]:
108
+ """Recursively build a directory tree."""
109
+ if path.is_file():
110
+ return {"type": "file", "name": path.name}
111
+ elif path.is_dir():
112
+ return {
113
+ "type": "directory",
114
+ "name": path.name,
115
+ "children": [self._build_tree(child) for child in path.iterdir()],
116
+ }
117
+
118
+ class AutonomousAgent:
119
+ """Autonomous agent that builds tools and agents based on tasks."""
120
+
121
+ def __init__(self, workspace_manager: 'AutonomousAgentApp.WorkspaceManager'): # Use fully qualified name
122
+ self.workspace_manager = workspace_manager
123
+ self.tools_dir = Path(self.workspace_manager.workspace_dir) / "tools"
124
+ self.agents_dir = Path(self.workspace_manager.workspace_dir) / "agents"
125
+ self.tools_dir.mkdir(exist_ok=True) # Ensure the tools directory exists
126
+ self.agents_dir.mkdir(exist_ok=True) # Ensure the agents directory exists
127
+ self.running = True # Flag to control the running state
128
+
129
+ async def run(self):
130
+ """Run the autonomous agent, continuously processing tasks."""
131
+ while self.running:
132
+ # Default task execution
133
+ await self.default_task()
134
+ await asyncio.sleep(1) # Prevent busy waiting
135
+
136
+ async def default_task(self):
137
+ """Perform the default task of analyzing and generating tools/agents."""
138
+ logging.info("Running default task...")
139
+ # Simulate task processing
140
+ await asyncio.sleep(2) # Simulate time taken for the task
141
+
142
+ async def pause(self):
143
+ """Pause the current operation to accept user input."""
144
+ self.running = False
145
+ logging.info("Paused. Waiting for user input...")
146
+
147
+ async def accept_user_input(self, user_input: str):
148
+ """Process user input and execute commands."""
149
+ logging.info(f"User input received: {user_input}")
150
+ commands = self.extract_commands(user_input)
151
+
152
+ for command in commands:
153
+ if command.startswith("generate tool"):
154
+ await self.generate_tool(command)
155
+ elif command.startswith("generate agent"):
156
+ await self.generate_agent(command)
157
+ # Add more command handling as needed
158
+
159
+ def extract_commands(self, user_input: str) -> List[str]:
160
+ """Extract commands from user input."""
161
+ # Simple command extraction logic (can be improved with NLP)
162
+ return user_input.split(';') # Assume commands are separated by semicolons
163
+
164
+ async def run_refinement_cycle(self, task: str) -> Dict[str, Any]:
165
+ """Run a refinement cycle for the given task."""
166
+ # Step 1: Analyze the task
167
+ task_analysis = await self._analyze_task(task)
168
+
169
+ # Step 2: Search for relevant approaches/methods
170
+ search_results = await self._web_search(task)
171
+
172
+ # Step 3: Build tools/agents based on the task
173
+ tools_built = await self._build_tools(task_analysis, search_results)
174
+
175
+ # Step 4: Execute the tools/agents
176
+ execution_results = await self._execute_tools(tools_built)
177
+
178
+ return {
179
+ "task_analysis": task_analysis,
180
+ "search_results": search_results,
181
+ "tools_built": tools_built,
182
+ "execution_results": execution_results,
183
+ }
184
+
185
+ async def _analyze_task(self, task: str) -> Dict[str, Any]:
186
+ """Analyze the task to determine requirements."""
187
+ keywords = self._extract_keywords(task)
188
+ requirements = self._generate_requirements(keywords)
189
+
190
+ return {
191
+ "task": task,
192
+ "keywords": keywords,
193
+ "requirements": requirements,
194
+ }
195
+
196
+ def _extract_keywords(self, text: str) -> List[str]:
197
+ """Extract keywords from the task text."""
198
+ stop_words = {"the", "and", "of", "to", "in", "a", "is", "for", "on", "with"}
199
+ words = [word.lower() for word in text.split() if word.lower() not in stop_words]
200
+ return list(set(words)) # Remove duplicates
201
+
202
+ def _generate_requirements(self, keywords: List[str]) -> List[str]:
203
+ """Generate requirements based on extracted keywords."""
204
+ requirement_map = {
205
+ "data": ["data collection", "data processing", "data visualization"],
206
+ "web": ["web scraping", "API integration", "web development"],
207
+ "ai": ["machine learning", "natural language processing", "computer vision"],
208
+ "automation": ["task automation", "workflow optimization", "scripting"],
209
+ }
210
+
211
+ requirements = []
212
+ for keyword in keywords:
213
+ if keyword in requirement_map:
214
+ requirements.extend(requirement_map[keyword])
215
+
216
+ return requirements
217
+
218
+ async def _web_search(self, query: str) -> List[Dict[str, Any]]:
219
+ """Perform a web search for relevant approaches/methods."""
220
+ try:
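+ # NOTE: "https://api.example.com/search" below is a placeholder endpoint; swap in a real search API before relying on these results.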
221
+ response = requests.get(
222
+ "https://api.example.com/search",
223
+ params={"q": query, "limit": 5}
224
+ )
225
+ response.raise_for_status()
226
+ return response.json().get("results", [])
227
+ except Exception as e:
228
+ logging.error(f"Web search failed: {e}")
229
+ return [{"title": "Example Approach", "url": "https://example.com"}]
230
+
231
+ async def _build_tools(self, task_analysis: Dict[str, Any], search_results: List[Dict[str, Any]]) -> List[str]:
232
+ """Build tools/agents based on the task and search results."""
233
+ tools = []
234
+ for requirement in task_analysis["requirements"]:
235
+ tool_name = f"tool_for_{requirement.replace(' ', '_')}.py"
236
+ tool_path = self.tools_dir / tool_name
237
+
238
+ # Generate a simple Python script for the tool
239
+ tool_code = self._generate_tool_code(requirement, search_results)
240
+ with open(tool_path, "w") as f:
241
+ f.write(tool_code)
242
+
243
+ tools.append(tool_name)
244
+
245
+ return tools
246
+
247
+ def _generate_tool_code(self, requirement: str, search_results: List[Dict[str, Any]]) -> str:
248
+ """Generate Python code for a tool based on the requirement."""
249
+ example_code = ""
250
+ for result in search_results:
251
+ if requirement.lower() in result["title"].lower():
252
+ example_code = f"# Example code based on: {result['title']}\n"
253
+ example_code += f"# Source: {result['url']}\n"
254
+ break
255
+
256
+ tool_code = f"""
257
+ {example_code}
258
+ def {requirement.replace(' ', '_')}():
259
+ print("Executing {requirement}...")
260
+ # Add your implementation here
261
+ if __name__ == "__main__":
262
+ {requirement.replace(' ', '_')}()
263
+ """
264
+ return tool_code.strip()
265
+
266
+ async def _execute_tools(self, tools: List[str]) -> Dict[str, Any]:
267
+ """Execute the built tools/agents."""
268
+ execution_results = {}
269
+ for tool in tools:
270
+ tool_path = self.tools_dir / tool
271
+ try:
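+ # NOTE: "python" is resolved from PATH here; sys.executable could be used instead to guarantee the current interpreter.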
272
+ process = await asyncio.create_subprocess_exec(
273
+ "python", str(tool_path),
274
+ stdout=asyncio.subprocess.PIPE,
275
+ stderr=asyncio.subprocess.PIPE
276
+ )
277
+ stdout, stderr = await process.communicate()
278
+
279
+ execution_results[tool] = {
280
+ "status": "success" if process.returncode == 0 else "failed",
281
+ "stdout": stdout.decode(),
282
+ "stderr": stderr.decode(),
283
+ }
284
+ except Exception as e:
285
+ execution_results[tool] = {
286
+ "status": "error",
287
+ "error": str(e),
288
+ }
289
+
290
+ return execution_results
291
+
292
+ async def generate_tool(self, command: str):
293
+ """Generate a tool based on the command."""
294
+ tool_name = command.split(" ")[-1] # Extract tool name from command
295
+ tool_code = f"# Tool: {tool_name}\n\ndef {tool_name}():\n pass\n" # Placeholder code
296
+ tool_path = self.tools_dir / f"{tool_name}.py"
297
+
298
+ with open(tool_path, "w") as f:
299
+ f.write(tool_code)
300
+
301
+ logging.info(f"Generated tool: {tool_name}")
302
+
303
+ async def generate_agent(self, command: str):
304
+ """Generate an agent based on the command."""
305
+ agent_name = command.split(" ")[-1] # Extract agent name from command
306
+ agent_code = f"# Agent: {agent_name}\n\ndef {agent_name}():\n pass\n" # Placeholder code
307
+ agent_path = self.agents_dir / f"{agent_name}.py"
308
+
309
+ with open(agent_path, "w") as f:
310
+ f.write(agent_code)
311
+
312
+ logging.info(f"Generated agent: {agent_name}")
313
+
314
+ def stop(self):
315
+ """Stop the autonomous agent."""
316
+ self.running = False
317
+ logging.info("Autonomous agent stopped.")
318
+
319
+ class ToolManager:
320
+ """Manages various tools used in the development pipeline."""
321
+
322
+ def __init__(self):
323
+ self.tools = {
324
+ "requirements_analyzer": self._requirements_analyzer,
325
+ "task_breakdown": self._task_breakdown,
326
+ "code_generator": self._code_generator,
327
+ "code_quality_checker": self._code_quality_checker,
328
+ "test_generator": self._test_generator,
329
+ "test_runner": self._test_runner,
330
+ "coverage_analyzer": self._coverage_analyzer,
331
+ }
332
+
333
+ async def execute_tool(self, tool_name: str, input_data: Any) -> Dict[str, Any]:
334
+ """Execute a tool with the given input data."""
335
+ if tool_name in self.tools:
336
+ return await self.tools[tool_name](input_data)
337
+ else:
338
+ raise ValueError(f"Tool '{tool_name}' not found.")
339
+
340
+ async def _requirements_analyzer(self, requirements: str) -> Dict[str, Any]:
341
+ """Analyze requirements and return a structured result."""
342
+ # Placeholder implementation
343
+ return {"status": "success", "result": {"requirements": requirements}}
344
+
345
+ async def _task_breakdown(self, requirements: Dict[str, Any]) -> Dict[str, Any]:
346
+ """Break down requirements into tasks."""
347
+ # Placeholder implementation
348
+ return {"status": "success", "result": ["task1", "task2", "task3"]}
349
+
350
+ async def _code_generator(self, tasks: List[str]) -> Dict[str, Any]:
351
+ """Generate code based on tasks."""
352
+ # Placeholder implementation
353
+ return {"status": "success", "result": "generated_code"}
354
+
355
+ async def _code_quality_checker(self, code: str) -> Dict[str, Any]:
356
+ """Check the quality of the generated code."""
357
+ # Placeholder implementation
358
+ return {"status": "success", "result": {"quality_score": 0.9}}
359
+
360
+ async def _test_generator(self, code: str) -> Dict[str, Any]:
361
+ """Generate tests for the code."""
362
+ # Placeholder implementation
363
+ return {"status": "success", "result": ["test1", "test2", "test3"]}
364
+
365
+ async def _test_runner(self, tests: List[str]) -> Dict[str, Any]:
366
+ """Run the generated tests."""
367
+ # Placeholder implementation
368
+ return {"status": "success", "result": {"passed": 3, "failed": 0}}
369
+
370
+ async def _coverage_analyzer(self, test_results: Dict[str, Any]) -> Dict[str, Any]:
371
+ """Analyze test coverage."""
372
+ # Placeholder implementation
373
+ return {"status": "success", "result": {"coverage": 0.95}}
374
+
375
+ class DevelopmentPipeline:
376
+ """Advanced development pipeline with stage management and monitoring"""
377
+
378
+ class PipelineStage(Enum):
379
+ PLANNING = "planning"
380
+ DEVELOPMENT = "development"
381
+ TESTING = "testing"
382
+ DEPLOYMENT = "deployment"
383
+ MAINTENANCE = "maintenance"
384
+ ROLLBACK = "rollback"
385
+
386
+ def __init__(self, workspace_manager, tool_manager):
387
+ self.workspace_manager = workspace_manager
388
+ self.tool_manager = tool_manager
389
+ self.current_stage = None
390
+ self.stage_history = []
391
+ self.active_processes = {}
392
+ self.stage_metrics = {}
393
+ self.logger = self._setup_logger()
394
+
395
+ def _setup_logger(self) -> logging.Logger:
396
+ logger = logging.getLogger("DevelopmentPipeline")
397
+ logger.setLevel(logging.DEBUG)
398
+ handler = logging.StreamHandler()
399
+ formatter = logging.Formatter(
400
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
401
+ )
402
+ handler.setFormatter(formatter)
403
+ logger.addHandler(handler)
404
+ return logger
405
+
406
+ async def execute_stage(self, stage: PipelineStage, context: Dict[str, Any]) -> Dict[str, Any]:
407
+ """Execute a pipeline stage with monitoring and error handling"""
408
+ self.logger.info(f"Starting stage: {stage.value}")
409
+ start_time = time.time()
410
+
411
+ try:
412
+ # Record stage start
413
+ self.current_stage = stage
414
+ self._record_stage_start(stage, context)
415
+
416
+ # Execute stage-specific logic
417
+ result = await self._execute_stage_logic(stage, context)
418
+
419
+ # Validate stage output
420
+ self._validate_stage_output(stage, result)
421
+
422
+ # Update metrics
423
+ execution_time = time.time() - start_time
424
+ self._update_stage_metrics(stage, execution_time, result)
425
+
426
+ # Record stage completion
427
+ self._record_stage_completion(stage, result)
428
+
429
+ return {
430
+ "status": "success",
431
+ "stage": stage.value,
432
+ "result": result,
433
+ "execution_time": execution_time,
434
+ "metrics": self.stage_metrics.get(stage, {})
435
+ }
436
+
437
+ except Exception as e:
438
+ error_msg = f"Error in stage {stage.value}: {str(e)}"
439
+ self.logger.error(error_msg)
440
+
441
+ # Handle stage failure
442
+ await self._handle_stage_failure(stage, context, e)
443
+
444
+ return {
445
+ "status": "error",
446
+ "stage": stage.value,
447
+ "error": error_msg,
448
+ "execution_time": time.time() - start_time
449
+ }
450
+
451
+ async def _execute_stage_logic(self, stage: PipelineStage, context: Dict[str, Any]) -> Dict[str, Any]:
452
+ """Execute stage-specific logic with appropriate tools and enhanced error handling."""
453
+ """Execute stage-specific logic with appropriate tools"""
454
+ if stage == self.PipelineStage.PLANNING:
455
+ return await self._execute_planning_stage(context)
456
+ elif stage == self.PipelineStage.DEVELOPMENT:
457
+ return await self._execute_development_stage(context)
458
+ elif stage == self.PipelineStage.TESTING:
459
+ return await self._execute_testing_stage(context)
460
+ elif stage == self.PipelineStage.DEPLOYMENT:
461
+ return await self._execute_deployment_stage(context)
462
+ elif stage == self.PipelineStage.MAINTENANCE:
463
+ return await self._execute_maintenance_stage(context)
464
+ elif stage == self.PipelineStage.ROLLBACK:
465
+ return await self._execute_rollback_stage(context)
466
+ else:
467
+ raise ValueError(f"Unknown pipeline stage: {stage}")
468
+
469
+ async def _execute_planning_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
470
+ """Execute planning stage with requirement analysis and task breakdown"""
471
+ try:
472
+ # Analyze requirements
473
+ requirements = await self.tool_manager.execute_tool(
474
+ "requirements_analyzer",
475
+ context.get("requirements", "")
476
+ )
477
+
478
+ # Generate task breakdown
479
+ tasks = await self.tool_manager.execute_tool(
480
+ "task_breakdown",
481
+ requirements["result"]
482
+ )
483
+
484
+ # Create project structure
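+ # NOTE: create_project_structure (and save_generated_code used below) are not implemented in WorkspaceManager above and still need to be added.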
485
+ project_structure = await self.workspace_manager.create_project_structure(
486
+ context["project_name"],
487
+ tasks["result"]
488
+ )
489
+
490
+ return {
491
+ "requirements": requirements["result"],
492
+ "tasks": tasks["result"],
493
+ "project_structure": project_structure
494
+ }
495
+ except Exception as e:
496
+ raise Exception(f"Planning stage failed: {str(e)}")
497
+
498
+ async def _execute_development_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
499
+ """Execute development stage with code generation and quality checks"""
500
+ try:
501
+ # Generate code
502
+ code_generation = await self.tool_manager.execute_tool(
503
+ "code_generator",
504
+ context.get("tasks", [])
505
+ )
506
+
507
+ # Run initial quality checks
508
+ quality_check = await self.tool_manager.execute_tool(
509
+ "code_quality_checker",
510
+ code_generation["result"]
511
+ )
512
+
513
+ # Save generated code
514
+ saved_files = await self.workspace_manager.save_generated_code(
515
+ context["project_name"],
516
+ code_generation["result"]
517
+ )
518
+
519
+ return {
520
+ "generated_code": code_generation["result"],
521
+ "quality_check": quality_check["result"],
522
+ "saved_files": saved_files
523
+ }
524
+ except Exception as e:
525
+ raise Exception(f"Development stage failed: {str(e)}")
526
+
527
+ async def _execute_testing_stage(self, context: Dict[str, Any]) -> Dict[str, Any]:
528
+ """Execute testing stage with comprehensive test suite"""
529
+ try:
530
+ # Generate tests
531
+ test_generation = await self.tool_manager.execute_tool(
532
+ "test_generator",
533
+ context.get("generated_code", "")
534
+ )
535
+
536
+ # Run tests
537
+ test_results = await self.tool_manager.execute_tool(
538
+ "test_runner",
539
+ test_generation["result"]
540
+ )
541
+
542
+ # Generate coverage report
543
+ coverage_report = await self.tool_manager.execute_tool(
544
+ "coverage_analyzer",
545
+ test_results["result"]
546
+ )
547
+
548
+ return {
549
+ "test_cases": test_generation["result"],
550
+ "test_results": test_results["result"],
551
+ "coverage_report": coverage_report["result"]
552
+ }
553
+ except Exception as e:
554
+ raise Exception(f"Testing stage failed: {str(e)}")
555
+
556
+ def _validate_stage_output(self, stage: PipelineStage, result: Dict[str, Any]):
557
+ """Validate stage output against defined criteria"""
558
+ validation_rules = self._get_validation_rules(stage)
559
+ validation_errors = []
560
+
561
+ for rule in validation_rules:
562
+ if not rule.validate(result):
563
+ validation_errors.append(rule.get_error_message())
564
+
565
+ if validation_errors:
566
+ raise ValueError(f"Stage validation failed: {'; '.join(validation_errors)}")
567
+
568
+ def _update_stage_metrics(self, stage: PipelineStage, execution_time: float, result: Dict[str, Any]):
569
+ """Update metrics for the stage"""
570
+ if stage not in self.stage_metrics:
571
+ self.stage_metrics[stage] = {
572
+ "total_executions": 0,
573
+ "successful_executions": 0,
574
+ "failed_executions": 0,
575
+ "average_execution_time": 0,
576
+ "last_execution_time": None,
577
+ "error_rate": 0
578
+ }
579
+
580
+ metrics = self.stage_metrics[stage]
581
+ metrics["total_executions"] += 1
582
+ metrics["last_execution_time"] = execution_time
583
+
584
+ if result.get("status") == "success":
585
+ metrics["successful_executions"] += 1
586
+ else:
587
+ metrics["failed_executions"] += 1
588
+
589
+ metrics["error_rate"] = metrics["failed_executions"] / metrics["total_executions"]
590
+ metrics["average_execution_time"] = (
591
+ (metrics["average_execution_time"] * (metrics["total_executions"] - 1) + execution_time)
592
+ / metrics["total_executions"]
593
+ )
594
+
595
+ async def _handle_stage_failure(self, stage: PipelineStage, context: Dict[str, Any], error: Exception):
596
+ """Handle stage failure with rollback and recovery options"""
597
+ self.logger.error(f"Handling failure in stage {stage.value}: {str(error)}")
598
+
599
+ # Record failure
600
+ self._record_stage_failure(stage, error)
601
+
602
+ # Determine if rollback is needed
603
+ if self._should_rollback(stage, error):
604
+ await self._execute_rollback(stage, context)
605
+
606
+ # Attempt recovery
607
+ await self._attempt_recovery(stage, context, error)
608
+
609
+ def _should_rollback(self, stage: PipelineStage, error: Exception) -> bool:
610
+ """Determine if a rollback is needed based on error severity"""
611
+ critical_errors = [
612
+ "DatabaseError",
613
+ "DeploymentError",
614
+ "SecurityViolation"
615
+ ]
616
+ return any(err in str(error) for err in critical_errors)
617
+
618
+ async def _execute_rollback(self, stage: PipelineStage, context: Dict[str, Any]):
619
+ """Execute rollback procedure for a failed stage"""
620
+ self.logger.info(f"Executing rollback for stage {stage.value}")
621
+
622
+ try:
623
+ # Get rollback point
624
+ rollback_point = self._get_rollback_point(stage)
625
+
626
+ # Execute rollback
627
+ await self.execute_stage(
628
+ self.PipelineStage.ROLLBACK,
629
+ {
630
+ **context,
631
+ "rollback_point": rollback_point,
632
+ "failed_stage": stage
633
+ }
634
+ )
635
+
636
+ except Exception as e:
637
+ self.logger.error(f"Rollback failed: {str(e)}")
638
+ # Implement emergency shutdown if rollback fails
639
+ self._emergency_shutdown(stage, e)
640
+
641
+ def _emergency_shutdown(self, stage: PipelineStage, error: Exception):
642
+ """Handle emergency shutdown when rollback fails"""
643
+ self.logger.critical(f"Emergency shutdown initiated for stage {stage.value}")
644
+ # Implement emergency shutdown procedures
645
+ pass
646
+
647
+ class CodeMetricsAnalyzer:
648
+ """Analyzes code metrics using various tools"""
649
+
650
+ def __init__(self):
651
+ self.metrics_history = []
652
+
653
+ def analyze_code_quality(self, file_path: str) -> Dict[str, Any]:
654
+ """Analyzes code quality using multiple metrics"""
655
+ try:
656
+ # Pylint analysis
657
+ pylint_score = self._run_pylint(file_path)
658
+
659
+ # Complexity analysis
660
+ complexity_score = self._analyze_complexity(file_path)
661
+
662
+ # Test coverage analysis
663
+ coverage_score = self._analyze_test_coverage(file_path)
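+ # NOTE: _analyze_test_coverage is not defined on this class; a coverage.py-based helper still needs to be implemented.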
664
+
665
+ # Security analysis
666
+ security_score = self._analyze_security(file_path)
667
+
668
+ # Calculate overall quality score
669
+ quality_score = self._calculate_overall_score(
670
+ pylint_score,
671
+ complexity_score,
672
+ coverage_score,
673
+ security_score
674
+ )
675
+
676
+ metrics = {
677
+ "quality_score": quality_score,
678
+ "pylint_score": pylint_score,
679
+ "complexity_score": complexity_score,
680
+ "coverage_score": coverage_score,
681
+ "security_score": security_score,
682
+ "timestamp": datetime.now()
683
+ }
684
+
685
+ self.metrics_history.append(metrics)
686
+ return metrics
687
+
688
+ except Exception as e:
689
+ logging.error(f"Error analyzing code metrics: {str(e)}")
690
+ return {
691
+ "error": str(e),
692
+ "quality_score": 0.0,
693
+ "timestamp": datetime.now()
694
+ }
695
+
696
+ def _run_pylint(self, file_path: str) -> float:
697
+ """Runs pylint analysis"""
698
+ try:
699
+ reporter = JSONReporter()
700
+ run = Run([file_path], reporter=reporter, do_exit=False)
+ # Pylint exposes the overall score on the linter stats rather than on the reporter;
+ # fall back to 0.0 if the attribute is absent in the installed pylint version.
+ score = getattr(run.linter.stats, "global_note", 0.0)
+ return float(score) / 10.0 # Normalize pylint's score (out of 10) to a 0-1 scale
703
+ except Exception as e:
704
+ logging.error(f"Pylint analysis error: {str(e)}")
705
+ return 0.0
706
+
707
+ def _analyze_complexity(self, file_path: str) -> float:
708
+ """Analyzes code complexity"""
709
+ try:
710
+ with open(file_path, 'r') as file:
711
+ code = file.read()
712
+
713
+ # Calculate cyclomatic complexity
714
+ complexity = radon.complexity.cc_visit(code)
715
+ avg_complexity = sum(item.complexity for item in complexity) / len(complexity) if complexity else 0
716
+
717
+ # Normalize complexity score (0-1 scale, lower is better)
718
+ normalized_score = 1.0 - min(avg_complexity / 10.0, 1.0)
719
+ return normalized_score
720
+
721
+ except Exception as e:
722
+ logging.error(f"Complexity analysis error: {str(e)}")
723
+ return 0.0
724
+
725
+ async def _analyze_current_state(self, project_name: str) -> Dict[str, Any]:
726
+ """Analyze current project state with detailed metrics."""
727
+ try:
728
+ self.logger.info(f"Analyzing current state for project: {project_name}")
729
+
730
+ # Collect code metrics
731
+ code_metrics = await self._collect_code_metrics(project_name)
732
+ self.logger.info("Code metrics collected successfully.")
733
+
734
+ # Analyze test coverage
735
+ test_coverage = await self._analyze_test_coverage(project_name)
736
+ self.logger.info("Test coverage analysis completed.")
737
+
738
+ # Check security vulnerabilities
739
+ security_analysis = await self._analyze_security(project_name)
740
+ self.logger.info("Security analysis completed.")
741
+
742
+ # Measure performance metrics
743
+ performance_metrics = await self._measure_performance(project_name)
744
+ self.logger.info("Performance metrics measured.")
745
+
746
+ # Determine if requirements are met
747
+ meets_requirements = await self._check_requirements(
748
+ code_metrics,
749
+ test_coverage,
750
+ security_analysis,
751
+ performance_metrics
752
+ )
753
+ self.logger.info("Requirements check completed.")
754
+
755
+ return {
756
+ "code_metrics": code_metrics,
757
+ "test_coverage": test_coverage,
758
+ "security_analysis": security_analysis,
759
+ "performance_metrics": performance_metrics,
760
+ "meets_requirements": meets_requirements,
761
+ "timestamp": datetime.now()
762
+ }
763
+
764
+ except Exception as e:
765
+ self.logger.error(f"Error analyzing current state: {str(e)}")
766
+ raise
767
+
768
+ def _analyze_security(self, file_path: str) -> float:
769
+ """Analyzes code security using bandit"""
770
+ try:
771
+ # BanditManager requires a config object and an aggregation type; a zero-argument call fails at runtime.
+ from bandit.core import config as b_config
+ bandit_conf = b_config.BanditConfig()
+ mgr = manager.BanditManager(bandit_conf, "file")
+ mgr.discover_files([file_path])
+ mgr.run_tests()
+
+ # Calculate security score based on findings; bandit reports severities as
+ # "LOW"/"MEDIUM"/"HIGH", so map them to numeric weights (assumed 1-3 scale).
+ severity_weights = {"LOW": 1, "MEDIUM": 2, "HIGH": 3}
+ issues = mgr.get_issue_list()
+ total_issues = len(issues)
+ max_severity = max((severity_weights.get(str(issue.severity).upper(), 1) for issue in issues), default=0)
+
+ # Normalize security score (0-1 scale, higher is better)
+ security_score = 1.0 - (total_issues * max_severity) / 10.0
+ return max(0.0, min(1.0, security_score))
782
+
783
+ except Exception as e:
784
+ logging.error(f"Security analysis error: {str(e)}")
785
+ return 0.0
786
+
787
+ def _calculate_overall_score(self, pylint_score: float, complexity_score: float,
788
+ coverage_score: float, security_score: float) -> float:
789
+ """Calculates overall code quality score"""
790
+ weights = {
791
+ 'pylint': 0.3,
792
+ 'complexity': 0.2,
793
+ 'coverage': 0.25,
794
+ 'security': 0.25
795
+ }
796
+
797
+ overall_score = (
798
+ weights['pylint'] * pylint_score +
799
+ weights['complexity'] * complexity_score +
800
+ weights['coverage'] * coverage_score +
801
+ weights['security'] * security_score
802
+ )
803
+
804
+ return max(0.0, min(1.0, overall_score))
805
+
806
+ def get_metrics_history(self) -> List[Dict[str, Any]]:
807
+ """Returns the history of metrics measurements"""
808
+ return self.metrics_history
809
+
810
+ def get_trend_analysis(self) -> Dict[str, Any]:
811
+ """Analyzes trends in metrics over time"""
812
+ if not self.metrics_history:
813
+ return {"status": "No metrics history available"}
814
+
815
+ trends = {
816
+ "quality_score": self._calculate_trend([m["quality_score"] for m in self.metrics_history]),
817
+ "coverage_score": self._calculate_trend([m["coverage_score"] for m in self.metrics_history]),
818
+ "security_score": self._calculate_trend([m["security_score"] for m in self.metrics_history])
819
+ }
820
+
821
+ return trends
822
+
823
+ def _calculate_trend(self, values: List[float]) -> Dict[str, Any]:
824
+ """Calculates trend statistics for a metric"""
825
+ if not values:
826
+ return {"trend": "unknown", "change": 0.0}
827
+
828
+ recent_values = values[-3:] # Look at last 3 measurements
829
+ if len(recent_values) < 2:
830
+ return {"trend": "insufficient data", "change": 0.0}
831
+
832
+ change = recent_values[-1] - recent_values[0]
833
+ trend = "improving" if change > 0 else "declining" if change < 0 else "stable"
834
+
835
+ return {
836
+ "trend": trend,
837
+ "change": change,
838
+ "current": recent_values[-1],
839
+ "previous": recent_values[0]
840
+ }
841
+
842
+ @dataclass
843
+ class QualityMetrics:
844
+ """Advanced quality metrics tracking and analysis"""
845
+ code_quality_score: float = 0.0
846
+ test_coverage: float = 0.0
847
+ security_score: str = "unknown"
848
+ performance_score: float = 0.0
849
+ metrics_analyzer: CodeMetricsAnalyzer = None
850
+
851
+ def __post_init__(self):
852
+ self.metrics_analyzer = CodeMetricsAnalyzer()
853
+ self.history = []
854
+ self.thresholds = {
855
+ "code_quality": 0.85,
856
+ "test_coverage": 0.90,
857
+ "security": 0.85,
858
+ "performance": 0.80
859
+ }
860
+
861
+ def analyze_code(self, project_name: str) -> Dict[str, Any]:
862
+ """Comprehensive code analysis"""
863
+ try:
864
+ # Get all Python files in the project
865
+ project_files = self._get_project_files(project_name)
866
+
867
+ aggregated_metrics = {
868
+ "code_quality": 0.0,
869
+ "test_coverage": 0.0,
870
+ "security": 0.0,
871
+ "performance": 0.0,
872
+ "files_analyzed": len(project_files),
873
+ "detailed_metrics": []
874
+ }
875
+
876
+ for file_path in project_files:
877
+ metrics = self.metrics_analyzer.analyze_code_quality(file_path)
878
+ aggregated_metrics["detailed_metrics"].append({
879
+ "file": file_path,
880
+ "metrics": metrics
881
+ })
882
+
883
+ # Update aggregated scores
884
+ aggregated_metrics["code_quality"] += metrics["quality_score"]
885
+ aggregated_metrics["test_coverage"] += metrics["coverage_score"]
886
+ aggregated_metrics["security"] += metrics["security_score"]
887
+
888
+ # Calculate averages
889
+ if project_files:
890
+ for key in ["code_quality", "test_coverage", "security"]:
891
+ aggregated_metrics[key] /= len(project_files)
892
+
893
+ # Update instance variables
894
+ self.code_quality_score = aggregated_metrics["code_quality"]
895
+ self.test_coverage = aggregated_metrics["test_coverage"]
896
+ self.security_score = str(aggregated_metrics["security"])
897
+
898
+ # Add to history
899
+ self.history.append({
900
+ "timestamp": datetime.now(),
901
+ "metrics": aggregated_metrics
902
+ })
903
+
904
+ return aggregated_metrics
905
+
906
+ except Exception as e:
907
+ logging.error(f"Error in code analysis: {str(e)}")
908
+ return {
909
+ "error": str(e),
910
+ "code_quality": 0.0,
911
+ "test_coverage": 0.0,
912
+ "security": "error",
913
+ "performance": 0.0
914
+ }
915
+
916
+ def _get_project_files(self, project_name: str) -> List[str]:
917
+ """Get all Python files in the project"""
918
+ project_dir = os.path.join(os.getcwd(), project_name)
919
+ python_files = []
920
+
921
+ for root, _, files in os.walk(project_dir):
922
+ for file in files:
923
+ if file.endswith('.py'):
924
+ python_files.append(os.path.join(root, file))
925
+
926
+ return python_files
927
+
928
+ def get_improvement_suggestions(self) -> List[str]:
929
+ """Generate improvement suggestions based on metrics"""
930
+ suggestions = []
931
+ latest_metrics = self.history[-1]["metrics"] if self.history else None
932
+
933
+ if not latest_metrics:
934
+ return ["No metrics available for analysis"]
935
+
936
+ if latest_metrics["code_quality"] < self.thresholds["code_quality"]:
937
+ suggestions.append(
938
+ f"Code quality score ({latest_metrics['code_quality']:.2f}) is below threshold "
939
+ f"({self.thresholds['code_quality']}). Consider refactoring complex methods."
940
+ )
941
+
942
+ if latest_metrics["test_coverage"] < self.thresholds["test_coverage"]:
943
+ suggestions.append(
944
+ f"Test coverage ({latest_metrics['test_coverage']:.2f}) is below threshold "
945
+ f"({self.thresholds['test_coverage']}). Add more unit tests."
946
+ )
947
+
948
+ if float(latest_metrics["security"]) < self.thresholds["security"]:
949
+ suggestions.append(
950
+ f"Security score ({latest_metrics['security']}) is below threshold "
951
+ f"({self.thresholds['security']}). Address security vulnerabilities."
952
+ )
953
+
954
+ return suggestions
955
+
956
+ class ErrorTracker:
957
+ """Enhanced error tracking and analysis"""
958
+ def __init__(self):
959
+ self.errors: List[Dict[str, Any]] = []
960
+ self.error_patterns: Dict[str, int] = {}
961
+ self.critical_errors: List[Dict[str, Any]] = []
962
+
963
+ def add_error(self, error_type: str, message: str, severity: str = "normal"):
964
+ """Add an error with enhanced tracking"""
965
+ error_entry = {
966
+ "type": error_type,
967
+ "message": message,
968
+ "severity": severity,
969
+ "timestamp": datetime.now(),
970
+ "stack_trace": traceback.format_exc()
971
+ }
972
+
973
+ self.errors.append(error_entry)
974
+
975
+ # Track error patterns
976
+ if error_type in self.error_patterns:
977
+ self.error_patterns[error_type] += 1
978
+ else:
979
+ self.error_patterns[error_type] = 1
980
+
981
+ # Track critical errors
982
+ if severity == "critical":
983
+ self.critical_errors.append(error_entry)
984
+ self._notify_critical_error(error_entry)
985
+
986
+ def _notify_critical_error(self, error: Dict[str, Any]):
987
+ """Handle critical error notification"""
988
+ logging.critical(f"Critical error detected: {error['message']}")
989
+ # Implement notification system here (e.g., email, Slack)
990
+
991
+ def get_error_analysis(self) -> Dict[str, Any]:
992
+ """Generate comprehensive error analysis"""
993
+ return {
994
+ "total_errors": len(self.errors),
995
+ "error_patterns": self.error_patterns,
996
+ "critical_errors": len(self.critical_errors),
997
+ "most_common_error": max(self.error_patterns.items(), key=lambda x: x[1]) if self.error_patterns else None,
998
+ "error_trend": self._analyze_error_trend()
999
+ }
1000
+
1001
+ def _analyze_error_trend(self) -> Dict[str, Any]:
1002
+ """Analyze error trends over time"""
1003
+ if not self.errors:
1004
+ return {"trend": "no errors"}
1005
+
1006
+ # Group errors by hour
1007
+ error_timeline = {}
1008
+ for error in self.errors:
1009
+ hour = error["timestamp"].replace(minute=0, second=0, microsecond=0)
1010
+ if hour in error_timeline:
1011
+ error_timeline[hour] += 1
1012
+ else:
1013
+ error_timeline[hour] = 1
1014
+
1015
+ # Calculate trend
1016
+ timeline_values = list(error_timeline.values())
1017
+ if len(timeline_values) < 2:
1018
+ return {"trend": "insufficient data"}
1019
+
1020
+ trend = "increasing" if timeline_values[-1] > timeline_values[0] else "decreasing"
1021
+ return {
1022
+ "trend": trend,
1023
+ "current_rate": timeline_values[-1],
1024
+ "initial_rate": timeline_values[0]
1025
+ }
1026
+
1027
+ class ProjectAnalytics:
1028
+ """Enhanced project analytics and reporting"""
1029
+ """Enhanced project analytics and reporting"""
1030
+ def __init__(self, workspace_manager):
1031
+ self.workspace_manager = workspace_manager
1032
+ self.metrics_analyzer = CodeMetricsAnalyzer()
1033
+ self.analysis_history = []
1034
+
1035
+ def generate_project_report(self, project_name: str) -> Dict[str, Any]:
1036
+ """Generate comprehensive project report"""
1037
+ try:
1038
+ current_analysis = {
1039
+ "timestamp": datetime.now(),
1040
+ "basic_metrics": self._get_basic_metrics(project_name),
1041
+ "code_quality": self._get_code_quality_metrics(project_name),
1042
+ "performance": self._get_performance_metrics(project_name),
1043
+ "security": self._get_security_metrics(project_name),
1044
+ "dependencies": self._analyze_dependencies(project_name)
1045
+ }
1046
+
1047
+ self.analysis_history.append(current_analysis)
1048
+
1049
+ return {
1050
+ "current_analysis": current_analysis,
1051
+ "historical_trends": self._analyze_trends(),
1052
+ "recommendations": self._generate_recommendations(current_analysis)
1053
+ }
1054
+
1055
+ except Exception as e:
1056
+ logging.error(f"Error generating project report: {str(e)}")
1057
+ return {"error": str(e)}
1058
+
1059
+ class StreamlitInterface:
1060
+ """Streamlit UI integration for the Autonomous Agent system."""
1061
+
1062
+ def __init__(self, app: AutonomousAgentApp):
1063
+ self.app = app
1064
+
1065
+ def render_main_interface(self):
1066
+ """Render the main Streamlit interface."""
1067
+ st.title("Autonomous Agent System")
1068
+
1069
+ # Create tabs for different functionalities
1070
+ tab_names = ["Autonomous Agent", "Workspace Management", "Settings"]
1071
+ selected_tab = st.selectbox("Select a Tab", tab_names)
1072
+
1073
+ if selected_tab == "Autonomous Agent":
1074
+ self.render_autonomous_agent_tab()
1075
+ elif selected_tab == "Workspace Management":
1076
+ self.render_workspace_management_tab()
1077
+ elif selected_tab == "Settings":
1078
+ self.render_settings_tab()
1079
+
1080
+ def render_autonomous_agent_tab(self):
1081
+ """Render the Autonomous Agent tab."""
1082
+ st.header("Autonomous Agent")
1083
+ task = st.text_area("Enter a task for the autonomous agent:")
1084
+
1085
+ if st.button("Run Autonomous Agent"):
1086
+ if task:
1087
+ # Run the autonomous agent with the provided task
1088
+ try:
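+ # NOTE: asyncio.run blocks the Streamlit script until the refinement cycle finishes, so the UI is unresponsive while it runs.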
1089
+ result = asyncio.run(self.app.refinement_loop.run_refinement_cycle(task))
1090
+ st.success(f"Result: {result}")
1091
+ except Exception as e:
1092
+ st.error(f"An error occurred: {str(e)}")
1093
+
1094
+ def render_workspace_management_tab(self):
1095
+ """Render the Workspace Management tab with a workspace explorer."""
1096
+ st.header("Workspace Management")
1097
+
1098
+ # Workspace Explorer
1099
+ st.subheader("Workspace Explorer")
1100
+ workspace_tree = self.app.workspace_manager.get_workspace_tree()
1101
+ self._render_tree(workspace_tree)
1102
+
1103
+ # File creation
1104
+ st.subheader("Create a File")
1105
+ new_filename = st.text_input("Enter filename:")
1106
+ new_file_content = st.text_area("Enter file content:")
1107
+ if st.button("Create File"):
1108
+ if new_filename and new_file_content:
1109
+ result = self.app.workspace_manager.create_file(new_filename, new_file_content)
1110
+ st.success(result)
1111
+ else:
1112
+ st.error("Filename and content are required.")
1113
+
1114
+ # File deletion
1115
+ st.subheader("Delete a File")
1116
+ delete_filename = st.text_input("Enter filename to delete:")
1117
+ if st.button("Delete File"):
1118
+ if delete_filename:
1119
+ result = self.app.workspace_manager.delete_file(delete_filename)
1120
+ st.success(result)
1121
+ else:
1122
+ st.error("Filename is required.")
1123
+
1124
+ def _render_tree(self, tree: Dict[str, Any], level: int = 0):
1125
+ """Recursively render the workspace directory tree."""
1126
+ if tree["type"] == "file":
1127
+ st.write(" " * level + f"📄 {tree['name']}")
1128
+ elif tree["type"] == "directory":
1129
+ st.write(" " * level + f"📁 {tree['name']}")
1130
+ for child in tree["children"]:
1131
+ self._render_tree(child, level + 1)
1132
+
1133
+ def render_settings_tab(self):
1134
+ """Render the Settings tab."""
1135
+ st.header("Application Settings")
1136
+
1137
+ # Section 1: Refinement Process Configuration
1138
+ st.subheader("Refinement Process Settings")
1139
+
1140
+ # Adjust maximum refinement iterations
1141
+ current_max_iter = self.app.refinement_loop.max_iterations
1142
+ new_max_iter = st.number_input(
1143
+ "Maximum Refinement Iterations",
1144
+ min_value=1,
1145
+ max_value=20,
1146
+ value=current_max_iter,
1147
+ help="Maximum number of refinement cycles to perform"
1148
+ )
1149
+ if new_max_iter != current_max_iter:
1150
+ self.app.refinement_loop.max_iterations = new_max_iter
1151
+ st.success(f"Updated maximum iterations to {new_max_iter}")
1152
+
1153
+ # Section 2: Quality Threshold Configuration
1154
+ st.subheader("Quality Thresholds")
1155
+
1156
+ # Get current thresholds
1157
+ thresholds = self.app.refinement_loop.quality_metrics.thresholds
1158
+
1159
+ col1, col2, col3 = st.columns(3)
1160
+ with col1:
1161
+ new_code_quality = st.slider(
1162
+ "Code Quality Threshold",
1163
+ 0.0, 1.0, thresholds["code_quality"],
1164
+ help="Minimum acceptable code quality score"
1165
+ )
1166
+ with col2:
1167
+ new_test_coverage = st.slider(
1168
+ "Test Coverage Threshold",
1169
+ 0.0, 1.0, thresholds["test_coverage"],
1170
+ help="Minimum required test coverage"
1171
+ )
1172
+ with col3:
1173
+ new_security = st.slider(
1174
+ "Security Threshold",
1175
+ 0.0, 1.0, thresholds["security"],
1176
+ help="Minimum acceptable security score"
1177
+ )
1178
+
1179
+ if st.button("Update Quality Thresholds"):
1180
+ self.app.refinement_loop.quality_metrics.thresholds.update({
1181
+ "code_quality": new_code_quality,
1182
+ "test_coverage": new_test_coverage,
1183
+ "security": new_security
1184
+ })
1185
+ st.success("Quality thresholds updated!")
1186
+
1187
+ # Section 3: Performance Configuration
1188
+ st.subheader("Performance Settings")
1189
+
1190
+ # Concurrency settings
1191
+ concurrency_level = st.selectbox(
1192
+ "Max Concurrency",
1193
+ options=[1, 2, 4, 8],
1194
+ index=2,
1195
+ help="Maximum parallel tasks for code analysis"
1196
+ )
1197
+
1198
+ # Resource limits
1199
+ mem_limit = st.slider(
1200
+ "Memory Limit (GB)",
1201
+ 1, 16, 4,
1202
+ help="Maximum memory allocation for pipeline operations"
1203
+ )
1204
+
1205
+ # Section 4: Security Settings
1206
+ st.subheader("Security Configuration")
1207
+
1208
+ # Security rules toggle
1209
+ enable_security_scan = st.checkbox(
1210
+ "Enable Real-time Security Scanning",
1211
+ value=True,
1212
+ help="Perform continuous security analysis during development"
1213
+ )
1214
+
1215
+ # Severity level filtering
1216
+ security_level = st.selectbox(
1217
+ "Minimum Security Severity Level",
1218
+ ["Low", "Medium", "High", "Critical"],
1219
+ index=1,
1220
+ help="Minimum severity level to trigger security alerts"
1221
+ )
1222
+
1223
+ # Section 5: Workspace Configuration
1224
+ st.subheader("Workspace Settings")
1225
+ current_workspace = self.app.workspace_manager.workspace_dir
1226
+ st.write(f"Current Workspace: `{current_workspace}`")
1227
+
1228
+ # Workspace actions
1229
+ if st.button("Clear Workspace Cache"):
1230
+ self.app.workspace_manager.clean_cache()
1231
+ st.success("Workspace cache cleared!")
1232
+
1233
+ # Section 6: Diagnostic Settings
1234
+ st.subheader("Diagnostics")
1235
+
1236
+ # Logging controls
1237
+ log_level = st.selectbox(
1238
+ "Logging Level",
1239
+ ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
1240
+ index=1
1241
+ )
1242
+ st.session_state.log_level = log_level # Store in session state
1243
+ logging.getLogger().setLevel(log_level)
1244
+
1245
+ # Debug mode toggle
1246
+ debug_mode = st.checkbox("Enable Debug Mode")
1247
+ st.session_state.debug_mode = debug_mode # Store in session state
1248
+ if debug_mode:
1249
+ self.app.refinement_loop.logger.setLevel(logging.DEBUG)
1250
+ else:
1251
+ self.app.refinement_loop.logger.setLevel(logging.INFO)
1252
+
1253
+ # Section 7: System Information
1254
+ st.subheader("System Info")
1255
+ st.write(f"Python Version: {sys.version}")
1256
+ st.write(f"Platform: {platform.platform()}")
1257
+ st.write(f"Available Memory: {psutil.virtual_memory().available / (1024**3):.1f} GB free")
1258
+
1259
+
1260
+ if __name__ == "__main__":
1261
+ import streamlit as st
1262
+ main()