acecalisto3 commited on
Commit
e232df4
·
verified ·
1 Parent(s): fcd88a0

Update definitions.py

Browse files
Files changed (1) hide show
  1. definitions.py +175 -175
definitions.py CHANGED
@@ -30,199 +30,199 @@ from pathlib import Path
30
  # Set logging level from environment variable
31
  logging.basicConfig(level=os.getenv('LOG_LEVEL', 'INFO'))
32
 
33
class CodeMetricsAnalyzer:
    """Analyzes code quality for a single file and tracks results over time.

    Combines pylint, cyclomatic-complexity (radon), test-coverage and
    security (bandit) signals into one weighted quality score. Each call to
    ``analyze_code_quality`` appends its result to ``metrics_history`` so
    trends can later be reported via ``get_trend_analysis``.
    """

    def __init__(self):
        # Chronological list of metric dicts produced by analyze_code_quality().
        self.metrics_history: List[Dict[str, Any]] = []
        # Fix: _analyze_current_state logs via self.logger, which was never
        # initialized anywhere and raised AttributeError on first use.
        self.logger = logging.getLogger(__name__)

    def analyze_code_quality(self, file_path: str) -> Dict[str, Any]:
        """Run all analyzers on *file_path* and return a metrics dict.

        Returns a dict with the individual scores (each intended to be on a
        0-1 scale), the weighted ``quality_score`` and a ``timestamp``.
        On any failure a dict with an ``error`` key and a ``quality_score``
        of 0.0 is returned instead of raising.
        """
        try:
            # Pylint analysis
            pylint_score = self._run_pylint(file_path)

            # Complexity analysis
            complexity_score = self._analyze_complexity(file_path)

            # Test coverage analysis.
            # NOTE(review): _analyze_test_coverage is not defined on this
            # class; unless it is provided elsewhere (e.g. by a subclass),
            # this raises AttributeError and the except branch below runs.
            coverage_score = self._analyze_test_coverage(file_path)

            # Security analysis
            security_score = self._analyze_security(file_path)

            # Calculate overall quality score
            quality_score = self._calculate_overall_score(
                pylint_score,
                complexity_score,
                coverage_score,
                security_score
            )

            metrics = {
                "quality_score": quality_score,
                "pylint_score": pylint_score,
                "complexity_score": complexity_score,
                "coverage_score": coverage_score,
                "security_score": security_score,
                # NOTE(review): naive local timestamp — confirm whether UTC
                # (datetime.now(timezone.utc)) is expected by consumers.
                "timestamp": datetime.now()
            }

            self.metrics_history.append(metrics)
            return metrics

        except Exception as e:
            logging.error(f"Error analyzing code metrics: {str(e)}")
            return {
                "error": str(e),
                "quality_score": 0.0,
                "timestamp": datetime.now()
            }

    def _run_pylint(self, file_path: str) -> float:
        """Run pylint on *file_path* and return a score clamped to 0-1.

        Returns 0.0 when pylint fails for any reason.
        """
        try:
            # NOTE(review): assumes this project's JSONReporter exposes a
            # `data` dict with a 'score' key — TODO confirm; stock pylint's
            # JSONReporter does not provide one.
            reporter = JSONReporter()
            Run([file_path], reporter=reporter, do_exit=False)
            score = reporter.data.get('score', 0.0)
            # Fix: pylint's global score can be negative for very poor code,
            # so a plain /10 could leave this "0-1 scale" range. Clamp it.
            return max(0.0, min(1.0, float(score) / 10.0))
        except Exception as e:
            logging.error(f"Pylint analysis error: {str(e)}")
            return 0.0

    def _analyze_complexity(self, file_path: str) -> float:
        """Score average cyclomatic complexity on a 0-1 scale (higher = simpler).

        An average complexity of 10+ maps to 0.0; an empty file maps to 1.0.
        Returns 0.0 on any analysis failure.
        """
        try:
            with open(file_path, 'r') as file:
                code = file.read()

            # Calculate per-function cyclomatic complexity via radon.
            complexity = radon.complexity.cc_visit(code)
            avg_complexity = sum(item.complexity for item in complexity) / len(complexity) if complexity else 0

            # Normalize complexity score (0-1 scale, lower complexity is better)
            normalized_score = 1.0 - min(avg_complexity / 10.0, 1.0)
            return normalized_score

        except Exception as e:
            logging.error(f"Complexity analysis error: {str(e)}")
            return 0.0

    async def _analyze_current_state(self, project_name: str) -> Dict[str, Any]:
        """Analyze current project state with detailed metrics.

        Gathers code metrics, coverage, security and performance data for
        *project_name* and reports whether requirements are met. Re-raises
        any underlying error after logging it.

        NOTE(review): this awaits self._analyze_test_coverage and
        self._analyze_security, but the only _analyze_security defined here
        is synchronous (and takes a file path) — awaiting its float return
        would raise TypeError. Async overrides must exist elsewhere; confirm.
        """
        try:
            self.logger.info(f"Analyzing current state for project: {project_name}")

            # Collect code metrics
            code_metrics = await self._collect_code_metrics(project_name)
            self.logger.info("Code metrics collected successfully.")

            # Analyze test coverage
            test_coverage = await self._analyze_test_coverage(project_name)
            self.logger.info("Test coverage analysis completed.")

            # Check security vulnerabilities
            security_analysis = await self._analyze_security(project_name)
            self.logger.info("Security analysis completed.")

            # Measure performance metrics
            performance_metrics = await self._measure_performance(project_name)
            self.logger.info("Performance metrics measured.")

            # Determine if requirements are met
            meets_requirements = await self._check_requirements(
                code_metrics,
                test_coverage,
                security_analysis,
                performance_metrics
            )
            self.logger.info("Requirements check completed.")

            return {
                "code_metrics": code_metrics,
                "test_coverage": test_coverage,
                "security_analysis": security_analysis,
                "performance_metrics": performance_metrics,
                "meets_requirements": meets_requirements,
                "timestamp": datetime.now()
            }

        except Exception as e:
            self.logger.error(f"Error analyzing current state: {str(e)}")
            raise

    def _analyze_security(self, file_path: str) -> float:
        """Score *file_path* security on a 0-1 scale using bandit.

        Higher is better; returns 0.0 on any analysis failure.
        """
        try:
            conf = manager.BanditManager()
            conf.discover_files([file_path])
            conf.run_tests()

            # Calculate security score based on findings.
            total_issues = len(conf.get_issue_list())
            # NOTE(review): in stock bandit, issue.severity is a string
            # ("LOW"/"MEDIUM"/"HIGH"), so the arithmetic below would raise
            # TypeError (caught, yielding 0.0). TODO confirm this project
            # maps severities to numbers before relying on this score.
            max_severity = max((issue.severity for issue in conf.get_issue_list()), default=0)

            # Normalize security score (0-1 scale, higher is better)
            security_score = 1.0 - (total_issues * max_severity) / 10.0
            return max(0.0, min(1.0, security_score))

        except Exception as e:
            logging.error(f"Security analysis error: {str(e)}")
            return 0.0

    def _calculate_overall_score(self, pylint_score: float, complexity_score: float,
                                 coverage_score: float, security_score: float) -> float:
        """Combine the four component scores into one weighted 0-1 score.

        Weights sum to 1.0; the result is clamped to [0.0, 1.0].
        """
        weights = {
            'pylint': 0.3,
            'complexity': 0.2,
            'coverage': 0.25,
            'security': 0.25
        }

        overall_score = (
            weights['pylint'] * pylint_score +
            weights['complexity'] * complexity_score +
            weights['coverage'] * coverage_score +
            weights['security'] * security_score
        )

        return max(0.0, min(1.0, overall_score))

    def get_metrics_history(self) -> List[Dict[str, Any]]:
        """Returns the history of metrics measurements (oldest first)."""
        return self.metrics_history

    def get_trend_analysis(self) -> Dict[str, Any]:
        """Analyzes trends in metrics over time.

        Returns a status dict when no history exists, otherwise one trend
        summary per tracked metric.
        """
        if not self.metrics_history:
            return {"status": "No metrics history available"}

        trends = {
            "quality_score": self._calculate_trend([m["quality_score"] for m in self.metrics_history]),
            "coverage_score": self._calculate_trend([m["coverage_score"] for m in self.metrics_history]),
            "security_score": self._calculate_trend([m["security_score"] for m in self.metrics_history])
        }

        return trends

    def _calculate_trend(self, values: List[float]) -> Dict[str, Any]:
        """Calculates trend statistics over the last three measurements.

        Returns a dict with ``trend`` ("improving"/"declining"/"stable", or
        "unknown"/"insufficient data"), the signed ``change``, and the
        ``current``/``previous`` endpoint values of the window.
        """
        if not values:
            return {"trend": "unknown", "change": 0.0}

        recent_values = values[-3:]  # Look at last 3 measurements
        if len(recent_values) < 2:
            return {"trend": "insufficient data", "change": 0.0}

        change = recent_values[-1] - recent_values[0]
        trend = "improving" if change > 0 else "declining" if change < 0 else "stable"

        return {
            "trend": trend,
            "change": change,
            "current": recent_values[-1],
            "previous": recent_values[0]
        }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
  class AutonomousAgentApp:
228
  """Main application class for the Autonomous Agent System"""
 
30
  # Set logging level from environment variable
31
  logging.basicConfig(level=os.getenv('LOG_LEVEL', 'INFO'))
32
 
33
class CodeMetricsAnalyzer:
    """Collects per-file code-quality signals and keeps a running history.

    Pylint, radon complexity, coverage and bandit security results are
    folded into a single weighted score; every analysis is appended to
    ``metrics_history`` so trends can be derived later.
    """

    def __init__(self):
        # One entry is appended per successful analyze_code_quality() call.
        self.metrics_history = []

    def analyze_code_quality(self, file_path: str) -> Dict[str, Any]:
        """Score *file_path* on lint, complexity, coverage and security.

        On success, returns (and records) the full metrics dict; on any
        failure, returns an error dict with a zero quality score.
        """
        try:
            lint = self._run_pylint(file_path)
            cplx = self._analyze_complexity(file_path)
            cov = self._analyze_test_coverage(file_path)
            sec = self._analyze_security(file_path)

            overall = self._calculate_overall_score(lint, cplx, cov, sec)

            entry = {
                "quality_score": overall,
                "pylint_score": lint,
                "complexity_score": cplx,
                "coverage_score": cov,
                "security_score": sec,
                "timestamp": datetime.now(),
            }
            self.metrics_history.append(entry)
            return entry
        except Exception as e:
            logging.error(f"Error analyzing code metrics: {str(e)}")
            return {
                "error": str(e),
                "quality_score": 0.0,
                "timestamp": datetime.now(),
            }

    def _run_pylint(self, file_path: str) -> float:
        """Run pylint and map its 0-10 score onto a 0-1 scale (0.0 on failure)."""
        try:
            json_reporter = JSONReporter()
            Run([file_path], reporter=json_reporter, do_exit=False)
            raw_score = json_reporter.data.get('score', 0.0)
            return float(raw_score) / 10.0  # Normalize to 0-1 scale
        except Exception as e:
            logging.error(f"Pylint analysis error: {str(e)}")
            return 0.0

    def _analyze_complexity(self, file_path: str) -> float:
        """Translate average cyclomatic complexity into a 0-1 score (higher = simpler)."""
        try:
            with open(file_path, 'r') as src:
                source_text = src.read()

            # radon yields one entry per function/method with its complexity.
            blocks = radon.complexity.cc_visit(source_text)
            if blocks:
                mean_cc = sum(b.complexity for b in blocks) / len(blocks)
            else:
                mean_cc = 0

            # An average of 10+ bottoms out at 0.0; empty input scores 1.0.
            return 1.0 - min(mean_cc / 10.0, 1.0)
        except Exception as e:
            logging.error(f"Complexity analysis error: {str(e)}")
            return 0.0

    async def _analyze_current_state(self, project_name: str) -> Dict[str, Any]:
        """Analyze current project state with detailed metrics.

        Gathers code, coverage, security and performance data for the named
        project, checks requirements, and re-raises any error after logging.
        """
        try:
            self.logger.info(f"Analyzing current state for project: {project_name}")

            metrics = await self._collect_code_metrics(project_name)
            self.logger.info("Code metrics collected successfully.")

            coverage = await self._analyze_test_coverage(project_name)
            self.logger.info("Test coverage analysis completed.")

            security = await self._analyze_security(project_name)
            self.logger.info("Security analysis completed.")

            performance = await self._measure_performance(project_name)
            self.logger.info("Performance metrics measured.")

            satisfied = await self._check_requirements(
                metrics,
                coverage,
                security,
                performance
            )
            self.logger.info("Requirements check completed.")

            return {
                "code_metrics": metrics,
                "test_coverage": coverage,
                "security_analysis": security,
                "performance_metrics": performance,
                "meets_requirements": satisfied,
                "timestamp": datetime.now(),
            }
        except Exception as e:
            self.logger.error(f"Error analyzing current state: {str(e)}")
            raise

    def _analyze_security(self, file_path: str) -> float:
        """Derive a 0-1 security score from bandit findings (0.0 on failure)."""
        try:
            bandit_mgr = manager.BanditManager()
            bandit_mgr.discover_files([file_path])
            bandit_mgr.run_tests()

            issue_count = len(bandit_mgr.get_issue_list())
            worst = max((issue.severity for issue in bandit_mgr.get_issue_list()), default=0)

            # Fewer and less severe findings push the score toward 1.0.
            raw = 1.0 - (issue_count * worst) / 10.0
            return max(0.0, min(1.0, raw))
        except Exception as e:
            logging.error(f"Security analysis error: {str(e)}")
            return 0.0

    def _calculate_overall_score(self, pylint_score: float, complexity_score: float,
                                 coverage_score: float, security_score: float) -> float:
        """Blend the four component scores (weights sum to 1.0) and clamp to [0, 1]."""
        blended = (
            0.3 * pylint_score
            + 0.2 * complexity_score
            + 0.25 * coverage_score
            + 0.25 * security_score
        )
        return max(0.0, min(1.0, blended))

    def get_metrics_history(self) -> List[Dict[str, Any]]:
        """Return every recorded metrics dict, oldest first."""
        return self.metrics_history

    def get_trend_analysis(self) -> Dict[str, Any]:
        """Summarize how each tracked metric has moved across the history."""
        if not self.metrics_history:
            return {"status": "No metrics history available"}

        trends = {}
        for metric in ("quality_score", "coverage_score", "security_score"):
            series = [entry[metric] for entry in self.metrics_history]
            trends[metric] = self._calculate_trend(series)
        return trends

    def _calculate_trend(self, values: List[float]) -> Dict[str, Any]:
        """Classify the direction of change over the last three measurements."""
        if not values:
            return {"trend": "unknown", "change": 0.0}

        window = values[-3:]  # only the most recent measurements matter
        if len(window) < 2:
            return {"trend": "insufficient data", "change": 0.0}

        delta = window[-1] - window[0]
        if delta > 0:
            direction = "improving"
        elif delta < 0:
            direction = "declining"
        else:
            direction = "stable"

        return {
            "trend": direction,
            "change": delta,
            "current": window[-1],
            "previous": window[0],
        }
226
 
227
  class AutonomousAgentApp:
228
  """Main application class for the Autonomous Agent System"""