Update app.py
app.py CHANGED

@@ -225,8 +225,6 @@ gemini = os.getenv("GEMINI")
 genai.configure(api_key=gemini)
 model = genai.GenerativeModel("gemini-1.5-flash")
 
-
-
 def read_project_files(project_path):
     """Reads all files in the project directory and its subdirectories."""
     file_paths = []
@@ -298,6 +296,69 @@ def identify_required_functions(project_path, functionality_description):
     # Process and return the response
     return response.text
 
+def generate_qwen_documentation(function_list, project_path, functionality_description):
+    """
+    Iteratively generates documentation for functions using Qwen and ensures all are fully documented.
+    Args:
+        function_list (list): Initial list of functions related to the specified functionality.
+        project_path (str): Path to the project directory.
+        functionality_description (str): Description of the functionality.
+    Returns:
+        dict: A dictionary with Qwen-generated documentation for each function.
+    """
+    completed_documentation = {}
+    remaining_functions = function_list
+
+    while remaining_functions:
+        # Generate documentation for remaining functions using Qwen
+        qwen_outputs = query_qwen_for_documentation(remaining_functions, project_path)
+
+        # Clean and validate Qwen's output using Gemini
+        completed, truncated = clean_and_validate_qwen_output(qwen_outputs, remaining_functions)
+
+        # Update documentation and determine remaining functions
+        completed_documentation.update(completed)
+        remaining_functions = truncated  # Retry only truncated or missing functions
+
+    return completed_documentation
+
+def query_qwen_for_documentation(function_list, project_path):
+    """
+    Queries Hugging Face API to generate documentation for functions.
+    Args:
+        function_list (list): List of functions for documentation.
+        project_path (str): Path to the project directory.
+    Returns:
+        dict: Hugging Face API's output for each function.
+    """
+
+    passkey = os.getenv("QWEN")  # Hugging Face API key from environment variables
+    api_url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-14B-Instruct"
+    headers = {"Authorization": f"Bearer {passkey}"}
+
+    outputs = {}
+    for file_name, function in function_list:
+        prompt = f"Generate detailed documentation for the following function in file `{file_name}`:\n{function}"
+        payload = {"inputs": prompt, "parameters": {"max_length": 1024, "truncate": True}}
+
+        # Send the request to the Hugging Face API
+        try:
+            response = requests.post(api_url, headers=headers, json=payload)
+            response.raise_for_status()
+            result = response.json()
+
+            # Extract generated text from the API response
+            if "generated_text" in result:
+                outputs[(file_name, function)] = {"prompt": prompt, "output": result["generated_text"]}
+            else:
+                st.error(f"No generated text for function: {function}. Response: {result}")
+
+        except requests.exceptions.RequestException as e:
+            st.error(f"Error querying Hugging Face API for function {function}: {e}")
+            outputs[(file_name, function)] = {"prompt": prompt, "output": None}
+
+    return outputs
+
 
 def extract_cleaned_gemini_output(gemini_output):
     """
@@ -330,14 +391,12 @@ def extract_cleaned_gemini_output(gemini_output):
     return "\n".join(line for line in cleaned_output if line)
 
 
-
-
-
-
 def split_into_chunks(content, chunk_size=1000):
     """Splits large content into smaller chunks."""
    return [content[i:i + chunk_size] for i in range(0, len(content), chunk_size)]
 
+
+
 def generate_detailed_documentation(file_contents, functionality_description):
     """
     Generates detailed documentation using Gemini directly.