Update app.py
app.py CHANGED
@@ -209,6 +209,8 @@ gemini = os.getenv("GEMINI")
 genai.configure(api_key=gemini)
 model = genai.GenerativeModel("gemini-1.5-flash")
 
+
+
 def read_project_files(project_path):
     """Reads all files in the project directory and its subdirectories."""
     file_paths = []
@@ -270,17 +272,13 @@ def identify_required_functions(project_path, functionality_description):
     return response.text
 
 
-# Load the Hugging Face model and tokenizer
-model_name = "Qwen/Qwen2.5-Coder-32B-Instruct"
-hf_model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype="auto",
-    device_map="auto"
-)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+# Hugging Face Inference API endpoint for the model
+API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
+qwen = os.getenv("QWEN")
+headers = {"Authorization": f"Bearer {qwen}"}
 
-def validate_and_generate_documentation(hf_model, tokenizer, gemini_output, file_contents, functionality_description):
-    """Uses the Hugging Face model to validate functions and generate documentation."""
+def validate_and_generate_documentation(api_url, headers, gemini_output, file_contents, functionality_description):
+    """Uses the Hugging Face Inference API to validate functions and generate documentation."""
     # Generate the prompt for the Qwen model
     prompt = f"""
     User-specified functionality: '{functionality_description}'
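Note on the new payload: the removed in-process call pinned max_new_tokens=2048, while {"inputs": prompt} leaves every generation setting at the endpoint's defaults. The serverless Inference API also accepts a "parameters" object alongside "inputs"; a minimal sketch of carrying the old token budget over (the prompt string and timeout are illustrative values, not from this commit):

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
headers = {"Authorization": f"Bearer {os.getenv('QWEN')}"}

# Carry the old max_new_tokens=2048 budget over to the hosted endpoint.
# return_full_text=False asks for only the completion, mirroring how the
# removed code sliced the prompt tokens off the generated ids.
payload = {
    "inputs": "Summarize what read_project_files does.",
    "parameters": {"max_new_tokens": 2048, "return_full_text": False},
}
response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
response.raise_for_status()
print(response.json())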
@@ -290,7 +288,7 @@ def validate_and_generate_documentation(hf_model, tokenizer, gemini_output, file_contents, functionality_description):
     Project files:
     """
     for file_path, content in file_contents.items():
-        # Truncate content to avoid excessively large prompts
+        # Truncate content to avoid excessively large payloads
         truncated_content = content[:1000] if len(content) > 1000 else content
         prompt += f"File: {os.path.basename(file_path)}\n{truncated_content}\n\n"
 
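Small simplification: content[:1000] if len(content) > 1000 else content is equivalent to content[:1000], since slicing already caps the length. A per-file cap alone also lets the prompt grow without bound when many files are attached; a hypothetical helper that adds a total budget as well (names and limits are illustrative, not part of this change):

def truncate_for_prompt(file_contents, per_file=1000, total=20000):
    """Cap each file at per_file characters and stop once the total budget is spent."""
    used = 0
    out = {}
    for path, content in file_contents.items():
        snippet = content[:per_file]
        if used + len(snippet) > total:
            break  # budget exhausted; drop the remaining files
        out[path] = snippet
        used += len(snippet)
    return out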
@@ -318,30 +316,15 @@ def validate_and_generate_documentation(hf_model, tokenizer, gemini_output, file_contents, functionality_description):
     - Data structures: <Details of data structures used>
     """
 
-    # Prepare the chat-formatted input for the model
-    messages = [
-        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
-        {"role": "user", "content": prompt}
-    ]
-    text = tokenizer.apply_chat_template(
-        messages,
-        tokenize=False,
-        add_generation_prompt=True
-    )
-    model_inputs = tokenizer([text], return_tensors="pt", truncation=True, max_length=32768).to(hf_model.device)
-
-    # Generate output from the model
-    generated_ids = hf_model.generate(
-        **model_inputs,
-        max_new_tokens=2048
-    )
-    generated_ids = [
-        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-    ]
-
-    # Decode and return the response
-    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-    return response
+    # Send the prompt to the Hugging Face API
+    payload = {"inputs": prompt}
+    response = requests.post(api_url, headers=headers, json=payload)
+
+    # Handle the API response
+    if response.status_code == 200:
+        return response.json().get("generated_text", "No output generated.")
+    else:
+        raise ValueError(f"Error: {response.status_code}, {response.text}")
 
 def generate_documentation_page():
     st.subheader(f"Generate Documentation for {st.session_state.current_project}")
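One caveat in the new response handling: for text-generation models the serverless Inference API typically returns a JSON list of the form [{"generated_text": ...}], and calling .get(...) on a list raises AttributeError. A shape-tolerant parser, offered here as a sketch rather than part of the commit, could replace the return line:

def extract_generated_text(data):
    """Accept both response shapes the hosted API is known to return."""
    if isinstance(data, list) and data:
        return data[0].get("generated_text", "No output generated.")
    if isinstance(data, dict):
        return data.get("generated_text", "No output generated.")
    return "No output generated."

# inside validate_and_generate_documentation:
# return extract_generated_text(response.json())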
@@ -368,9 +351,9 @@ def generate_documentation_page():
         file_paths = read_project_files(project_folder)
         file_contents = read_files(file_paths)
 
-        # Call the Hugging Face model for validation and documentation
+        # Call the Hugging Face API for validation and documentation
         final_documentation = validate_and_generate_documentation(
-            hf_model, tokenizer, gemini_result, file_contents, functionality
+            API_URL, headers, gemini_result, file_contents, functionality
         )
 
         # Display the final documentation
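Since validate_and_generate_documentation now raises ValueError on any non-200 response (the serverless API can also return 503 while a model this large spins up), the Streamlit caller may want to surface the failure instead of crashing the page. A sketch of the call site under that assumption, using the names visible in the diff:

try:
    final_documentation = validate_and_generate_documentation(
        API_URL, headers, gemini_result, file_contents, functionality
    )
except ValueError as err:
    # Surface auth errors, 503 model-loading waits, etc. in the UI.
    st.error(f"Documentation generation failed: {err}")
    final_documentation = None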