# app/main.py
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from app.controllers.f5_model import TextGenerationHandler
from typing import List, Optional
import logging
from app.helpers.plan_parser import parse_plan_sections
import json
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler('app.log')
]
)
logger = logging.getLogger(__name__)
app = FastAPI(title="F5 Model Test Application")
templates = Jinja2Templates(directory="app/templates")
# Initialize the F5 model
model_handler = TextGenerationHandler()
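# The handler is created once at import time and shared by all request handlers.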
class ChatMessage(BaseModel):
role: str
content: str
class ChatRequest(BaseModel):
messages: List[ChatMessage]
stream: Optional[bool] = False
class FeatureRequest(BaseModel):
requirements: str
class Feature(BaseModel):
feature: str
short_description: str
class FeaturesResponse(BaseModel):
features: List[Feature]
class ProjectPlanRequest(BaseModel):
project_title: str
requirements: str
features: List[str]
platform: str = "AWS" # Default to AWS
additional_requirements: str = ""
class ProjectSection(BaseModel):
title: str
content: str
class ProjectPlan(BaseModel):
executive_summary: str
scope_objectives: dict
architecture_overview: str
component_design: List[dict]
security_compliance: List[str]
deployment_testing: List[str]
team_roles: List[dict]
cost_estimates: dict
project_phases: List[dict]
@app.get("/")
async def index(request: Request):
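    """Serve the main HTML page."""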
return templates.TemplateResponse("index.html", {"request": request})
@app.post("/chat")
async def chat(request: ChatRequest):
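    """Chat endpoint.

    Builds an instruction-style prompt from the message history and either
    streams the model output as Server-Sent Events (when `stream` is true)
    or returns the full response as JSON.
    """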
try:
logger.info(f"Chat request received with {len(request.messages)} messages")
# Improve the prompt with better context
prompt = (
"You are a helpful AI assistant specializing in SaaS applications and software development. "
"Please provide detailed and professional responses.\n\n"
)
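        # User turns are wrapped in [INST] ... [/INST] tags; other turns are
        # appended verbatim so the model sees the full conversation history.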
for msg in request.messages:
if msg.role == "user":
prompt += f"[INST] {msg.content} [/INST]\n"
else:
prompt += f"{msg.content}\n"
logger.info(f"Formatted prompt: {prompt}")
if request.stream:
logger.info("Starting streaming response")
            async def generate():
                async for chunk in model_handler.stream_response(prompt):
                    logger.debug(f"Streaming chunk: {chunk}")
                    # Serialize each chunk so the SSE payload is valid JSON
                    # rather than the Python repr of the chunk object.
                    yield f"data: {json.dumps(chunk)}\n\n"
            return StreamingResponse(generate(), media_type="text/event-stream")
else:
response = await model_handler.generate_response(prompt)
logger.info(f"Generated response: {response}")
return {"response": response}
except Exception as e:
logger.error(f"Error in chat endpoint: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
@app.post("/generate-features", response_model=FeaturesResponse)
async def generate_features(request: FeatureRequest):
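    """Generate candidate SaaS features from free-form requirements.

    Prompts the model for a list of features, parses the plain-text reply
    into `Feature` objects, and falls back to a small default set when
    parsing yields nothing.
    """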
try:
logger.info(f"Feature generation request received with requirements: {request.requirements}")
# Improved prompt for better feature generation
prompt = (
"You are a SaaS product expert. Generate 20 practical features for a SaaS application.\n"
"For each feature:\n"
"1. Provide a short, clear feature name\n"
"2. Write a concise description explaining its value\n"
"Format each feature as:\n"
"Feature Name\n"
"Clear description of what the feature does and its benefits.\n\n"
f"Requirements: {request.requirements}\n\n"
"Generate 20 features in this exact format."
)
logger.info(f"Generated prompt: {prompt}")
response = await model_handler.generate_response(prompt)
logger.info(f"Model response: {response}")
# Parse the response into features
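        # The prompt asks for a feature name line, a description, and a blank
        # line between features, so blank lines delimit feature blocks here.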
features = []
lines = response.split('\n')
current_feature = None
current_description = []
for line in lines:
line = line.strip()
if not line:
if current_feature and current_description:
features.append(Feature(
feature=current_feature,
short_description=' '.join(current_description)
))
current_feature = None
current_description = []
elif not current_feature:
current_feature = line
else:
current_description.append(line)
# Add the last feature if exists
if current_feature and current_description:
features.append(Feature(
feature=current_feature,
short_description=' '.join(current_description)
))
# If no features were parsed, provide fallback features
if not features:
features = [
Feature(
feature="User Management",
short_description="Complete user authentication and authorization system"
),
Feature(
feature="Subscription Billing",
short_description="Automated billing and subscription management"
),
Feature(
feature="Analytics Dashboard",
short_description="Real-time metrics and usage analytics"
),
Feature(
feature="API Integration",
short_description="RESTful API endpoints for third-party integration"
),
Feature(
feature="Multi-tenant Architecture",
short_description="Secure data isolation for multiple customers"
)
]
logger.info(f"Returning features: {features}")
return FeaturesResponse(features=features)
except Exception as e:
logger.error(f"Error in generate-features endpoint: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
@app.post("/generate-plan")
async def generate_plan(request: ProjectPlanRequest):
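    """Generate a structured project plan for the requested platform.

    The model output is parsed into named sections with `parse_plan_sections`
    and streamed back to the client as Server-Sent Events.
    """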
try:
logger.info(f"Plan generation request received for project: {request.project_title}")
async def generate():
features_list = request.features
if features_list and isinstance(features_list[0], dict):
features_str = ', '.join([f.get('feature', '') for f in features_list])
else:
features_str = ', '.join(features_list)
prompt = f"""As a technical project planner, create a detailed project plan for a {request.platform} SaaS application named '{request.project_title}'.
Follow this structure exactly, replacing the placeholders with detailed content:
1. Executive Summary
Brief overview of the {request.project_title} SaaS application
Key business objectives
Technical approach using {request.platform}
Expected outcomes
2. Project Scope and Objectives
Core Features:
- {features_str}
Technical Goals:
- Scalable {request.platform} architecture
- Secure data handling
- High availability
3. Architecture Overview
{request.platform} Components:
- Frontend: [Specify technology]
- Backend: [Specify {request.platform} services]
- Database: [Specify database solution]
- Storage: [Specify storage solution]
- Authentication: [Specify auth service]
4. Component Design
[For each major component, specify:
- Technical specifications
- Integration points
- Performance requirements]
5. Security and Compliance
{request.platform}-specific security measures:
- Data encryption
- Access controls
- Compliance requirements
6. Deployment Strategy
- CI/CD pipeline
- Testing approach
- Monitoring setup
- {request.platform} specific considerations
7. Team Requirements
- Required roles
- Technical skills
- Team structure
8. Cost Estimation
- {request.platform} service costs
- Development costs
- Operational costs
9. Project Timeline
Phase 1: Setup and Infrastructure
Phase 2: Core Development
Phase 3: Testing and Deployment
Phase 4: Launch and Monitoring
Additional Context:
Requirements: {request.requirements}
Additional Requirements: {request.additional_requirements}"""
response_content = ""
async for chunk in model_handler.stream_response(prompt):
if chunk["type"] == "content":
response_content = chunk["content"]
sections = parse_plan_sections(response_content)
                    # Emit the parsed plan to the client as a single SSE "complete" event.
                    payload = {
                        'type': 'complete',
                        'project_title': request.project_title,
                        'sections': sections,
                        'raw_content': response_content
                    }
                    yield f"data: {json.dumps(payload)}\n\n"
                elif chunk["type"] == "error":
                    yield f"data: {json.dumps({'type': 'error', 'content': chunk['content']})}\n\n"
return StreamingResponse(generate(), media_type="text/event-stream")
except Exception as e:
logger.error(f"Error generating plan: {str(e)}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
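# Example request against the streaming chat endpoint (assuming the server is
# running locally on the port configured below):
#
#   curl -N -X POST http://localhost:8000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"messages": [{"role": "user", "content": "Hello"}], "stream": true}'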
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)