from fastapi import FastAPI, HTTPException, UploadFile, File
from llama_cpp import Llama

# Initialize FastAPI app
app = FastAPI()

# Load the Llama model
try:
    llm = Llama.from_pretrained(
        repo_id="QuantFactory/Lily-Cybersecurity-7B-v0.2-GGUF",
        filename="Lily-Cybersecurity-7B-v0.2.Q3_K_S.gguf",
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}")
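# Note: llama-cpp-python's Llama constructor defaults to a small context
# window (n_ctx=512), which long log files will overflow. Extra keyword
# arguments to from_pretrained() are forwarded to the constructor, so
# passing e.g. n_ctx=4096 above is one way to widen it; that value is an
# assumption, so tune it to your logs and available memory.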
# Define the route for security log analysis with file upload
@app.post("/analyze-logs/")  # register the endpoint; the path name is illustrative
async def analyze_security_logs(file: UploadFile = File(...)):
    try:
        # Read the content of the uploaded file
        log_data = await file.read()
        log_data = log_data.decode("utf-8")

        # Security-focused prompt
        prompt = (
            "Analyze the following network log data for any indicators of malicious activity, "
            "such as unusual IP addresses, unauthorized access attempts, data exfiltration, or anomalies. "
            "Provide details on potential threats, IPs involved, and suggest actions if any threats are detected.\n\n"
            f"{log_data}"
        )

        # Generate response from the model
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": prompt}]
        )

        # Extract and return the analysis text
        analysis_text = response["choices"][0]["message"]["content"]
        return {"analysis": analysis_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
# To run the app, use: uvicorn app:app --reload
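With the server running, the endpoint accepts a standard multipart file upload. A minimal client-side check, assuming the route is mounted at /analyze-logs/ as above and that a local sample.log file exists (both names are illustrative):

import requests

# Upload a log file as multipart form data; the field name "file" must
# match the parameter name in analyze_security_logs
with open("sample.log", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8000/analyze-logs/",
        files={"file": ("sample.log", f, "text/plain")},
    )
resp.raise_for_status()

# Print the model's analysis from the JSON response
print(resp.json()["analysis"])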