from fastapi import FastAPI, HTTPException, UploadFile, File
from llama_cpp import Llama

# Initialize FastAPI app
app = FastAPI()

# Load the Llama model
try:
    llm = Llama.from_pretrained(
        repo_id="QuantFactory/Lily-Cybersecurity-7B-v0.2-GGUF",
        filename="Lily-Cybersecurity-7B-v0.2.Q3_K_S.gguf",
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}") from e
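
# Note: raw network logs can be long. Llama.from_pretrained forwards extra
# keyword arguments to the Llama constructor, so a larger context window can
# be requested if needed, e.g. n_ctx=4096 (an assumption; tune this to the
# model's training context and available memory).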

# Define the route for security log analysis with file upload
@app.post("/analyze_security_logs")
async def analyze_security_logs(file: UploadFile = File(...)):
    try:
        # Read and decode the uploaded log file; reject non-text uploads early
        raw_bytes = await file.read()
        try:
            log_data = raw_bytes.decode("utf-8")
        except UnicodeDecodeError:
            raise HTTPException(status_code=400, detail="Uploaded file must be UTF-8 text")

        # Security-focused prompt
        prompt = (
            "Analyze the following network log data for any indicators of malicious activity, "
            "such as unusual IP addresses, unauthorized access attempts, data exfiltration, or anomalies. "
            "Provide details on potential threats, IPs involved, and suggest actions if any threats are detected.\n\n"
            f"{log_data}"
        )
        
        # Generate response from the model
        response = llm.create_chat_completion(
            messages=[
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        )
        
        # Extract and return the analysis text
        analysis_text = response["choices"][0]["message"]["content"]
        return {"analysis": analysis_text}
    except HTTPException:
        # Let explicit HTTP errors (e.g., the 400 above) propagate unchanged
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# To run the app, use: uvicorn app:app --reload
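
# Example request once the server is running locally on the default port
# ("network.log" is a hypothetical sample file):
#
#   curl -X POST "http://127.0.0.1:8000/analyze_security_logs" \
#        -F "file=@network.log"
#
# Or, a minimal in-process sketch using FastAPI's TestClient (requires httpx):
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   with open("network.log", "rb") as f:
#       resp = client.post("/analyze_security_logs", files={"file": f})
#   print(resp.json()["analysis"])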