saifeddinemk committed on
Commit
66279bf
1 Parent(s): f9436bc

Fixed app v2

Browse files
Files changed (1) hide show
  1. app.py +26 -35
app.py CHANGED
@@ -1,4 +1,4 @@
1
- from fastapi import FastAPI, HTTPException, UploadFile, File
2
  from pydantic import BaseModel
3
  from llama_cpp import Llama
4
 
@@ -14,48 +14,39 @@ try:
14
  except Exception as e:
15
  raise RuntimeError(f"Failed to load model: {e}")
16
 
 
 
 
 
17
  # Define response model
18
  class AnalysisResponse(BaseModel):
19
  analysis: str
20
 
21
- # Define the route for security log analysis with file upload
22
  @app.post("/analyze_security_logs", response_model=AnalysisResponse)
23
- async def analyze_security_logs(file: UploadFile = File(...)):
24
  try:
25
- # Read the content of the uploaded file
26
- log_data = await file.read()
27
- log_data = log_data.decode("utf-8")
28
-
29
- # Set maximum tokens for each chunk
30
- max_chunk_size = 512 # Adjust this based on model's token limit
31
- log_chunks = [log_data[i:i+max_chunk_size] for i in range(0, len(log_data), max_chunk_size)]
32
 
33
- # Accumulate the results
34
- full_analysis = ""
35
- for chunk in log_chunks:
36
- # Security-focused prompt with each chunk
37
- prompt = (
38
- "Analyze the following network log data for any indicators of malicious activity, "
39
- "such as unusual IP addresses, unauthorized access attempts, data exfiltration, or anomalies. "
40
- "Provide details on potential threats, IPs involved, and suggest actions if any threats are detected.\n\n"
41
- f"{chunk}"
42
- )
43
-
44
- # Generate response from the model
45
- response = llm.create_chat_completion(
46
- messages=[
47
- {
48
- "role": "user",
49
- "content": prompt
50
- }
51
- ]
52
- )
53
-
54
- # Extract and accumulate analysis text
55
- analysis_text = response["choices"][0]["message"]["content"]
56
- full_analysis += analysis_text + "\n"
57
 
58
- return AnalysisResponse(analysis=full_analysis)
 
 
59
  except Exception as e:
60
  raise HTTPException(status_code=500, detail=str(e))
61
 
 
1
+ from fastapi import FastAPI, HTTPException
2
  from pydantic import BaseModel
3
  from llama_cpp import Llama
4
 
 
14
  except Exception as e:
15
  raise RuntimeError(f"Failed to load model: {e}")
16
 
17
class LogRequest(BaseModel):
    """Request payload: the raw network-log text to analyze."""

    # Raw log contents submitted by the client.
    log_data: str
20
+
21
class AnalysisResponse(BaseModel):
    """Response payload: the model-generated analysis text."""

    # Free-form analysis produced by the LLM.
    analysis: str
24
 
25
# Route: run a security-focused LLM analysis over the submitted log text.
@app.post("/analyze_security_logs", response_model=AnalysisResponse)
async def analyze_security_logs(request: LogRequest):
    """Analyze client-supplied network logs for indicators of malicious activity.

    Sends the raw log text to the loaded llama.cpp model wrapped in a
    security-analysis prompt and returns the model's reply. Any failure is
    surfaced to the caller as an HTTP 500 whose detail is the error text.
    """
    try:
        # Build the security-analysis prompt around the raw log payload.
        prompt = (
            "Analyze the following network log data for any indicators of malicious activity, "
            "such as unusual IP addresses, unauthorized access attempts, data exfiltration, or anomalies. "
            "Provide details on potential threats, IPs involved, and suggest actions if any threats are detected.\n\n"
            f"{request.log_data}"
        )

        # Single-turn chat completion: one user message carrying the prompt.
        # NOTE(review): this call is synchronous and will block the event loop
        # while the model generates — confirm this is acceptable for the
        # expected request volume.
        completion = llm.create_chat_completion(
            messages=[{"role": "user", "content": prompt}]
        )

        # Extract the assistant's text from the OpenAI-style response payload.
        return AnalysisResponse(
            analysis=completion["choices"][0]["message"]["content"]
        )
    except Exception as e:
        # Boundary handler: report any failure as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(e))
52