saifeddinemk committed • Commit dc39bf5 • 1 Parent(s): 6b101f4
Fixed app v2
app.py CHANGED
@@ -1,3 +1,19 @@
-import
-
-
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Load the tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained("ZySec-AI/SecurityLLM")
+model = AutoModelForCausalLM.from_pretrained("ZySec-AI/SecurityLLM")
+
+# Example prompt for generating a response related to security
+security_prompt = "Analyze the following network log for potential security issues: 2024-11-08 12:30:00 SRC_IP=192.168.1.1 DEST_IP=10.0.0.5 PROTOCOL=TCP PACKET_SIZE=1500 SRC_PORT=443 DEST_PORT=80"
+
+# Tokenize the prompt
+inputs = tokenizer(security_prompt, return_tensors="pt")
+
+# Generate a response from the model
+output = model.generate(inputs['input_ids'], max_length=150, num_return_sequences=1, no_repeat_ngram_size=2)
+
+# Decode and print the generated text
+generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+print("Generated Response:\n", generated_text)
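
For reference, a minimal sketch (not part of the commit) of how the committed snippet could be wrapped in a reusable helper. The function name analyze_log is hypothetical; the only deviations from the committed code are passing the full tokenizer output to generate (so it also receives the attention_mask) and running generation under torch.no_grad() since this is inference only.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Same model as in the commit
tokenizer = AutoTokenizer.from_pretrained("ZySec-AI/SecurityLLM")
model = AutoModelForCausalLM.from_pretrained("ZySec-AI/SecurityLLM")

def analyze_log(prompt: str, max_length: int = 150) -> str:
    # Tokenize the prompt; the returned dict holds input_ids and attention_mask
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate without tracking gradients (inference only)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_length=max_length,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
        )
    # Decode the generated token ids back to text
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(analyze_log("Analyze the following network log for potential security issues: ..."))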