saifeddinemk committed on
Commit
3588fbc
1 Parent(s): dc39bf5

Fixed app v2

Files changed (1)
  1. app.py +6 -18
app.py CHANGED
@@ -1,19 +1,7 @@
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
+ from transformers import pipeline

- # Load the tokenizer and model
- tokenizer = AutoTokenizer.from_pretrained("ZySec-AI/SecurityLLM")
- model = AutoModelForCausalLM.from_pretrained("ZySec-AI/SecurityLLM")
-
- # Example prompt for generating a response related to security
- security_prompt = "Analyze the following network log for potential security issues: 2024-11-08 12:30:00 SRC_IP=192.168.1.1 DEST_IP=10.0.0.5 PROTOCOL=TCP PACKET_SIZE=1500 SRC_PORT=443 DEST_PORT=80"
-
- # Tokenize the prompt
- inputs = tokenizer(security_prompt, return_tensors="pt")
-
- # Generate a response from the model
- output = model.generate(inputs['input_ids'], max_length=150, num_return_sequences=1, no_repeat_ngram_size=2)
-
- # Decode and print the generated text
- generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
- print("Generated Response:\n", generated_text)
+ messages = [
+     {"role": "user", "content": "Who are you?"},
+ ]
+ pipe = pipeline("text-generation", model="segolilylabs/Lily-Cybersecurity-7B-v0.2")
+ pipe(messages)
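
Note on the new version: as committed, pipe(messages) generates a reply but never captures or prints it. A minimal sketch of how the output could be surfaced, assuming a transformers version whose text-generation pipeline accepts chat-style message lists; the max_new_tokens cap and the print step are illustrative additions, not part of this commit:

from transformers import pipeline

# Build the pipeline exactly as in the commit.
pipe = pipeline("text-generation", model="segolilylabs/Lily-Cybersecurity-7B-v0.2")

messages = [
    {"role": "user", "content": "Who are you?"},
]

# For chat-style input, the pipeline returns a list with one dict per
# sequence; "generated_text" holds the conversation including the reply.
# max_new_tokens=150 is an assumed cap, not taken from the commit.
outputs = pipe(messages, max_new_tokens=150)
print(outputs[0]["generated_text"][-1]["content"])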