import gradio as gr
from huggingface_hub import InferenceClient
from typing import Any, Iterator, List, Tuple
import os
import re

# InferenceClient() with no arguments uses the default Hugging Face Inference API endpoint.
client = InferenceClient()
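# A more explicit configuration might look like this (model id and env var name are
# assumptions for illustration, not part of the original code):
# client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))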

def is_valid_input(input_str: str) -> bool:
    """
    Validates the input using regex to prevent malicious patterns.
    
    Args:
        input_str (str): The user's input string to be validated.
    
    Returns:
        bool: True if input is safe; False otherwise.
    """
    pattern = r'^[A-Za-z0-9\s,.!?-]*$'  # Allows alphanumeric and some punctuation
    return bool(re.match(pattern, input_str))
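
# Quick illustrative checks of the validator (not part of the app flow):
#   is_valid_input("Scan ports 80, 443 and report findings!")  -> True
#   is_valid_input("rm -rf / ; echo pwned")                    -> False ('/' and ';' are rejected)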

def generate_attack(
    prompt: str,
    history: List[Tuple[str, str]],
    uploaded_file: Any = None,
    system_message: str = "You are an AI simulator for cybersecurity training.",
) -> Iterator[str]:
    """
    Simulates a Blackhat AI scenario by streaming attack strategies and potential impacts.

    Args:
        prompt (str): The user's input to the simulator.
        history (List[Tuple[str, str]]): The conversation history, where each tuple
            contains (user_message, assistant_response).
        uploaded_file (Any): An optional file from the UI; a short summary of it is
            prepended to the prompt as extra context.
        system_message (str): The system prompt that frames the simulation.

    Yields:
        str: The accumulated response so far, updated as tokens stream in.
    """
    if not is_valid_input(prompt):
        raise ValueError("Invalid input detected. Please use only alphanumeric characters and allowed punctuation.")

    # Use the configurable system message from the UI rather than a hard-coded one.
    messages = [
        {"role": "system", "content": system_message}
    ]

    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # If a file was uploaded, add its summary as context ahead of the user's prompt.
    if uploaded_file is not None:
        prompt = f"{handle_file_upload(uploaded_file)}\n\n{prompt}"

    messages.append({"role": "user", "content": prompt})

    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=100,  # limit the length of responses
        stream=True,
        temperature=0.5,  # moderate temperature for more focused output
        top_p=None,      # disable top-p filtering
    ):
        token = message.choices[0].delta.content or ""  # final chunk may carry no content
        response += token
        yield response

def simulate_attack(
    prompt: str,
    history: List[Tuple[str, str]],
) -> Iterator[str]:
    """
    Simulates a Blackhat AI scenario by streaming attack strategies and potential impacts.

    Args:
        prompt (str): The user's input to the simulator.
        history (List[Tuple[str, str]]): The conversation history, where each tuple
            contains (user_message, assistant_response).

    Yields:
        str: The accumulated response so far, updated as tokens stream in.
    """
    if not is_valid_input(prompt):
        raise ValueError("Invalid input detected. Please use only alphanumeric characters and allowed punctuation.")
    
    messages = [
        {"role": "system", "content": "Simulating a Blackhat AI scenario..."}
    ]
    
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": prompt})
    
    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=100,  # limit the length of responses
        stream=True,
        temperature=0.5,  # moderate temperature for more focused output
        top_p=None,      # disable top-p filtering
    ):
        token = message.choices[0].delta.content or ""  # final chunk may carry no content
        response += token
        yield response

# Define the Gradio ChatInterface with file upload and security-focused configuration
def handle_file_upload(file: Any) -> str:
    """
    Summarizes the contents of an uploaded text-based file.

    Args:
        file (Any): The uploaded file, either a temporary file path (str) or a
            file-like object with a `.name` attribute, depending on the Gradio version.

    Returns:
        str: A response or summary of the file content.
    """
    if file is None:
        return "No file uploaded."

    # Recent Gradio versions pass uploads as temporary file paths rather than file objects.
    file_path = file if isinstance(file, str) else file.name
    file_type = file_path.split('.')[-1].lower()
    if file_type in ['txt', 'json', 'csv']:
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        return f"File uploaded: {os.path.basename(file_path)}, {len(content)} characters."
    return "Unsupported file type. Please upload a .txt, .json, or .csv file."

demo = gr.ChatInterface(
    fn=generate_attack,
    textbox=gr.Textbox(label="User Prompt", placeholder="Enter attack scenario...", lines=2),
    additional_inputs=[
        gr.File(label="Upload File", file_types=[".txt", ".json", ".csv"], file_count="single"),
        gr.Textbox(
            value=(
                "You are an AI simulator for cybersecurity training, designed to generate "
                "attack scenarios, analyze their impacts, and suggest countermeasures."
            ),
            label="System Message",
        ),
    ],
    title="Blackhat AI Simulator",
    description=(
        "This simulator generates adversarial scenarios, analyzes attack vectors, and provides ethical countermeasures. "
        "Use responsibly for cybersecurity training and awareness."
    ),
)
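
# Note: gr.ChatInterface calls the function with (message, history) followed by each
# additional input in order, so generate_attack receives
# (prompt, history, uploaded_file, system_message).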

if __name__ == "__main__":
    demo.launch()
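    # Optional: demo.launch(share=True) would also expose a temporary public link for demos.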