Spaces:
Running
Running
import gradio as gr | |
from huggingface_hub import InferenceClient | |
from transformers import pipeline | |
from typing import List, Dict, Any, Tuple | |
import re | |
# Assuming that `InferenceClient` is initialized properly | |
client = InferenceClient() | |
def is_valid_input(input_str: str) -> bool: | |
""" | |
Validates the input using regex to prevent malicious patterns. | |
Args: | |
input_str (str): The user's input string to be validated. | |
Returns: | |
bool: True if input is safe; False otherwise. | |
""" | |
pattern = r'^[A-Za-z0-9\s,.!?-]*$' # Allows alphanumeric and some punctuation | |
return bool(re.match(pattern, input_str)) | |
def generate_attack( | |
prompt: str, | |
history: List[Tuple[str, str]], | |
) -> List[str]: | |
""" | |
Simulates a Blackhat AI scenario by generating attack strategies and potential impacts. | |
Args: | |
prompt (str): The user's input to the simulator. | |
history (List[Tuple[str, str]]): The user's message history with timestamps, where each tuple contains (user_message, assistant_response). | |
Returns: | |
List[str]: A list of attack responses from the AI. | |
""" | |
if not is_valid_input(prompt): | |
raise ValueError("Invalid input detected. Please use only alphanumeric characters and allowed punctuation.") | |
messages = [ | |
{"role": "system", "content": f"Responding to {prompt}..."} | |
] | |
for user_msg, assistant_msg in history: | |
if user_msg: | |
messages.append({"role": "user", "content": user_msg}) | |
if assistant_msg: | |
messages.append({"role": "assistant", "content": assistant_msg}) | |
messages.append({"role": "user", "content": prompt}) | |
response = "" | |
for message in client.chat_completion( | |
messages, | |
max_tokens=100, # limit the length of responses | |
stream=True, | |
temperature=0.5, # adjust the temperature to improve response quality | |
top_p=None, # disable top-p filtering | |
): | |
token = message.choices[0].delta.content | |
response += token | |
yield response | |
def simulate_attack( | |
prompt: str, | |
history: List[Tuple[str, str]], | |
) -> List[str]: | |
""" | |
Simulates a Blackhat AI scenario by generating attack strategies and potential impacts. | |
Args: | |
prompt (str): The user's input to the simulator. | |
history (List[Tuple[str, str]]): The user's message history with timestamps, where each tuple contains (user_message, assistant_response). | |
Returns: | |
List[str]: A list of attack responses from the AI. | |
""" | |
if not is_valid_input(prompt): | |
raise ValueError("Invalid input detected. Please use only alphanumeric characters and allowed punctuation.") | |
messages = [ | |
{"role": "system", "content": "Simulating a Blackhat AI scenario..."} | |
] | |
for user_msg, assistant_msg in history: | |
if user_msg: | |
messages.append({"role": "user", "content": user_msg}) | |
if assistant_msg: | |
messages.append({"role": "assistant", "content": assistant_msg}) | |
messages.append({"role": "user", "content": prompt}) | |
response = "" | |
for message in client.chat_completion( | |
messages, | |
max_tokens=100, # limit the length of responses | |
stream=True, | |
temperature=0.5, # adjust the temperature to improve response quality | |
top_p=None, # disable top-p filtering | |
): | |
token = message.choices[0].delta.content | |
response += token | |
yield response | |
# Define the Gradio ChatInterface with file upload and security-focused configuration | |
def handle_file_upload(file: Any) -> str: | |
""" | |
Handles file uploads by extracting contents or performing actions based on the file type. | |
Args: | |
file (Any): The uploaded file. | |
Returns: | |
str: A response or summary of the file content. | |
""" | |
if file is None: | |
return "No file uploaded." | |
file_type = file.name.split('.')[-1].lower() | |
if file_type in ['txt', 'json', 'csv']: | |
content = file.read().decode("utf-8") | |
return f"File uploaded: {file.name}, {len(content)} characters." | |
else: | |
return "Unsupported file type. Please upload a .txt, .json, or .csv file." | |
demo = gr.Interface( | |
fn=generate_attack, | |
inputs=[ | |
gr.Textbox(label="User Prompt", placeholder="Enter attack scenario...", lines=2), | |
gr.File(label="Upload File", file_types=["txt", "json", "csv"], file_count="single"), | |
gr.Textbox(value="You are an AI simulator for cybersecurity training, designed to generate attack scenarios, analyze their impacts, and suggest countermeasures.", label="System Message") | |
], | |
outputs="text", | |
title="Blackhat AI Simulator", | |
description=( | |
"This simulator generates adversarial scenarios, analyzes attack vectors, and provides ethical countermeasures. " | |
"Use responsibly for cybersecurity training and awareness." | |
), | |
theme="dark" | |
) | |
if __name__ == "__main__": | |
demo.launch() | |