Canstralian committed on
Commit
eb17996
·
verified ·
1 Parent(s): 76ca6c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -53
app.py CHANGED
@@ -1,60 +1,62 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import time
4
- import random
5
 
6
- # Load the models and tokenizer
7
- model_name = "Canstralian/text2shellcommands" # Choose your model, can be changed based on use case
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
 
11
- # Function to generate shell command or response based on the prompt
12
- def generate_shell_command(prompt):
13
- inputs = tokenizer(prompt, return_tensors="pt")
14
- outputs = model.generate(**inputs, max_length=50, num_return_sequences=1)
15
- command = tokenizer.decode(outputs[0], skip_special_tokens=True)
16
- return command
 
 
17
 
18
- # Function to simulate a retro terminal environment with some 90s hacker vibe
19
- def terminal_ui(prompt):
20
- # Simulate typing effect
21
- fake_typing_effect = [
22
- "Initializing...\n",
23
- "Boot sequence complete...\n",
24
- "Connecting to secure network...\n",
25
- "Accessing restricted files...\n",
26
- "Running diagnostics...\n",
27
- "Command input: " + prompt + "\n"
28
- ]
29
-
30
- # Adding some suspense and random time delays to create that 'hacker' feel
31
- for line in fake_typing_effect:
32
- time.sleep(random.uniform(0.5, 1.5)) # Simulate typing delay
33
- print(line) # Simulate print to terminal
34
- time.sleep(0.3)
35
-
36
- # Get AI-generated response for the command prompt
37
- command_response = generate_shell_command(prompt)
38
-
39
- # Simulate result display with some retro terminal feedback
40
- result_output = f"\n[ SYSTEM STATUS: OK ]\n[ {random.choice(['OK', 'ERROR', 'WARNING'])} ]\n\n"
41
- result_output += f"Command executed: {command_response}\n"
42
- result_output += "[ End of output ]"
43
-
44
- return result_output
45
 
46
- # Create a Gradio interface with a retro terminal design
47
- def retro_terminal_interface(prompt):
48
- result = terminal_ui(prompt)
49
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- # Launch the Gradio app with a terminal theme
52
- iface = gr.Interface(
53
- fn=retro_terminal_interface,
54
- inputs=gr.Textbox(placeholder="Type your shell command here...", label="Enter Command:"),
55
- outputs=gr.Textbox(label="Terminal Output", lines=20, interactive=False),
56
- theme="compact", # Use Gradio's built-in compact theme for a terminal-like feel
57
- live=True # Enable live feedback to simulate a real-time terminal experience
58
- )
59
 
60
- iface.launch()
 
 
1
  import gradio as gr
2
+ import pandas as pd
3
+ from utils.dataset_loader import load_dataset
 
4
 
5
+ # Load dataset
6
+ df = load_dataset()
 
 
7
 
8
+ # Function to simulate conversation with model selection
9
+ def chat_interface(user_input, selected_model, prompt_id=None):
10
+ if df is not None and prompt_id is not None:
11
+ prompt = df.iloc[prompt_id]["prompt_text"] # Replace with the actual column name
12
+ response = f"[{selected_model}] used the debugging prompt: '{prompt}'.\nUser said: '{user_input}'\nResponse: Simulated output."
13
+ else:
14
+ response = f"[{selected_model}] says: You entered '{user_input}'. This is a simulated response."
15
+ return response
16
 
17
+ # List of available models (Updated as per your request)
18
+ models = ["Canstralian/text2shellcommands", "Canstralian/RabbitRedux", "Canstralian/CySec_Known_Exploit_Analyzer"]
19
+ prompt_ids = df.index.tolist() if df is not None else []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
+ # Gradio Interface
22
+ with gr.Blocks(css="./static/styles.css") as demo:
23
+ with gr.Row():
24
+ gr.Markdown("### Retro Hacker Chat with Debugging Prompts", elem_classes="retro-terminal")
25
+ with gr.Row():
26
+ user_input = gr.Textbox(
27
+ label="Enter your message:",
28
+ placeholder="Type your message here...",
29
+ elem_classes="retro-terminal"
30
+ )
31
+ model_selector = gr.Dropdown(
32
+ choices=models,
33
+ label="Select Model",
34
+ value=models[0],
35
+ elem_classes="retro-terminal"
36
+ )
37
+ if prompt_ids:
38
+ prompt_selector = gr.Dropdown(
39
+ choices=prompt_ids,
40
+ label="Select Debugging Prompt ID",
41
+ value=prompt_ids[0],
42
+ elem_classes="retro-terminal"
43
+ )
44
+ else:
45
+ prompt_selector = None
46
+ with gr.Row():
47
+ response_box = gr.Textbox(
48
+ label="Model Response:",
49
+ placeholder="The model's response will appear here...",
50
+ elem_classes="retro-terminal"
51
+ )
52
+ with gr.Row():
53
+ send_button = gr.Button("Send", elem_classes="retro-terminal")
54
 
55
+ # Link input and output
56
+ if prompt_selector:
57
+ send_button.click(chat_interface, inputs=[user_input, model_selector, prompt_selector], outputs=response_box)
58
+ else:
59
+ send_button.click(chat_interface, inputs=[user_input, model_selector], outputs=response_box)
 
 
 
60
 
61
+ # Launch the interface
62
+ demo.launch()