Commit 481806b · Fixing errors
1 parent: 3514b7e

Files changed:
- gradio_app.py  +8 -39
- model/analyzer.py  +7 -6
gradio_app.py  CHANGED
@@ -1,45 +1,14 @@
 import gradio as gr
 from model.analyzer import analyze_content
-import time  # For simulating the loading bar
 
-#
-
-
-
-
-
-
-
+# Create and launch the Gradio interface
+iface = gr.Interface(
+    fn=analyze_content,
+    inputs=gr.Textbox(lines=8, label="Input Text"),
+    outputs=gr.JSON(),
+    title="Content Analysis",
+    description="Analyze text content for sensitive topics"
+)
 
-# Create the Gradio interface
-# Create the Gradio interface
-with gr.Blocks(css=".center-text {text-align: center;} .gradient-bg {background: linear-gradient(135deg, #ff9a9e, #fad0c4);}") as iface:
-    # Header with centered text
-    gr.Markdown(
-        """
-        <div class="center-text">
-        <h1><b>TREAT</b></h1>
-        <h3><b>Trigger Recognition for Enjoyable and Appropriate Television</b></h3>
-        </div>
-        """,
-        elem_classes="gradient-bg"
-    )
-
-    # Input Section
-    script_input = gr.Textbox(lines=8, label="Input Text", placeholder="Paste your script here...")
-    analyze_button = gr.Button("Analyze Content")
-
-    # Loading Bar and Results
-    loading_bar = gr.Textbox(label="Progress", interactive=False)
-    results_output = gr.JSON(label="Results")
-
-    # Connect the button to the function
-    analyze_button.click(
-        fn=analyze_with_loading,
-        inputs=script_input,
-        outputs=[loading_bar, results_output],
-    )
-
-# Launch the app
 if __name__ == "__main__":
     iface.launch()
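The removed Blocks layout is the error this commit clears out: its click handler referenced analyze_with_loading, a function the file never defined, so running the old app raised a NameError. If the button-plus-progress layout were wanted again, a minimal sketch of the missing wrapper could look like the following (the wrapper body and the trimmed-down layout are assumptions, not code from this repository):

import gradio as gr
from model.analyzer import analyze_content

def analyze_with_loading(script):
    # Hypothetical wrapper: run the analysis, then return a status string
    # and the result for the two output components.
    result = analyze_content(script)
    return "Analysis complete", result

with gr.Blocks() as iface:
    script_input = gr.Textbox(lines=8, label="Input Text", placeholder="Paste your script here...")
    analyze_button = gr.Button("Analyze Content")
    loading_bar = gr.Textbox(label="Progress", interactive=False)
    results_output = gr.JSON(label="Results")
    analyze_button.click(
        fn=analyze_with_loading,
        inputs=script_input,
        outputs=[loading_bar, results_output],
    )

if __name__ == "__main__":
    iface.launch()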
model/analyzer.py  CHANGED
@@ -25,7 +25,7 @@ def analyze_script(script):
     # Load model with token authentication
     model = AutoModelForCausalLM.from_pretrained(
         "meta-llama/Llama-3.2-1B",
-
+        token=hf_token,  # Pass the token to authenticate
         torch_dtype=torch.float16 if device == "cuda" else torch.float32,  # Use 16-bit precision for CUDA, 32-bit for CPU
         device_map="auto"  # Automatically map model to available device
     )
@@ -188,11 +188,12 @@ def analyze_script(script):
 
     return final_triggers
 
+# Define the Gradio interface
 def analyze_content(script):
-    #
+    # Perform the analysis on the input script using the analyze_script function
     triggers = analyze_script(script)
-
-    # Define the result based on triggers found
+
+    # Define the result based on the triggers found
     if isinstance(triggers, list) and triggers != ["None"]:
         result = {
             "detected_triggers": triggers,
@@ -207,7 +208,7 @@ def analyze_content(script):
         "model": "Llama-3.2-1B",
         "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     }
-
+
     print("\nFinal Result Dictionary:", result)
     return result
 
@@ -221,4 +222,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
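The substantive fix in model/analyzer.py is the token=hf_token argument: meta-llama/Llama-3.2-1B is a gated repository, so from_pretrained must authenticate or the download fails. The diff does not show where hf_token comes from; a minimal sketch of the loading block, assuming the token is read from an HF_TOKEN environment variable (for example a Space secret), is:

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

hf_token = os.environ.get("HF_TOKEN")  # assumed source of the token; not shown in this diff
device = "cuda" if torch.cuda.is_available() else "cpu"

# The tokenizer load is assumed; it is not part of this diff but needs the same token.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B", token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",
    token=hf_token,  # authenticate access to the gated model
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,  # 16-bit on GPU, 32-bit on CPU
    device_map="auto",  # automatic placement; requires the accelerate package
)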