Commit 11e3d09 (parent: a7a10f6)
Trying the 3B model
Files changed: model/analyzer.py (+4, -4)
model/analyzer.py CHANGED

@@ -33,7 +33,7 @@ class ContentAnalyzer:
 
         print("Loading tokenizer...")
         self.tokenizer = AutoTokenizer.from_pretrained(
-            "meta-llama/Llama-3.2-
+            "meta-llama/Llama-3.2-3B",
            use_fast=True
         )
 
@@ -42,7 +42,7 @@ class ContentAnalyzer:
 
         print(f"Loading model on {self.device}...")
         self.model = AutoModelForCausalLM.from_pretrained(
-            "meta-llama/Llama-3.2-
+            "meta-llama/Llama-3.2-3B",
             token=self.hf_token,
             torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
             device_map="auto"
@@ -278,7 +278,7 @@ async def analyze_content(
         result = {
             "detected_triggers": triggers,
             "confidence": "High - Content detected" if triggers != ["None"] else "High - No concerning content detected",
-            "model": "Llama-3.2-
+            "model": "Llama-3.2-3B",
             "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         }
 
@@ -293,7 +293,7 @@ async def analyze_content(
         return {
             "detected_triggers": ["Error occurred during analysis"],
             "confidence": "Error",
-            "model": "Llama-3.2-
+            "model": "Llama-3.2-3B",
             "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
             "error": str(e)
         }
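For context, the loading path the first two hunks touch, reassembled as a minimal standalone sketch. It only assumes the transformers calls visible in the hunks (AutoTokenizer, AutoModelForCausalLM, the hf_token and device attributes); the class scaffolding around them is a reconstruction, not a copy of model/analyzer.py.

# Minimal sketch of the tokenizer/model loading touched by this commit.
# The __init__ scaffolding is assumed; the from_pretrained calls mirror the hunks above.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

class ContentAnalyzer:
    def __init__(self, hf_token: str):
        self.hf_token = hf_token
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        print("Loading tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Llama-3.2-3B",   # model id introduced by this commit
            use_fast=True
        )

        print(f"Loading model on {self.device}...")
        self.model = AutoModelForCausalLM.from_pretrained(
            "meta-llama/Llama-3.2-3B",   # model id introduced by this commit
            token=self.hf_token,
            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
            device_map="auto"            # requires accelerate to be installed
        )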
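The last two hunks update the "model" field in the payloads returned by analyze_content. A rough sketch of those two payload shapes follows, reconstructed from the context lines only; the helper function names are hypothetical and do not appear in the repository.

# Sketch of the success and error payloads built in analyze_content.
# build_success_payload / build_error_payload are illustrative names, not from the repo.
from datetime import datetime

def build_success_payload(triggers):
    return {
        "detected_triggers": triggers,
        "confidence": "High - Content detected" if triggers != ["None"]
                      else "High - No concerning content detected",
        "model": "Llama-3.2-3B",
        "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }

def build_error_payload(e: Exception):
    return {
        "detected_triggers": ["Error occurred during analysis"],
        "confidence": "Error",
        "model": "Llama-3.2-3B",
        "analysis_timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "error": str(e)
    }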