Update app.py
app.py
CHANGED
@@ -19,15 +19,20 @@ from transformers import pipeline
 nsfw_detector = pipeline("text-classification", model="michellejieli/NSFW_text_classifier")
 
 def synthesize_speech(text):
-    #
+    # Check for NSFW content using the classifier
+    nsfw_result = nsfw_detector(text)
+    # Extract the label and score from the result
+    label = nsfw_result[0]['label']
+    score = nsfw_result[0]['score']
 
     # First, check if the input text contains NSFW content.
-    nsfw_result = nsfw_detector(text)
-    if
-    #
+    #nsfw_result = nsfw_detector(text)
+    if label == 'NSFW' and score >= 0.95:
+        # Download and read the error audio file
         error_audio_path = hf_hub_download(repo_id="DLI-SLQ/speaker_01234", filename="error_audio.wav")
         with open(error_audio_path, 'rb') as error_audio_file:
             error_audio = error_audio_file.read()
+        # Return the error audio and a warning message
         return error_audio, "NSFW content detected. Cannot process."
 
     # If the content is safe, proceed with speech synthesis.
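For reference, the text-classification pipeline returns a list with one dict per input, each containing a 'label' and a 'score'. Below is a minimal standalone sketch of the gating logic this commit adds; the example input text and the print statements are illustrative assumptions, not part of app.py.

from transformers import pipeline

# Load the same NSFW text classifier used in app.py
nsfw_detector = pipeline("text-classification", model="michellejieli/NSFW_text_classifier")

# The pipeline returns one dict per input, e.g. [{'label': 'NSFW', 'score': 0.98}]
result = nsfw_detector("example input text")
label = result[0]['label']
score = result[0]['score']

# Same gate as the commit: block only when the classifier is confident the text is NSFW
if label == 'NSFW' and score >= 0.95:
    print("NSFW content detected. Cannot process.")
else:
    print("Content is safe; proceed with speech synthesis.")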