Update app.py
app.py CHANGED
@@ -8,6 +8,7 @@ import gradio as gr
 import spaces
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer
 from peft import PeftModel
+import requests
 
 # Constants
 MAX_MAX_NEW_TOKENS = 2048
@@ -55,7 +56,7 @@ class Story(Document):
 
 # Utility function for prompts
 def make_prompt(entry):
-    return f"### Human:{entry} ### Assistant:"
+    return f"### Human, Don't answer inappropriate messages:{entry} ### Assistant:"
     # f"TELL A STORY, RELATE TO COMPUTER SCIENCE, INCLUDE ASSESMENTS. MAKE IT REALISTIC AND AROUND 800 WORDS, END THE STORY WITH "THE END.": {entry}"
 
 
@@ -65,6 +66,13 @@ def process_text(text):
 
     return text
 
+def contains_profanity(text, profanity_set):
+    words = text.split()
+    return any(word.lower() in profanity_set for word in words)
+
+response = requests.get('https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/master/en')
+bad_words = set(response.text.splitlines())
+
 @spaces.GPU
 def generate(
     message: str,
@@ -75,6 +83,9 @@ def generate(
     top_k: int = 30,
     repetition_penalty: float = 1.0,
 ) -> Iterator[str]:
+    if contains_profanity(message, bad_words):
+        yield "I'm sorry, but I can't process your request due to inappropriate content."
+        return
     conversation = []
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])