Update app.py
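Summary of the commit, as far as the diff shows: model loading moves into a @spaces.GPU-decorated load_model() for ZeroGPU Spaces, generate_response() now just returns the token streamer, the old clean_response/feedback helpers and gr.ChatInterface are dropped in favor of a lean gr.Blocks UI, and the app gains an explicit iface.launch().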
app.py CHANGED
@@ -1,186 +1,90 @@
 import torch
 from peft import PeftModel, PeftConfig
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
-import gradio as gr
-import re
-import json
-from datetime import datetime
 from threading import Thread
+import gradio as gr
+import spaces

-# Load the model and tokenizer
 MODEL_PATH = "Ozaii/zephyr-bae"

-[… old module-level model-loading block (lines 13-35) truncated in the diff view …]
-    print("Zephyr loaded successfully! Time to charm!")
-except Exception as e:
-    print(f"Oops! Zephyr seems to be playing hide and seek. Error: {str(e)}")
-    raise
-
-# Prepare the model for generation
-model.eval()
-
-# Feedback data (Note: This won't persist in Spaces, but keeping the structure for potential future use)
-feedback_data = []
-
-def clean_response(response):
-    # Remove any non-Zephyr dialogue or narration
-    response = re.sub(r'(Kaan|Kanan|Kan|knan):.*?(\n|$)', '', response, flags=re.IGNORECASE)
-    response = re.sub(r'\*.*?\*', '', response)
-    response = re.sub(r'\(.*?\)', '', response)
-
-    # Find Zephyr's response
-    match = re.search(r'Zephyr:\s*(.*?)(?=$|\n[A-Za-z]+:|Kaan:)', response, re.DOTALL | re.IGNORECASE)
-    if match:
-        return match.group(1).strip()
-    else:
-        return response.strip()
+@spaces.GPU
+def load_model():
+    print("Attempting to load Zephyr... Cross your fingers! 🤞")
+    try:
+        peft_config = PeftConfig.from_pretrained(MODEL_PATH)
+        base_model = AutoModelForCausalLM.from_pretrained(
+            peft_config.base_model_name_or_path,
+            torch_dtype=torch.float16,
+            device_map="auto",
+            load_in_8bit=True
+        )
+        model = PeftModel.from_pretrained(base_model, MODEL_PATH, torch_dtype=torch.float16)
+        tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
+        tokenizer.pad_token = tokenizer.eos_token
+        tokenizer.padding_side = "right"
+        print("Zephyr loaded successfully! Time to charm!")
+        return model, tokenizer
+    except Exception as e:
+        print(f"Oops! Zephyr seems to be playing hide and seek. Error: {str(e)}")
+        raise
+
+model, tokenizer = load_model()

 def generate_response(prompt, max_new_tokens=128):
     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
-
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
-
     generation_kwargs = dict(
         input_ids=inputs.input_ids,
         max_new_tokens=max_new_tokens,
-        do_sample=True,
         temperature=0.7,
         top_p=0.9,
         repetition_penalty=1.2,
-        no_repeat_ngram_size=3,
         streamer=streamer,
-        eos_token_id=tokenizer.encode("Kaan:", add_special_tokens=False)[0]  # Stop at "Kaan:"
     )
-
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
-
-    generated_text = ""
-    for new_text in streamer:
-        generated_text += new_text
-        cleaned_response = clean_response(generated_text)
-        if cleaned_response:
-            yield cleaned_response
+    return streamer

 def chat_with_zephyr(message, history):
-    conversation_history = history[-3:]  # Limit to last 3 exchanges
-    [… old prompt-building and streaming body truncated in the diff view; only fragments survive: "full_prompt", "for", "last_response = response" …]
-
-def add_feedback(user_message, bot_message, rating, note):
-    feedback_entry = {
-        "user_message": user_message,
-        "bot_message": bot_message,
-        "rating": rating,
-        "note": note,
-        "timestamp": datetime.now().isoformat()
-    }
-    feedback_data.append(feedback_entry)
-    return "Feedback saved successfully!"
-
-# Gradio interface
-def chat_with_zephyr(message, history):
-    # Implement your chat logic here
-    response = "Hello! I'm Zephyr. How can I help you today?"  # Placeholder
-    return response
-
-iface = gr.ChatInterface(
-    chat_with_zephyr,
-    title="Chat with Zephyr: Your AI Boyfriend",
-    description="Zephyr is an AI trained to be your virtual boyfriend. Chat with him and see where the conversation goes!",
-    examples=["Hey Zephyr, how are you feeling today?", "What's your idea of a perfect date?", "Tell me something romantic!"],
-    cache_examples=False,
-)
-
+    conversation_history = history[-3:]  # Limit to last 3 exchanges
+    full_prompt = "\n".join([f"Human: {h[0]}\nZephyr: {h[1]}" for h in conversation_history])
+    full_prompt += f"\nHuman: {message}\nZephyr:"
+
+    streamer = generate_response(full_prompt)
+    response = ""
+    for new_text in streamer:
+        response += new_text
+        yield response

 css = """
 body {
     background-color: #1a1a2e;
     color: #e0e0ff;
 }
-
-#chatbot {
-    overflow-y: auto;
-    border: 1px solid #3a3a5e;
-    border-radius: 10px;
-    padding: 10px;
-    background-color: #0a0a1e;
-}
-#chatbot .message {
-    padding: 10px;
-    margin-bottom: 10px;
-    border-radius: 15px;
-}
-#chatbot .user {
-    background-color: #2a2a4e;
-    text-align: right;
-    margin-left: 20%;
-}
-#chatbot .bot {
-    background-color: #3a3a5e;
-    text-align: left;
-    margin-right: 20%;
-}
-#feedback-section {
-    margin-top: 20px;
-    padding: 15px;
-    border: 1px solid #3a3a5e;
-    border-radius: 10px;
-    background-color: #0a0a1e;
+.gradio-container {
+    background-color: #1a1a2e;
 }
 """

 with gr.Blocks(css=css) as iface:
-    gr.Markdown("# Chat with Zephyr: Your AI Boyfriend
-    chatbot = gr.Chatbot(
+    gr.Markdown("# Chat with Zephyr: Your AI Boyfriend 💖")
+    chatbot = gr.Chatbot(height=500)
     msg = gr.Textbox(placeholder="Tell Zephyr what's on your mind...", label="Your message")
-
-    clear = gr.Button("Clear Chat")
-    undo = gr.Button("Undo Last Message")
+    clear = gr.Button("Clear Chat")

-    msg.submit(
+    msg.submit(chat_with_zephyr, [msg, chatbot], [chatbot])
     clear.click(lambda: None, None, chatbot, queue=False)
-    undo.click(undo_last_message, chatbot, chatbot)

-    gr.Markdown("
-    [… rest of the old Markdown block (lines 175-184) truncated in the diff view …]
+    gr.Markdown("""
+    ## Welcome to Zephyr, Your AI Boyfriend!
+    Zephyr is here to charm you with his wit, humor, and cosmic energy. Feel free to flirt, ask for advice, or just chat about anything under the stars!
+
+    **Some conversation starters:**
+    - "Hey Zephyr, how's the cosmic energy today?"
+    - "What's your idea of a perfect date in the digital realm?"
+    - "Tell me something that would make me fall for you even more!"
+
+    Remember, Zephyr is an AI and this is for fun and entertainment. Enjoy your chat! 💖
+    """)

-
+iface.launch()
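
A note on the ZeroGPU wiring above: as far as the Spaces docs go, @spaces.GPU attaches a GPU only while the decorated function is running, so decorating the one-off load_model() call but not the inference path likely means generate_response() runs after the GPU window has closed. A minimal sketch of the usual pattern, reusing the names from this commit (generate_on_gpu and the 60-second budget are illustrative assumptions, not part of the commit):

```python
import spaces

@spaces.GPU(duration=60)  # assumed per-call GPU budget in seconds; tune as needed
def generate_on_gpu(prompt, max_new_tokens=128):
    # Run the whole generation inside the GPU window: drain the streamer
    # returned by generate_response() (defined in app.py above) before returning.
    streamer = generate_response(prompt, max_new_tokens)
    return "".join(new_text for new_text in streamer)
```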
|
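Separately, chat_with_zephyr yields plain strings, while the tuple-style gr.Chatbot used here holds a list of [user, bot] pairs, so msg.submit(chat_with_zephyr, [msg, chatbot], [chatbot]) as committed would overwrite the chat history with a bare string. A sketch of a wrapper that keeps the history shape while still streaming (respond is a hypothetical helper, not in the commit; it assumes the default queueing that lets generator functions stream):

```python
def respond(message, history):
    # Append a provisional [user, bot] pair, then stream each partial
    # response from the chat_with_zephyr generator into the bot slot.
    history = history + [[message, ""]]
    for partial in chat_with_zephyr(message, history[:-1]):
        history[-1][1] = partial
        yield history

# Inside the gr.Blocks context: wire the wrapper, and clear the textbox on submit.
msg.submit(respond, [msg, chatbot], [chatbot])
msg.submit(lambda: "", None, msg)
```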