Update app.py
app.py CHANGED
@@ -1,9 +1,24 @@
import gradio as gr
from huggingface_hub import InferenceClient
+from datetime import datetime
+import json

# Initialize the InferenceClient with the model ID from Hugging Face
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")

+# Load chat history from a file if it exists
+def load_chat_history(filename="chat_history.json"):
+    try:
+        with open(filename, "r") as file:
+            return json.load(file)
+    except FileNotFoundError:
+        return []
+
+# Save chat history to a file
+def save_chat_history(history, filename="chat_history.json"):
+    with open(filename, "w") as file:
+        json.dump(history, file)
+
def generate_response(
    messages: list[dict],
    max_tokens: int,
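Note on the new history helpers: json.dump writes the (user, assistant) tuples used elsewhere in the app as plain JSON lists, so load_chat_history returns lists of lists after a round trip. A minimal sketch of a loader that restores the tuple shape (a hypothetical variant assuming the same chat_history.json layout, not part of this commit):

import json

def load_chat_history(filename: str = "chat_history.json") -> list[tuple[str, str]]:
    # JSON stores each (user, assistant) pair as a two-element list;
    # convert the pairs back to tuples to match the app's type hints.
    try:
        with open(filename, "r") as file:
            return [tuple(pair) for pair in json.load(file)]
    except (FileNotFoundError, json.JSONDecodeError):
        # No saved history yet, or the file is unreadable: start fresh.
        return []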
@@ -23,9 +38,7 @@ def generate_response(
        str: The AI's response as it is generated.
    """
    response = ""
-
    try:
-        # Generate a response from the model with streaming
        for message in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
@@ -60,9 +73,7 @@ def build_messages(system_message: str, history: list[tuple[str, str]], user_mes
        if assistant_response:
            messages.append({"role": "assistant", "content": assistant_response})

-    # Add the latest user message to the conversation
    messages.append({"role": "user", "content": user_message})
-
    return messages

def respond(
@@ -90,7 +101,23 @@ def respond(
    messages = build_messages(system_message, history, message)
    yield from generate_response(messages, max_tokens, temperature, top_p)

-
+def update_chat_history(user_message: str, assistant_response: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]:
+    """
+    Updates the chat history with the latest user message and assistant response.
+
+    Args:
+        user_message (str): The latest user message.
+        assistant_response (str): The response generated by the assistant.
+        history (list): The existing chat history.
+
+    Returns:
+        list: The updated chat history.
+    """
+    history.append((user_message, assistant_response))
+    save_chat_history(history)
+    return history
+
+# Define the UI layout with additional features
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 AI Chatbot Interface")
    gr.Markdown("### Customize your AI Chatbot's behavior and responses.")
@@ -105,6 +132,7 @@ with gr.Blocks() as demo:
    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+    theme = gr.Radio(choices=["Light", "Dark"], label="Theme", value="Light")

    with gr.Row():
        chatbot = gr.Chatbot()
@@ -126,6 +154,8 @@ with gr.Blocks() as demo:
    message = gr.Textbox(label="Your message:", lines=1)
    submit_btn = gr.Button("Send")
    clear_btn = gr.Button("Clear Chat")
+    feedback = gr.Textbox(label="Feedback:", lines=1)
+    submit_feedback = gr.Button("Submit Feedback")

    # Handle sample prompt selection
    def update_message(prompt: str) -> str:
@@ -134,10 +164,16 @@ with gr.Blocks() as demo:
    sample_prompt.change(fn=update_message, inputs=sample_prompt, outputs=message)

    # Update the chatbot with the new message and response
+    def handle_send(message: str, system_message: str, max_tokens: int, temperature: float, top_p: float):
+        history = load_chat_history()
+        response = list(respond(message, history, system_message, max_tokens, temperature, top_p))[0]
+        history = update_chat_history(message, response, history)
+        return response, history
+
    submit_btn.click(
-        fn=
-        inputs=[message,
-        outputs=[chatbot],
+        fn=handle_send,
+        inputs=[message, system_message, max_tokens, temperature, top_p],
+        outputs=[chatbot, gr.State()],
        show_progress=True
    )

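One thing to flag in handle_send: respond is a generator (it yields from generate_response as the model streams), so list(respond(...))[0] keeps only the first streamed chunk, and the bare string it returns is routed to gr.Chatbot, which renders a list of (user, assistant) pairs rather than a string. A hedged sketch of an alternative, assuming each yield is the accumulated reply so far, that keeps the final reply and hands the updated pair list to the chatbot:

def handle_send(message: str, system_message: str, max_tokens: int,
                temperature: float, top_p: float) -> list[tuple[str, str]]:
    history = load_chat_history()
    # Drain the stream and keep the last (complete) reply.
    response = ""
    for partial in respond(message, history, system_message, max_tokens, temperature, top_p):
        response = partial
    # Persist the turn and return the (user, assistant) pairs gr.Chatbot expects.
    return update_chat_history(message, response, history)

with the click wired as submit_btn.click(fn=handle_send, inputs=[message, system_message, max_tokens, temperature, top_p], outputs=[chatbot]).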
@@ -146,6 +182,14 @@ with gr.Blocks() as demo:
        return []

    clear_btn.click(fn=clear_chat, inputs=None, outputs=chatbot)
+
+    # Handle feedback submission
+    def submit_user_feedback(feedback: str):
+        # In a real application, you would save this feedback to a database or file
+        print(f"Feedback received: {feedback}")
+        return "Thank you for your feedback!"
+
+    submit_feedback.click(fn=submit_user_feedback, inputs=feedback, outputs=[gr.Textbox(value="Feedback submitted! Thank you.", lines=1, placeholder="")])

    # Launch the Gradio interface
    if __name__ == "__main__":
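The feedback acknowledgement above is sent to a gr.Textbox constructed inline in the .click() call, pre-filled with "Feedback submitted! Thank you." before any feedback has actually been submitted. A simpler wiring (a sketch, not what this commit does) reuses the existing feedback box as both input and output, so the thank-you message replaces the submitted text:

    # Acknowledge feedback in the same textbox it was typed into.
    def submit_user_feedback(feedback_text: str) -> str:
        # In a real application this would be written to a database or log file.
        print(f"Feedback received: {feedback_text}")
        return "Thank you for your feedback!"

    submit_feedback.click(fn=submit_user_feedback, inputs=feedback, outputs=feedback)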