Threatthriver committed on
Commit
aea68a1
1 Parent(s): 7d179e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -170
app.py CHANGED
@@ -1,44 +1,49 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from datetime import datetime
4
- import json
5
 
6
  # Initialize the InferenceClient with the model ID from Hugging Face
7
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
8
 
9
- # Load chat history from a file if it exists
10
- def load_chat_history(filename="chat_history.json"):
11
- try:
12
- with open(filename, "r") as file:
13
- return json.load(file)
14
- except FileNotFoundError:
15
- return []
16
-
17
- # Save chat history to a file
18
- def save_chat_history(history, filename="chat_history.json"):
19
- with open(filename, "w") as file:
20
- json.dump(history, file)
21
-
22
- def generate_response(
23
- messages: list[dict],
24
  max_tokens: int,
25
  temperature: float,
26
  top_p: float,
27
  ):
28
  """
29
- Generates a response from the AI model based on the provided messages.
30
-
31
  Args:
32
- messages (list): A list of messages representing the conversation history.
 
 
33
  max_tokens (int): The maximum number of tokens for the output.
34
- temperature (float): Sampling temperature for controlling randomness.
35
  top_p (float): Top-p (nucleus sampling) for controlling diversity.
36
 
37
  Yields:
38
  str: The AI's response as it is generated.
39
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  response = ""
 
41
  try:
 
42
  for message in client.chat_completion(
43
  messages=messages,
44
  max_tokens=max_tokens,
@@ -53,166 +58,29 @@ def generate_response(
53
  except Exception as e:
54
  yield f"An error occurred: {str(e)}"
55
 
56
- def build_messages(system_message: str, history: list[tuple[str, str]], user_message: str) -> list[dict]:
57
- """
58
- Builds the list of messages for the model based on system message, history, and latest user message.
59
-
60
- Args:
61
- system_message (str): A system-level message guiding the AI's behavior.
62
- history (list): A list of tuples representing the conversation history (user, assistant).
63
- user_message (str): The latest user message.
64
-
65
- Returns:
66
- list: A list of message dictionaries formatted for the API call.
67
- """
68
- messages = [{"role": "system", "content": system_message}]
69
-
70
- for user_input, assistant_response in history:
71
- if user_input:
72
- messages.append({"role": "user", "content": user_input})
73
- if assistant_response:
74
- messages.append({"role": "assistant", "content": assistant_response})
75
-
76
- messages.append({"role": "user", "content": user_message})
77
- return messages
78
-
79
- def respond(
80
- message: str,
81
- history: list[tuple[str, str]],
82
- system_message: str,
83
- max_tokens: int,
84
- temperature: float,
85
- top_p: float,
86
- ):
87
- """
88
- Handles the interaction with the model to generate a response based on user input and chat history.
89
-
90
- Args:
91
- message (str): The user's input message.
92
- history (list): A list of tuples representing the conversation history (user, assistant).
93
- system_message (str): A system-level message guiding the AI's behavior.
94
- max_tokens (int): The maximum number of tokens for the output.
95
- temperature (float): Sampling temperature for controlling randomness.
96
- top_p (float): Top-p (nucleus sampling) for controlling diversity.
97
-
98
- Yields:
99
- str: The AI's response as it is generated.
100
- """
101
- messages = build_messages(system_message, history, message)
102
- yield from generate_response(messages, max_tokens, temperature, top_p)
103
-
104
- def update_chat_history(user_message: str, assistant_response: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]:
105
- """
106
- Updates the chat history with the latest user message and assistant response.
107
-
108
- Args:
109
- user_message (str): The latest user message.
110
- assistant_response (str): The response generated by the assistant.
111
- history (list): The existing chat history.
112
-
113
- Returns:
114
- list: The updated chat history.
115
- """
116
- history.append((user_message, assistant_response))
117
- save_chat_history(history)
118
- return history
119
-
120
- # --- Enhanced UI Features ---
121
- def update_settings(max_tokens, temperature, top_p):
122
- """Updates the settings based on user input."""
123
- return gr.Markdown(f"**Current Settings:**\n* Max Tokens: {max_tokens}\n* Temperature: {temperature}\n* Top-p: {top_p}")
124
-
125
- def display_history(history):
126
- """Displays the chat history in a more readable format."""
127
- formatted_history = ""
128
- for user_msg, assistant_msg in history:
129
- formatted_history += f"**User:** {user_msg}\n**Assistant:** {assistant_msg}\n\n"
130
- return formatted_history
131
-
132
-
133
- # Define the UI layout with additional features
134
  with gr.Blocks() as demo:
135
  gr.Markdown("# 🧠 AI Chatbot Interface")
136
  gr.Markdown("### Customize your AI Chatbot's behavior and responses.")
137
-
138
  with gr.Row():
 
139
  with gr.Column():
140
- system_message = gr.Textbox(
141
- value="You are a helpful assistant knowledgeable in various topics. Provide clear, concise, and friendly responses.",
142
- label="System message",
143
- lines=3
144
- )
145
  max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
146
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
147
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
148
-
149
- # Display current settings
150
- settings_output = gr.Markdown(f"**Current Settings:**\n* Max Tokens: {max_tokens.value}\n* Temperature: {temperature.value}\n* Top-p: {top_p.value}")
151
- max_tokens.change(fn=update_settings, inputs=[max_tokens, temperature, top_p], outputs=settings_output)
152
- temperature.change(fn=update_settings, inputs=[max_tokens, temperature, top_p], outputs=settings_output)
153
- top_p.change(fn=update_settings, inputs=[max_tokens, temperature, top_p], outputs=settings_output)
154
-
155
- with gr.Row():
156
- chatbot = gr.Chatbot()
157
- # Display chat history in a separate area
158
- history_output = gr.Textbox(label="Chat History", lines=10, interactive=False)
159
-
160
  with gr.Row():
161
- with gr.Column():
162
- sample_prompt = gr.Dropdown(
163
- choices=[
164
- "Can you explain the theory of relativity?",
165
- "What are some tips for improving productivity at work?",
166
- "Tell me a fun fact about space.",
167
- "How can I cook a perfect omelette?",
168
- "What's the latest news in technology?"
169
- ],
170
- label="Sample Prompts",
171
- value="Can you explain the theory of relativity?",
172
- type="value"
173
- )
174
- message = gr.Textbox(label="Your message:", lines=1)
175
- submit_btn = gr.Button("Send")
176
- clear_btn = gr.Button("Clear Chat")
177
- feedback = gr.Textbox(label="Feedback:", lines=1)
178
- submit_feedback = gr.Button("Submit Feedback")
179
-
180
- # Handle sample prompt selection
181
- def update_message(prompt: str) -> str:
182
- return prompt
183
-
184
- sample_prompt.change(fn=update_message, inputs=sample_prompt, outputs=message)
185
 
186
  # Update the chatbot with the new message and response
187
- def handle_send(message: str, system_message: str, max_tokens: int, temperature: float, top_p: float):
188
- history = load_chat_history()
189
- response = list(respond(message, history, system_message, max_tokens, temperature, top_p))[0]
190
- history = update_chat_history(message, response, history)
191
- formatted_history = display_history(history)
192
- return response, history, formatted_history
193
-
194
- submit_btn.click(
195
- fn=handle_send,
196
- inputs=[message, system_message, max_tokens, temperature, top_p],
197
- outputs=[chatbot, gr.State(), history_output],
198
- show_progress=True
199
- )
200
-
201
- # Clear the chat history
202
- def clear_chat() -> list:
203
- save_chat_history([]) # Clear the saved history as well
204
- return [], "" # Return empty list for chatbot and empty string for history output
205
-
206
- clear_btn.click(fn=clear_chat, inputs=None, outputs=[chatbot, history_output])
207
-
208
- # Handle feedback submission
209
- def submit_user_feedback(feedback: str):
210
- # In a real application, you would save this feedback to a database or file
211
- print(f"Feedback received: {feedback}")
212
- return "Thank you for your feedback!"
213
-
214
- submit_feedback.click(fn=submit_user_feedback, inputs=feedback, outputs=[gr.Textbox(value="Feedback submitted! Thank you.", lines=1, placeholder="")])
215
 
216
  # Launch the Gradio interface
217
  if __name__ == "__main__":
218
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
4
  # Initialize the InferenceClient with the model ID from Hugging Face
5
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
6
 
7
+ def respond(
8
+ message: str,
9
+ history: list[tuple[str, str]],
10
+ system_message: str,
 
 
 
 
 
 
 
 
 
 
 
11
  max_tokens: int,
12
  temperature: float,
13
  top_p: float,
14
  ):
15
  """
16
+ Generates a response from the AI model based on the user's message and chat history.
17
+
18
  Args:
19
+ message (str): The user's input message.
20
+ history (list): A list of tuples representing the conversation history (user, assistant).
21
+ system_message (str): A system-level message guiding the AI's behavior.
22
  max_tokens (int): The maximum number of tokens for the output.
23
+ temperature (float): Sampling temperature for controlling the randomness.
24
  top_p (float): Top-p (nucleus sampling) for controlling diversity.
25
 
26
  Yields:
27
  str: The AI's response as it is generated.
28
  """
29
+
30
+ # Prepare the conversation history for the API call
31
+ messages = [{"role": "system", "content": system_message}]
32
+
33
+ for user_input, assistant_response in history:
34
+ if user_input:
35
+ messages.append({"role": "user", "content": user_input})
36
+ if assistant_response:
37
+ messages.append({"role": "assistant", "content": assistant_response})
38
+
39
+ # Add the latest user message to the conversation
40
+ messages.append({"role": "user", "content": message})
41
+
42
+ # Initialize an empty response
43
  response = ""
44
+
45
  try:
46
+ # Generate a response from the model with streaming
47
  for message in client.chat_completion(
48
  messages=messages,
49
  max_tokens=max_tokens,
 
58
  except Exception as e:
59
  yield f"An error occurred: {str(e)}"
60
 
61
+ # Define the UI layout with a more user-friendly design
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  with gr.Blocks() as demo:
63
  gr.Markdown("# 🧠 AI Chatbot Interface")
64
  gr.Markdown("### Customize your AI Chatbot's behavior and responses.")
65
+
66
  with gr.Row():
67
+ chatbot = gr.Chatbot()
68
  with gr.Column():
69
+ system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message", lines=2)
 
 
 
 
70
  max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
71
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
72
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
73
+
 
 
 
 
 
 
 
 
 
 
 
74
  with gr.Row():
75
+ message = gr.Textbox(label="Your message:", lines=1)
76
+ submit_btn = gr.Button("Send")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
  # Update the chatbot with the new message and response
79
+ submit_btn.click(respond,
80
+ inputs=[message, chatbot, system_message, max_tokens, temperature, top_p],
81
+ outputs=[chatbot],
82
+ show_progress=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
  # Launch the Gradio interface
85
  if __name__ == "__main__":
86
+ demo.launch()