Spaces:
Running
Running
Canstralian
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,105 +1,155 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
3 |
|
4 |
-
# Hugging Face client
|
5 |
-
client = InferenceClient("
|
6 |
|
7 |
-
#
|
8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
"""
|
10 |
-
|
11 |
-
|
12 |
-
Parameters:
|
13 |
-
message (str): User's current message/input.
|
14 |
-
history (list): List of tuples representing conversation history (user's and assistant's messages).
|
15 |
-
system_message (str): System-level instructions to the assistant to guide its responses.
|
16 |
-
max_tokens (int): Maximum number of tokens to generate in the response.
|
17 |
-
temperature (float): Degree of randomness in the response generation.
|
18 |
-
top_p (float): Controls the diversity of the response using nucleus sampling.
|
19 |
-
|
20 |
-
Yields:
|
21 |
-
str: Streamed response as tokens are generated.
|
22 |
"""
|
23 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
messages = [{"role": "system", "content": system_message}]
|
25 |
-
|
26 |
-
# Loop through the history and add past conversation to the messages
|
27 |
for user_message, assistant_message in history:
|
28 |
if user_message:
|
29 |
messages.append({"role": "user", "content": user_message})
|
30 |
if assistant_message:
|
31 |
messages.append({"role": "assistant", "content": assistant_message})
|
32 |
-
|
33 |
-
# Append the current user message to the conversation
|
34 |
-
messages.append({"role": "user", "content": message})
|
35 |
|
36 |
-
|
37 |
response = ""
|
38 |
|
39 |
-
#
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
"You are NLPToolkit Agent, an advanced natural language processing assistant. "
|
55 |
-
"You specialize in tasks such as text summarization, sentiment analysis, text classification, "
|
56 |
-
"entity recognition, and answering technical questions about NLP models and datasets. "
|
57 |
-
"Assist users with clear, concise, and actionable outputs."
|
58 |
-
)
|
59 |
|
60 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
def create_interface():
|
62 |
"""
|
63 |
-
Create
|
64 |
-
|
65 |
-
Parameters:
|
66 |
-
None
|
67 |
-
|
68 |
-
Returns:
|
69 |
-
gr.Interface: The Gradio interface object.
|
70 |
"""
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
)
|
85 |
-
gr.
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
if __name__ == "__main__":
|
104 |
demo = create_interface()
|
105 |
demo.launch()
|
|
|
import gradio as gr
from huggingface_hub import InferenceClient
import langdetect
import json

# Initialize Hugging Face client with the new model
client = InferenceClient(model="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")

# Default system message to guide the assistant
# (shown in the UI as an editable Textbox; respond() appends task details to it)
default_system_message = (
    "You are NLPToolkit Agent, an advanced assistant specializing in NLP tasks such as text summarization, "
    "sentiment analysis, text classification, and entity recognition. Adapt your responses to the selected task."
)

# Predefined task-specific instructions, keyed by the dropdown choice in the UI.
# respond() falls back to 'General NLP task' for unknown keys.
task_instructions = {
    "Summarization": "Summarize the text clearly and concisely.",
    "Sentiment Analysis": "Analyze the sentiment of the text (positive, neutral, negative).",
    "Text Classification": "Classify the text into relevant categories.",
    "Entity Recognition": "Identify and list named entities in the text."
}
24 |
+
# Preprocessing user input
|
25 |
+
def preprocess_text(text):
|
26 |
+
"""
|
27 |
+
Clean and validate the user's input text.
|
28 |
+
"""
|
29 |
+
try:
|
30 |
+
# Detect input language
|
31 |
+
language = langdetect.detect(text)
|
32 |
+
if language != "en":
|
33 |
+
return f"Input language detected as {language}. Please provide input in English."
|
34 |
+
except Exception:
|
35 |
+
return "Unable to detect language. Please provide valid text input."
|
36 |
+
return text.strip()
|
37 |
+
|
# Respond function for handling user input and generating a response
def respond(task, message, history, system_message, max_tokens, temperature, top_p):
    """
    Handle a user message and stream a response from the NLP model.

    Parameters:
        task (str): Selected NLP task; key into task_instructions.
        message (str): The user's current message/input.
        history (list): (user, assistant) message tuples from prior turns.
        system_message (str): System-level instructions for the assistant.
        max_tokens (int): Maximum number of tokens to generate.
        temperature (float): Degree of randomness in generation.
        top_p (float): Nucleus-sampling diversity cutoff.

    Yields:
        str: The accumulated response text as tokens stream in, or a
        single validation/error message.
    """
    # Apply task-specific instructions
    system_message = f"{system_message} Task: {task_instructions.get(task, 'General NLP task')}"

    # Preprocess the user's input. preprocess_text signals failure by
    # returning one of two known error-message prefixes.
    message = preprocess_text(message)
    if message.startswith("Input language detected") or message.startswith("Unable to detect"):
        yield message
        return

    # Prepare conversation history
    messages = [{"role": "system", "content": system_message}]
    for user_message, assistant_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})

    # Append the current user message to the conversation
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream response from the Hugging Face model
    try:
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # delta.content can be None on role/finish chunks; guard so
            # `response += token` never does str + None (TypeError).
            token = chunk.choices[0].delta.content
            if token:
                response += token
                yield response
    except Exception as e:
        yield f"Error generating response: {str(e)}"
# Save conversation history to a JSON file
def save_history(history):
    """
    Persist the conversation history to chat_history.json.

    Parameters:
        history (list): Conversation turns to serialize.

    Returns:
        str: A confirmation message for the UI.
    """
    serialized = json.dumps(history)
    with open("chat_history.json", "w") as outfile:
        outfile.write(serialized)
    return "Chat history saved successfully."
# Load conversation history from a JSON file
def load_history():
    """
    Load conversation history from chat_history.json.

    Returns:
        list: The saved history, or [] when the file is missing or
        contains invalid JSON.
    """
    try:
        with open("chat_history.json", "r") as f:
            history = json.load(f)
        return history
    except (FileNotFoundError, json.JSONDecodeError):
        # A missing OR corrupted save file should reset the chat, not
        # crash the app (the original only handled the missing case).
        return []
# Gradio app interface
def create_interface():
    """
    Create the Gradio interface for the chatbot.

    Builds a Blocks layout (task dropdown, input/system-message boxes,
    sampling sliders, save/load/submit buttons) and wires the buttons to
    respond, save_history and load_history.

    Returns:
        gr.Blocks: The assembled Gradio app, ready for .launch().
    """
    with gr.Blocks() as demo:
        gr.Markdown("## 🧠 NLPToolkit Agent\nAn advanced assistant for NLP tasks, powered by Hugging Face.")

        with gr.Row():
            # Task selection dropdown
            task = gr.Dropdown(
                choices=["Summarization", "Sentiment Analysis", "Text Classification", "Entity Recognition"],
                value="Summarization",
                label="Select NLP Task"
            )

        with gr.Row():
            # User input and system message
            user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            system_message = gr.Textbox(value=default_system_message, label="System Message")

        with gr.Row():
            # Chat history and assistant response
            # NOTE(review): respond() reads this State but nothing ever
            # writes turns back into it, so multi-turn context appears to
            # be lost between submits — confirm whether that is intended.
            chat_history = gr.State(value=[])
            assistant_response = gr.Textbox(label="Assistant Response", interactive=False)

        with gr.Row():
            # Parameter sliders
            max_tokens = gr.Slider(1, 2048, value=512, label="Max Tokens")
            temperature = gr.Slider(0.1, 4.0, value=0.7, label="Temperature")
            top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p (Nucleus Sampling)")

        with gr.Row():
            # Buttons for save/load functionality
            save_button = gr.Button("Save Chat History")
            load_button = gr.Button("Load Chat History")

        with gr.Row():
            # Submit button
            submit_button = gr.Button("Generate Response")

        # Connect functionalities: respond is a generator, so the
        # response textbox updates as tokens stream in.
        submit_button.click(
            fn=respond,
            inputs=[task, user_input, chat_history, system_message, max_tokens, temperature, top_p],
            outputs=assistant_response
        )
        save_button.click(fn=save_history, inputs=chat_history, outputs=None)
        load_button.click(fn=load_history, inputs=None, outputs=chat_history)

        gr.Markdown("### 🚀 Powered by Hugging Face and Gradio | Developed by Canstralian")

    return demo
# Run the app
if __name__ == "__main__":
    # Build the interface and start the Gradio server (blocking call).
    demo = create_interface()
    demo.launch()