Canstralian committed on
Commit
1518493
·
verified ·
1 Parent(s): 65a82e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +133 -83
app.py CHANGED
@@ -1,105 +1,155 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
4
# Hugging Face client initialization
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Function to handle NLP responses and interaction with the model
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """
    Handle the user's message and stream a response from the NLP model.

    Parameters:
        message (str): User's current message/input.
        history (list): Conversation history as (user, assistant) pairs.
        system_message (str): System-level instructions guiding the assistant.
        max_tokens (int): Maximum number of tokens to generate in the response.
        temperature (float): Degree of randomness in the response generation.
        top_p (float): Nucleus-sampling diversity control.

    Yields:
        str: The streamed response accumulated so far, updated per token.
    """
    # Prepend the system instructions, then replay the conversation history.
    messages = [{"role": "system", "content": system_message}]
    for user_message, assistant_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})

    # Append the current user message to the conversation.
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream tokens from the model. Use a dedicated loop variable instead of
    # reusing (and clobbering) the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Delta content may be None on keep-alive/final chunks; guard the
        # concatenation so it never raises TypeError mid-stream.
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
52
# System prompt that frames the assistant's role and expected output style.
default_system_message = (
    "You are NLPToolkit Agent, an advanced natural language processing assistant. "
    "You specialize in tasks such as text summarization, sentiment analysis, text classification, "
    "entity recognition, and answering technical questions about NLP models and datasets. "
    "Assist users with clear, concise, and actionable outputs."
)
59
 
60
# Create the Gradio interface for user interaction
def create_interface():
    """
    Build and return the Gradio chat interface for the NLPToolkit Agent.

    Returns:
        gr.ChatInterface: Chat UI wired to ``respond``, with the system
        message and generation parameters exposed as additional inputs.
    """
    # Extra controls shown alongside the chat box; order matches the
    # trailing parameters of respond().
    extra_controls = [
        gr.Textbox(value=default_system_message, label="System Message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)"),
    ]
    return gr.ChatInterface(respond, additional_inputs=extra_controls)
102
# Script entry point: build the chat UI and start the local server.
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import langdetect
4
+ import json
5
 
6
# Hugging Face Inference API client, backed by an OpenAssistant chat model.
client = InferenceClient(model="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")
8
 
9
# Baseline system prompt; respond() appends a task-specific suffix to it.
default_system_message = (
    "You are NLPToolkit Agent, an advanced assistant specializing in NLP tasks such as text summarization, "
    "sentiment analysis, text classification, and entity recognition. Adapt your responses to the selected task."
)

# Instruction appended to the system prompt for each supported task; keys
# must match the choices offered by the task dropdown in create_interface().
task_instructions = {
    "Summarization": "Summarize the text clearly and concisely.",
    "Sentiment Analysis": "Analyze the sentiment of the text (positive, neutral, negative).",
    "Text Classification": "Classify the text into relevant categories.",
    "Entity Recognition": "Identify and list named entities in the text.",
}
22
+
23
+
24
# Preprocessing user input
def preprocess_text(text):
    """
    Clean and validate the user's input text.

    Parameters:
        text (str): Raw user input.

    Returns:
        str: The stripped text when it is usable English input; otherwise a
        human-readable error message (callers detect errors by its prefix).
    """
    # Guard clause: language detection raises on empty/whitespace-only
    # input, so report the problem explicitly instead of relying on the
    # broad except below to rescue it.
    if not text or not text.strip():
        return "Unable to detect language. Please provide valid text input."
    try:
        # Detect input language; the downstream model only handles English.
        language = langdetect.detect(text)
        if language != "en":
            return f"Input language detected as {language}. Please provide input in English."
    except Exception:
        # Detection can fail on gibberish or very short strings.
        return "Unable to detect language. Please provide valid text input."
    return text.strip()
37
+
38
+
39
# Respond function for handling user input and generating a response
def respond(task, message, history, system_message, max_tokens, temperature, top_p):
    """
    Handle a user message and stream a response from the NLP model.

    Parameters:
        task (str): Selected NLP task; looked up in ``task_instructions``.
        message (str): The user's current input text.
        history (list): Conversation history as (user, assistant) pairs.
        system_message (str): System-level guidance for the assistant.
        max_tokens (int): Maximum number of tokens to generate.
        temperature (float): Sampling temperature.
        top_p (float): Nucleus-sampling probability mass.

    Yields:
        str: The response accumulated so far, updated per streamed chunk,
        or a human-readable error message.
    """
    # Augment the system prompt with the task-specific instruction.
    system_message = f"{system_message} Task: {task_instructions.get(task, 'General NLP task')}"

    # Validate/clean the input; preprocess_text signals problems by returning
    # a human-readable message instead of the cleaned text.
    message = preprocess_text(message)
    if message.startswith("Input language detected") or message.startswith("Unable to detect"):
        yield message
        return

    # Rebuild the full transcript expected by the chat-completion API.
    messages = [{"role": "system", "content": system_message}]
    for user_message, assistant_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream the response; surface any API failure as a message rather than
    # crashing the UI.
    try:
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # Delta content may be None on keep-alive/final chunks; guard the
            # concatenation so it never raises TypeError mid-stream.
            token = chunk.choices[0].delta.content or ""
            response += token
            yield response
    except Exception as e:
        yield f"Error generating response: {str(e)}"
78
+
 
 
 
 
 
79
 
80
# Save conversation history to a JSON file
def save_history(history):
    """
    Persist the chat history to ``chat_history.json`` in the working directory.

    Parameters:
        history (list): Conversation history as JSON-serializable pairs.

    Returns:
        str: A confirmation message (discarded by the current UI wiring).
    """
    # Explicit UTF-8 keeps non-ASCII conversation text portable across
    # platforms whose default encoding differs (e.g. Windows).
    with open("chat_history.json", "w", encoding="utf-8") as f:
        json.dump(history, f)
    return "Chat history saved successfully."
85
+
86
+
87
# Load conversation history from a JSON file
def load_history():
    """
    Load the chat history previously written by ``save_history``.

    Returns:
        list: The saved history, or an empty list when the file is missing
        or cannot be parsed as JSON.
    """
    try:
        with open("chat_history.json", "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        return []
    except json.JSONDecodeError:
        # A truncated or hand-edited file should reset the state, not
        # crash the UI with an unhandled parse error.
        return []
95
+
96
+
97
# Gradio app interface
def create_interface():
    """
    Build the Gradio Blocks UI for the chatbot and return it.
    """
    with gr.Blocks() as demo:
        gr.Markdown("## 🧠 NLPToolkit Agent\nAn advanced assistant for NLP tasks, powered by Hugging Face.")

        # NLP task selector.
        with gr.Row():
            task_selector = gr.Dropdown(
                choices=["Summarization", "Sentiment Analysis", "Text Classification", "Entity Recognition"],
                value="Summarization",
                label="Select NLP Task"
            )

        # Free-form user input plus the editable system prompt.
        with gr.Row():
            message_box = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            system_box = gr.Textbox(value=default_system_message, label="System Message")

        # Hidden conversation state and the streamed assistant output.
        # NOTE(review): nothing ever appends new turns to this state, so
        # save_history always persists the initial/loaded value — confirm
        # whether history accumulation was intended.
        with gr.Row():
            history_state = gr.State(value=[])
            response_box = gr.Textbox(label="Assistant Response", interactive=False)

        # Generation parameters.
        with gr.Row():
            max_tokens_slider = gr.Slider(1, 2048, value=512, label="Max Tokens")
            temperature_slider = gr.Slider(0.1, 4.0, value=0.7, label="Temperature")
            top_p_slider = gr.Slider(0.1, 1.0, value=0.95, label="Top-p (Nucleus Sampling)")

        # Persistence controls.
        with gr.Row():
            save_button = gr.Button("Save Chat History")
            load_button = gr.Button("Load Chat History")

        with gr.Row():
            submit_button = gr.Button("Generate Response")

        # Event wiring: respond streams its output into the response box;
        # input order matches respond()'s parameter order.
        submit_button.click(
            fn=respond,
            inputs=[task_selector, message_box, history_state, system_box, max_tokens_slider, temperature_slider, top_p_slider],
            outputs=response_box
        )
        save_button.click(fn=save_history, inputs=history_state, outputs=None)
        load_button.click(fn=load_history, inputs=None, outputs=history_state)

        gr.Markdown("### 🚀 Powered by Hugging Face and Gradio | Developed by Canstralian")

    return demo
150
+
151
+
152
# Script entry point: build the UI and start the local Gradio server.
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()