deeksonparlma committed on
Commit 748c41a · 1 Parent(s): 4e8eb95

default to previous build

Files changed (1)
  1. app.py +19 -188
app.py CHANGED
@@ -1,197 +1,28 @@
- # import gradio as gr
-
- # def greet(name):
- # return "Hello " + name + "!!"
-
- # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- # iface.launch()
- # model = AutoModelForSequenceClassification.from_pretrained("tabibu-ai/mental-health-chatbot")
-
- # write a gradio interface for tabibu-ai/mental-health-chatbot in huggingfacehub
-
- # Path: app.py
-
-
- # -----------------------------------
- # import pickle
-
- # import numpy as np
- # import gradio as gr
-
- # # install transformers and torch in requirements.txt
-
- # from transformers import AutoTokenizer, AutoModelForSequenceClassification
- # from sklearn.feature_extraction.text import TfidfVectorizer
-
-
- # # tokenizer = AutoTokenizer.from_pretrained("tabibu-ai/mental-health-chatbot")
-
- # # tokenizer = AutoTokenizer.from_pretrained("rabiaqayyum/autotrain-mental-health-analysis-752423172")
- # model = pickle.load(open("model.pkl", "rb"))
- # vectorizer = pickle.load(open("vectorizer.pkl", "rb"))
-
-
- # def classify_text(inp):
- # # input_ids = tokenizer.encode(inp, return_tensors='pt')
- # # output = model.predict(input_ids)
- # # return output.logits.argmax().item()
- # # vectorizer = TfidfVectorizer()
- # # X = vectorizer.fit_transform(inp)
- # # reshape the input to 2D
- # # convert the input to a numpy array
- # # return model.predict( np.array(inp).reshape(1, -1) )
- # # dtype='numeric' is not compatible with arrays of bytes/strings.Convert your data to numeric values explicitly instead.
-
- # # Convert inp to numeric values explicitly instead
- # new_question_vector = vectorizer.transform([inp])
- # prediction = model.predict(new_question_vector)
- # # convert the prediction from a numpy array to a string
- # return str(prediction[0])
-
- # # # encode the input text
- # # encoded_input = tokenizer(text, return_tensors='pt')
- # # # get the prediction
- # # output = model(**encoded_input)
- # # # get the label
- # # label = output[0].argmax().item()
- # # # return the label
- # # return label
- # iface = gr.Interface(fn=classify_text, inputs="text", outputs="label",
- # interpretation="default", examples=[
- # ["I am feeling depressed"],
- # ["I am feeling anxious"],
- # ["I am feeling stressed"],
- # ["I am feeling sad"],
- # ])
- # iface.launch()
-
- # -----------------------------------
-
- import nltk
- nltk.download('punkt')
+ import pickle

- import nltk
- from nltk.stem.lancaster import LancasterStemmer
  import numpy as np
- import tflearn
- import tensorflow
- import random
- import json
- import pandas as pd
- import pickle
  import gradio as gr

- stemmer = LancasterStemmer()
-
- with open("intents.json") as file:
-     data = json.load(file)
-
- with open("data.pickle", "rb") as f:
-     words, labels, training, output = pickle.load(f)
-
- net = tflearn.input_data(shape=[None, len(training[0])])
- net = tflearn.fully_connected(net, 8)
- net = tflearn.fully_connected(net, 8)
- net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
- net = tflearn.regression(net)
-
- model = tflearn.DNN(net)
- model.load("MentalHealthChatBotmodel.tflearn")
- # print('model loaded successfully')
-
-
- def bag_of_words(s, words):
-     bag = [0 for _ in range(len(words))]
-
-     s_words = nltk.word_tokenize(s)
-     s_words = [stemmer.stem(word.lower()) for word in s_words]
-
-     for se in s_words:
-         for i, w in enumerate(words):
-             if w == se:
-                 bag[i] = 1
-
-     return np.array(bag)
-
-
- def chat(message, history):
-     history = history or []
-     message = message.lower()
-     results = model.predict([bag_of_words(message, words)])
-     results_index = np.argmax(results)
-     tag = labels[results_index]
+ # install transformers and torch in requirements.txt

-     for tg in data["intents"]:
-         if tg['tag'] == tag:
-             responses = tg['responses']
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from sklearn.feature_extraction.text import TfidfVectorizer

-     # print(random.choice(responses))
-     response = random.choice(responses)
-
-     history.append((message, response))
-     return history, history
+ model = pickle.load(open("model.pkl", "rb"))
+ vectorizer = pickle.load(open("vectorizer.pkl", "rb"))

- chatbot = gr.Chatbot(label="Chat")
- css = """

- .gr-button-primary {
- z-index: 14;
- height: 43px;
- width: 130px;
- left: 0px;
- top: 0px;
- padding: 0px;
- cursor: pointer !important;
+ def classify_text(inp):

- border: none !important;
- text-align: center !important;
- font-family: Poppins !important;
- font-size: 14px !important;
- font-weight: 500 !important;
- color: rgb(255, 255, 255) !important;
- line-height: 1 !important;
- border-radius: 12px !important;
- transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
- box-shadow: none !important;
- }
- .gr-button-primary:hover{
- z-index: 14;
- height: 43px;
- width: 130px;
- left: 0px;
- top: 0px;
- padding: 0px;
- cursor: pointer !important;
- background: none rgb(37, 56, 133) !important;
- border: none !important;
- text-align: center !important;
- font-family: Poppins !important;
- font-size: 14px !important;
- font-weight: 500 !important;
- color: rgb(255, 255, 255) !important;
- line-height: 1 !important;
- border-radius: 12px !important;
- transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
- box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important;
- }
- .hover\:bg-orange-50:hover {
- --tw-bg-opacity: 1 !important;
- background-color: rgb(229,225,255) !important;
- }
- div[data-testid="user"] {
- background-color: #253885 !important;
- }
- .h-\[40vh\]{
- height: 70vh !important;
- }
- """
- demo = gr.Interface(
- chat,
- [gr.Textbox(lines=1, label="Message"), "state"],
- [chatbot, "state"],
- allow_flagging="never",
- title="Mental Health Bot | Data Science Dojo",
- css=css
- )
- if __name__ == "__main__":
-     demo.launch()
+     new_question_vector = vectorizer.transform([inp])
+     prediction = model.predict(new_question_vector)
+     return str(prediction[0])
+
+ iface = gr.Interface(fn=classify_text, inputs="text", outputs="label",title="Tabibu Bot",
+ interpretation="default", examples=[
+ ["I am feeling depressed"],
+ ["I am feeling anxious"],
+ ["I am feeling stressed"],
+ ["I am feeling sad"],
+ ])
+ iface.launch()
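
The restored app.py expects a model.pkl / vectorizer.pkl pair sitting next to it in the Space. The training code is not part of this commit; the sketch below is only one plausible way such a pair could be produced with scikit-learn, assuming a hypothetical intents.csv with "text" and "label" columns and a LogisticRegression classifier.

# Hypothetical training sketch (not part of this commit): produces the model.pkl /
# vectorizer.pkl artifacts that app.py unpickles at startup.
import pickle

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

# Assumed training data: one example phrase per row together with its intent tag.
df = pd.read_csv("intents.csv")

vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(df["text"])   # fit TF-IDF on the example phrases
model = LogisticRegression(max_iter=1000)
model.fit(X, df["label"])                  # learn to map a phrase to its intent tag

# Persist both objects; app.py reloads them with pickle.load(...)
pickle.dump(model, open("model.pkl", "wb"))
pickle.dump(vectorizer, open("vectorizer.pkl", "wb"))

At inference time app.py mirrors the last two lines: it unpickles both objects, runs vectorizer.transform([inp]) on the user message, and returns str(prediction[0]) as the label shown by the Gradio interface.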