Quyet committed on
Commit
342c3ab
·
1 Parent(s): 7b5d6ad

add gradio server and emotion detection

Browse files
Files changed (3)
  1. README.md +1 -1
  2. app.py +210 -0
  3. requirement.txt +3 -0
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: PsyPlus
- emoji: 🌖
+ emoji: 🤖
  colorFrom: green
  colorTo: purple
  sdk: gradio
app.py ADDED
@@ -0,0 +1,210 @@
+ import re, time
+ import matplotlib.pyplot as plt
+ from threading import Timer
+ import gradio as gr
+
+ from transformers import (
+     GPT2LMHeadModel, GPT2Tokenizer,
+     AutoModelForSequenceClassification, AutoTokenizer,
+     pipeline
+ )
+ # reference: https://huggingface.co/spaces/bentrevett/emotion-prediction
+ # and https://huggingface.co/spaces/tareknaous/Empathetic-DialoGPT
+
+ def euc_100():
+     # 1,2,3. ask about the user's emotions and store the data
+     print('How was your day?')
+     print('On a scale of 1 to 10, how would you rate your emotions in the following categories:') # ~ Baymax :)
+     emotion_types = ['overall'] #, 'happiness', 'surprise', 'sadness', 'depression', 'anger', 'fear', 'anxiety']
+     emotion_degree = []
+     input_time = []
+
+     for e in emotion_types:
+         while True:
+             x = input(f'{e}: ')
+             if x.isnumeric() and (0 < int(x) < 11):
+                 emotion_degree.append(int(x))
+                 input_time.append(time.gmtime())
+                 break
+             else:
+                 print('Invalid input, my friend :) please enter it again')
+
+     # 4. if good mood
+     if emotion_degree[0] >= 6:
+         print('You seem to be in a good mood today. Is there anything you noticed that made you happy?')
+         while True:
+             # timer = Timer(10, ValueError)
+             # timer.start()
+             x = input('Your answer: ')
+             if x == '':  # TODO: replace this with a 10-second wait
+                 print('Is your good mood over?')
+                 print('Any other details that you would like to recall?')
+                 y = input('Your answer (Yes or No): ')
+                 if y == 'No':
+                     break
+             else:
+                 break
+         print('I am glad that you are willing to share the experience with me. Thanks for letting me know.')
+
+     # 5. bad mood
+     else:
+         questions = [
+             'What specific thing is bothering you the most right now?',
+             'Oh, I see. So when it is happening, what feelings or emotions did you have?',
+             'And what did you think about those feelings or emotions at that time?',
+             'Could you think of any evidence for your above-mentioned thought?',
+         ]
+         for q in questions:
+             print(q)
+             y = 'No'  # bad mood
+             while True:
+                 x = input('Your answer (example of answer here): ')
+                 if x == '':  # TODO: replace this with a 10-second wait
+                     print('Is your bad mood over?')
+                     y = input('Your answer (Yes or No): ')
+                     if y == 'Yes':
+                         break
+                 else:
+                     break
+             if y == 'Yes':
+                 print('Nice to hear that.')
+                 break
+
+         # reading interface here
+         print('Here are some reference articles about bad emotions. You can take a look :)')
+     pass
+
+
+ def load_neural_emotion_detector():
+     model_name = "joeddav/distilbert-base-uncased-go-emotions-student"
+     model = AutoModelForSequenceClassification.from_pretrained(model_name)
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     pipe = pipeline('text-classification', model=model, tokenizer=tokenizer,
+                     return_all_scores=True, truncation=True)
+     return pipe
+
+ def sort_predictions(predictions):
+     return sorted(predictions, key=lambda x: x['score'], reverse=True)
+
+ def plot_emotion_distribution(predictions):
+     fig, ax = plt.subplots()
+     ax.bar(x=[i for i, _ in enumerate(predictions)],
+            height=[p['score'] for p in predictions],
+            tick_label=[p['label'] for p in predictions])
+     ax.tick_params(rotation=90)
+     ax.set_ylim(0, 1)
+     plt.show()
+
+ def rulebase(text):
+     keywords = {
+         'life_safety': ["death", "suicide", "murder", "to perish together", "jump off the building"],
+         'immediacy': ["now", "immediately", "tomorrow", "today"],
+         'manifestation': ["never stop", "every moment", "strong", "very"]
+     }
+
+     # if dangerous keywords/topics are found
+     if re.search(rf"{'|'.join(keywords['life_safety'])}", text) is not None and \
+        sum([re.search(rf"{'|'.join(keywords[k])}", text) is not None for k in ['immediacy', 'manifestation']]) >= 1:
+         print('We noticed that you may need immediate professional assistance, would you like to make a phone call? '
+               'The Hong Kong Lifeline number is (852) 2382 0000')
+         x = input('Choose 1. "Dial the number" or 2. "No dangerous emotion": ')
+         if x == '1':
+             print('Connecting you to the office')
+         else:
+             print('Sorry for the misdetection. We just want to make sure that you can get immediate help when needed. '
+                   'Would you mind if we send this conversation to the cloud to fine-tune the model?')
+             y = input('Yes or No: ')
+             if y == 'Yes':
+                 pass  # do something here
+
+ def euc_200(text, testing=True):
+     # 2. use rules to judge the user's emotion
+     rulebase(text)
+
+     # 3. use ML
+     if not testing:
+         pipe = load_neural_emotion_detector()
+         prediction = pipe(text)[0]
+         prediction = sort_predictions(prediction)
+         plot_emotion_distribution(prediction)
+
+     # get the most probable emotion. TODO: modify this part, maybe take the sum of probabilities over all negative emotions
+     threshold = 0.3
+     emotion = {'label': 'sadness', 'score': 0.4} if testing else prediction[0]
+     # then judge
+     if emotion['label'] in ['surprise', 'sadness', 'anger', 'fear'] and emotion['score'] > threshold:
+         print(f'It has come to our attention that you may suffer from {emotion["label"]}')
+         print('If you want to know more about yourself, '
+               'some professional scales are provided to quantify your current status. '
+               'After a period of time (maybe a week/a month/two months) trying to follow the solutions we suggested, '
+               'you can fill out these scales again to see if you have improved.')
+         x = input('Fill in the form now (Okay or Later): ')
+         if x == 'Okay':
+             print('Display the form')
+         else:
+             print('Here are some reference articles about bad emotions. You can take a look :)')
+
+     # 4. if neither of the above is satisfied (TODO: clarify what 'satisfied' means here)
+     questions = [
+         'What specific thing is bothering you the most right now?',
+         'Oh, I see. So when it is happening, what feelings or emotions did you have?',
+         'And what did you think about those feelings or emotions at that time?',
+         'Could you think of any evidence for your above-mentioned thought?',
+     ]
+     for q in questions:
+         print(q)
+         y = 'No'  # bad mood
+         while True:
+             x = input('Your answer (example of answer here): ')
+             if x == '':  # TODO: replace this with a 10-second wait
+                 print('Is your bad mood over?')
+                 y = input('Your answer (Yes or No): ')
+                 if y == 'Yes':
+                     break
+             else:
+                 break
+         if y == 'Yes':
+             print('Nice to hear that.')
+             break
+
+     # reading interface here
+     print('Here are some reference articles about bad emotions. You can take a look :)')
+     pass
+
+
+ tokenizer = GPT2Tokenizer.from_pretrained("tareknaous/dialogpt-empathetic-dialogues")
+ model = GPT2LMHeadModel.from_pretrained("tareknaous/dialogpt-empathetic-dialogues")
+ model.eval()
+
+ def chat(message, history):
+     history = history or []
+     eos = tokenizer.eos_token
+     # concatenate all previous turns plus the new user message, separated by the EOS token
+     input_str = eos.join([turn for pair in history for turn in pair] + [message]) + eos
+
+     bot_input_ids = tokenizer.encode(input_str, return_tensors='pt')
+     bot_output_ids = model.generate(bot_input_ids,
+                                     max_length=1000,
+                                     pad_token_id=tokenizer.eos_token_id)
+     response = tokenizer.decode(bot_output_ids[:, bot_input_ids.shape[-1]:][0],
+                                 skip_special_tokens=True)
+
+     history.append((message, response))
+     return history, history
+
+
+ if __name__ == '__main__':
+     # euc_100()
+     # euc_200('I am happy about my academic record.')
+     title = "PsyPlus Empathetic Chatbot"
+     description = "Gradio demo for the PsyPlus product, based on rule-based CBT and the conversational AI model DialoGPT"
+     iface = gr.Interface(
+         chat,
+         ["text", "state"],
+         ["chatbot", "state"],
+         allow_screenshot=False,
+         allow_flagging="never",
+         title=title,
+         description=description
+     )
+     iface.launch(debug=True)
requirement.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+ matplotlib
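
A minimal sketch of exercising the chat function from app.py outside the Gradio UI (assumptions: app.py is importable from the working directory and the tareknaous/dialogpt-empathetic-dialogues checkpoint downloads on first use; the example messages are placeholders):

# smoke-test sketch for chat(): it takes (message, history) and returns (history, history)
from app import chat

history = []
for user_msg in ["I lost my job last week.", "Thanks, that helps a little."]:
    history, _ = chat(user_msg, history)
    print("user:", user_msg)
    print("bot:", history[-1][1])  # latest (message, response) pair, response part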