paragon-analytics committed on
Commit
9cc7c4e
·
1 Parent(s): f1c8fb6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -33
app.py CHANGED
@@ -12,14 +12,29 @@ from transformers import TFAutoModelForSequenceClassification
12
  from transformers import AutoTokenizer
13
  from transformers_interpret import SequenceClassificationExplainer
14
 
 
 
15
  tokenizer = AutoTokenizer.from_pretrained("paragon-analytics/ADRv1")
16
- model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1")
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  # modelc = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1").cuda
18
 
19
 
20
- cls_explainer = SequenceClassificationExplainer(
21
- model,
22
- tokenizer)
23
 
24
  # # define a prediction function
25
  # def f(x):
@@ -42,26 +57,26 @@ def adr_predict(x):
42
  # shap_values = explainer([x])
43
  # shap_plot = shap.plots.text(shap_values)
44
 
45
- word_attributions = cls_explainer(str(x))
46
- # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
47
- letter = []
48
- score = []
49
- for i in word_attributions:
50
- if i[1]>0.5:
51
- a = "++"
52
- elif (i[1]<=0.5) and (i[1]>0.1):
53
- a = "+"
54
- elif (i[1]>=-0.5) and (i[1]<-0.1):
55
- a = "-"
56
- elif i[1]<-0.5:
57
- a = "--"
58
- else:
59
- a = "NA"
60
 
61
- letter.append(i[0])
62
- score.append(a)
63
 
64
- word_attributions = [(letter[i], score[i]) for i in range(0, len(letter))]
65
 
66
  # # SHAP:
67
  # # build an explainer using a token masker
@@ -70,10 +85,11 @@ def adr_predict(x):
70
  # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
71
  # # plot the first sentence's explanation
72
  # # plt = shap.plots.text(shap_values[0],display=False)
 
73
 
74
 
75
- return {"Severe Reaction": float(scores.numpy()[1]), "Non-severe Reaction": float(scores.numpy()[0])}, word_attributions
76
- # ,scores
77
 
78
  def main(text):
79
  text = str(text).lower()
@@ -100,25 +116,27 @@ with gr.Blocks(title=title) as demo:
100
  # color_map={"+++": "royalblue","++": "cornflowerblue",
101
  # "+": "lightsteelblue", "NA":"white"})
102
  # NER = gr.HTML(label = 'NER:')
103
- intp = gr.HighlightedText(label="Word Scores",
104
- combine_adjacent=False).style(color_map={"++": "darkred","+": "red",
105
- "--": "darkblue",
106
- "-": "blue", "NA":"white"})
107
 
108
- # interpretation = gr.components.Interpretation(text)
109
 
110
 
111
  submit_btn.click(
112
  main,
113
  [text],
114
- [label,intp
115
- # ,interpretation
 
116
  ], api_name="adr"
117
  )
118
 
119
  gr.Markdown("### Click on any of the examples below to see to what extent they contain resilience messaging:")
120
- gr.Examples([["I have minor pain."],["I have severe pain."]], [text], [label,intp
121
- # ,interpretation
 
122
  ], main, cache_examples=True)
123
 
124
  demo.launch()
 
12
  from transformers import AutoTokenizer
13
  from transformers_interpret import SequenceClassificationExplainer
14
 
15
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
16
+
17
  tokenizer = AutoTokenizer.from_pretrained("paragon-analytics/ADRv1")
18
+ model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1").to(device)
19
+
20
+ # build a pipeline object to do predictions
21
+ pred = transformers.pipeline("text-classification", model=model,
22
+ tokenizer=tokenizer, return_all_scores=True)
23
+
24
+
25
+ def interpretation_function(text):
26
+ explainer = shap.Explainer(pred)
27
+ shap_values = explainer([text])
28
+ scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
29
+ return scores
30
+
31
+ # model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1")
32
  # modelc = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1").cuda
33
 
34
 
35
+ # cls_explainer = SequenceClassificationExplainer(
36
+ # model,
37
+ # tokenizer)
38
 
39
  # # define a prediction function
40
  # def f(x):
 
57
  # shap_values = explainer([x])
58
  # shap_plot = shap.plots.text(shap_values)
59
 
60
+ # word_attributions = cls_explainer(str(x))
61
+ # # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
62
+ # letter = []
63
+ # score = []
64
+ # for i in word_attributions:
65
+ # if i[1]>0.5:
66
+ # a = "++"
67
+ # elif (i[1]<=0.5) and (i[1]>0.1):
68
+ # a = "+"
69
+ # elif (i[1]>=-0.5) and (i[1]<-0.1):
70
+ # a = "-"
71
+ # elif i[1]<-0.5:
72
+ # a = "--"
73
+ # else:
74
+ # a = "NA"
75
 
76
+ # letter.append(i[0])
77
+ # score.append(a)
78
 
79
+ # word_attributions = [(letter[i], score[i]) for i in range(0, len(letter))]
80
 
81
  # # SHAP:
82
  # # build an explainer using a token masker
 
85
  # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
86
  # # plot the first sentence's explanation
87
  # # plt = shap.plots.text(shap_values[0],display=False)
88
+ shap_scores = interpretation_function(str(x).lower())
89
 
90
 
91
+ return {"Severe Reaction": float(scores.numpy()[1]), "Non-severe Reaction": float(scores.numpy()[0])}, shap_scores
92
+ # , word_attributions ,scores
93
 
94
  def main(text):
95
  text = str(text).lower()
 
116
  # color_map={"+++": "royalblue","++": "cornflowerblue",
117
  # "+": "lightsteelblue", "NA":"white"})
118
  # NER = gr.HTML(label = 'NER:')
119
+ # intp = gr.HighlightedText(label="Word Scores",
120
+ # combine_adjacent=False).style(color_map={"++": "darkred","+": "red",
121
+ # "--": "darkblue",
122
+ # "-": "blue", "NA":"white"})
123
 
124
+ interpretation = gr.components.Interpretation(text)
125
 
126
 
127
  submit_btn.click(
128
  main,
129
  [text],
130
+ [label
131
+ # ,intp
132
+ ,interpretation
133
  ], api_name="adr"
134
  )
135
 
136
  gr.Markdown("### Click on any of the examples below to see to what extent they contain resilience messaging:")
137
+ gr.Examples([["I have minor pain."],["I have severe pain."]], [text], [label
138
+ # ,intp
139
+ ,interpretation
140
  ], main, cache_examples=True)
141
 
142
  demo.launch()