paragon-analytics committed on
Commit
f1c8fb6
·
1 Parent(s): 98c11b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -20
app.py CHANGED
@@ -14,20 +14,20 @@ from transformers_interpret import SequenceClassificationExplainer
14
 
15
  tokenizer = AutoTokenizer.from_pretrained("paragon-analytics/ADRv1")
16
  model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1")
17
- modelc = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1").cuda
18
 
19
 
20
  cls_explainer = SequenceClassificationExplainer(
21
  model,
22
  tokenizer)
23
 
24
- # define a prediction function
25
- def f(x):
26
- tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=500, truncation=True) for v in x]).cuda()
27
- outputs = modelc(tv)[0].detach().cpu().numpy()
28
- scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T
29
- val = sp.special.logit(scores[:,1]) # use one vs rest logit units
30
- return val
31
 
32
  def adr_predict(x):
33
  encoded_input = tokenizer(x, return_tensors='pt')
@@ -43,6 +43,7 @@ def adr_predict(x):
43
  # shap_plot = shap.plots.text(shap_values)
44
 
45
  word_attributions = cls_explainer(str(x))
 
46
  letter = []
47
  score = []
48
  for i in word_attributions:
@@ -62,21 +63,23 @@ def adr_predict(x):
62
 
63
  word_attributions = [(letter[i], score[i]) for i in range(0, len(letter))]
64
 
65
- # SHAP:
66
- # build an explainer using a token masker
67
- explainer = shap.Explainer(f, tokenizer)
68
- shap_values = explainer(str(x), fixed_context=1)
69
- scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
70
- # plot the first sentence's explanation
71
- # plt = shap.plots.text(shap_values[0],display=False)
72
 
73
 
74
- return {"Severe Reaction": float(scores.numpy()[1]), "Non-severe Reaction": float(scores.numpy()[0])}, word_attributions,scores
 
75
 
76
  def main(text):
77
  text = str(text).lower()
78
  obj = adr_predict(text)
79
- return obj[0],obj[1],obj[2]
 
80
 
81
  title = "Welcome to **ADR Detector** πŸͺ"
82
  description1 = """
@@ -102,16 +105,20 @@ with gr.Blocks(title=title) as demo:
102
  "--": "darkblue",
103
  "-": "blue", "NA":"white"})
104
 
105
- interpretation = gr.components.Interpretation(text)
106
 
107
 
108
  submit_btn.click(
109
  main,
110
  [text],
111
- [label,intp,interpretation], api_name="adr"
 
 
112
  )
113
 
114
  gr.Markdown("### Click on any of the examples below to see to what extent they contain resilience messaging:")
115
- gr.Examples([["I have minor pain."],["I have severe pain."]], [text], [label,intp,interpretation], main, cache_examples=True)
 
 
116
 
117
  demo.launch()
 
14
 
15
  tokenizer = AutoTokenizer.from_pretrained("paragon-analytics/ADRv1")
16
  model = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1")
17
+ # modelc = AutoModelForSequenceClassification.from_pretrained("paragon-analytics/ADRv1").cuda
18
 
19
 
20
  cls_explainer = SequenceClassificationExplainer(
21
  model,
22
  tokenizer)
23
 
24
+ # # define a prediction function
25
+ # def f(x):
26
+ # tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=500, truncation=True) for v in x]).cuda()
27
+ # outputs = modelc(tv)[0].detach().cpu().numpy()
28
+ # scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T
29
+ # val = sp.special.logit(scores[:,1]) # use one vs rest logit units
30
+ # return val
31
 
32
  def adr_predict(x):
33
  encoded_input = tokenizer(x, return_tensors='pt')
 
43
  # shap_plot = shap.plots.text(shap_values)
44
 
45
  word_attributions = cls_explainer(str(x))
46
+ # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
47
  letter = []
48
  score = []
49
  for i in word_attributions:
 
63
 
64
  word_attributions = [(letter[i], score[i]) for i in range(0, len(letter))]
65
 
66
+ # # SHAP:
67
+ # # build an explainer using a token masker
68
+ # explainer = shap.Explainer(f, tokenizer)
69
+ # shap_values = explainer(str(x), fixed_context=1)
70
+ # scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
71
+ # # plot the first sentence's explanation
72
+ # # plt = shap.plots.text(shap_values[0],display=False)
73
 
74
 
75
+ return {"Severe Reaction": float(scores.numpy()[1]), "Non-severe Reaction": float(scores.numpy()[0])}, word_attributions
76
+ # ,scores
77
 
78
  def main(text):
79
  text = str(text).lower()
80
  obj = adr_predict(text)
81
+ return obj[0],obj[1]
82
+ # ,obj[2]
83
 
84
  title = "Welcome to **ADR Detector** πŸͺ"
85
  description1 = """
 
105
  "--": "darkblue",
106
  "-": "blue", "NA":"white"})
107
 
108
+ # interpretation = gr.components.Interpretation(text)
109
 
110
 
111
  submit_btn.click(
112
  main,
113
  [text],
114
+ [label,intp
115
+ # ,interpretation
116
+ ], api_name="adr"
117
  )
118
 
119
  gr.Markdown("### Click on any of the examples below to see to what extent they contain resilience messaging:")
120
+ gr.Examples([["I have minor pain."],["I have severe pain."]], [text], [label,intp
121
+ # ,interpretation
122
+ ], main, cache_examples=True)
123
 
124
  demo.launch()