Francesco-A committed on
Commit be5c1d8 · 1 Parent(s): de2bc93

Fix full_context_input radio input

Files changed (1)
app.py +10 -6
app.py CHANGED
@@ -1,11 +1,12 @@
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../drive/MyDrive/Codici/Python/Apps/Gradio_App/SemanticSearch_QA-v2.1.ipynb.
 
-
+ # %% auto 0
  __all__ = ['model_name', 'qa_model', 'contexts', 'question', 'df_results', 'question_1', 'question_2', 'question_3', 'question_4',
             'question_5', 'question_6', 'question_7', 'question_8', 'question_9', 'question_10', 'title', 'description',
             'data', 'context_df', 'question_input', 'contexts_input', 'n_answers_input', 'full_context_input',
             'confidence_threshold_input', 'intf', 'get_answers']
 
+ # %% ../drive/MyDrive/Codici/Python/Apps/Gradio_App/SemanticSearch_QA-v2.1.ipynb 3
  import pandas as pd
  import gradio as gr
  import transformers
@@ -18,6 +19,7 @@ qa_model = pipeline(task = 'question-answering',
                      model = model_name,
                      tokenizer = model_name)
 
+ # %% ../drive/MyDrive/Codici/Python/Apps/Gradio_App/SemanticSearch_QA-v2.1.ipynb 6
  def get_answers(question, contexts, n_answers=1, full_context=True, confidence_threshold = 0.5):
      results = []
 
@@ -41,7 +43,7 @@ def get_answers(question, contexts, n_answers=1, full_context=True, confidence_t
          results_dict['Full Context'] = context
 
          results.append(results_dict)
-
+
      df = pd.DataFrame(results)
      df = df[df['Score'] >= confidence_threshold]
      df = df.sort_values(by='Score', ascending=False).head(n_answers)
@@ -61,11 +63,12 @@ question = "Why is model conversion important?"
  df_results = get_answers(question,contexts,n_answers=2,full_context=False, confidence_threshold = 0.25)
  df_results
 
+ # %% ../drive/MyDrive/Codici/Python/Apps/Gradio_App/SemanticSearch_QA-v2.1.ipynb 7
  # Define example question(s)
  question_1 = "What are the main features of the new XPhone 20?"
  question_2 = "What are some benefits of regular exercise?"
  question_3 = "What is the color of a rose?"
- question_4 = "How does photosynthesis work in plants?"
+ question_4 = "What's photosynthesis?"
  question_5 = "At what temperature does water boil?"
  question_6 = "Where can I find potassium?"
  question_7 = "How does the internet function?"
@@ -117,6 +120,7 @@ contexts = [
      "The Declaration of Independence was adopted by the Continental Congress on July 4, 1776.",
  ]
 
+ # %% ../drive/MyDrive/Codici/Python/Apps/Gradio_App/SemanticSearch_QA-v2.1.ipynb 10
  title = 'SemanticSearch_QA-v2'
  description = """
  QA model: [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2)
@@ -135,10 +139,10 @@ full_context_input = gr.Checkbox(label="Include Full Context", value=True)
  confidence_threshold_input = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.5, label="Confidence Threshold")
 
  intf = gr.Interface(fn=get_answers,
-                     inputs= [question_input, contexts_input, n_answers_input,confidence_threshold_input],
+                     inputs= [question_input, contexts_input, n_answers_input,full_context_input,confidence_threshold_input],
                      outputs= gr.components.Dataframe(),
-                     examples = [[question_1,context_df,3,False,0.3],
-                                 [question_2,context_df,5,True,0.5],
+                     examples = [[question_1,context_df,3,False,0.1],
+                                 [question_2,context_df,5,True,0.1],
                                  [question_4,context_df,10,False,0.1]],
 
                      title=title,
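
The wiring change matters because gr.Interface passes component values to fn positionally, in the order they appear in the inputs list. Before this commit the four-item list meant the Confidence Threshold slider value was bound to get_answers' full_context parameter, while confidence_threshold silently kept its 0.5 default. Below is a minimal sketch of that positional binding; the echo_args helper and the freshly constructed components are assumptions for illustration, not the app's own question_input / context components.

import gradio as gr

# Stand-in for get_answers with the same signature; it just reports which
# value landed in which parameter. (Hypothetical helper, not from app.py.)
def echo_args(question, contexts, n_answers=1, full_context=True, confidence_threshold=0.5):
    return {
        "question": question,
        "n_answers": n_answers,
        "full_context": full_context,
        "confidence_threshold": confidence_threshold,
    }

demo = gr.Interface(
    fn=echo_args,
    # One component per parameter, in signature order. Dropping the Checkbox
    # from this list shifts the Slider value into full_context, which is the
    # mismatch this commit fixes in app.py.
    inputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Contexts"),
        gr.Slider(minimum=1, maximum=10, step=1, value=1, label="Number of Answers"),
        gr.Checkbox(label="Include Full Context", value=True),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.5, label="Confidence Threshold"),
    ],
    outputs=gr.JSON(),
)

# demo.launch()  # uncomment to try the binding locally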