Dofla committed · Commit 8f6c1bc · verified · 1 Parent(s): 9cc44de

Create app.py

Files changed (1):
  app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
+ from transformers import AutoTokenizer, AutoModelForQuestionAnswering
+ import torch
+ import gradio as gr
+
+ # Load the tokenizer from the Hugging Face Hub
+ tokenizer = AutoTokenizer.from_pretrained("Dofla/roberta_base")
+
+ # Load the model from the Hugging Face Hub
+ model = AutoModelForQuestionAnswering.from_pretrained("Dofla/roberta_base")
+
+ def answer_question(context, question):
+     # Tokenize the question/context pair into model inputs
+     inputs = tokenizer(question, context, return_tensors="pt", padding=True, truncation=True)
+     # Forward pass without gradient tracking (inference only)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     start_logits = outputs.start_logits
+     end_logits = outputs.end_logits
+
+     # Most likely start/end token positions, then decode that span of the input
+     start_index = torch.argmax(start_logits, dim=1).item()
+     end_index = torch.argmax(end_logits, dim=1).item() + 1
+     answer = tokenizer.decode(inputs["input_ids"][0][start_index:end_index], skip_special_tokens=True)
+     return answer
+
+ # Create a Gradio interface for inference
+ iface = gr.Interface(
+     fn=answer_question,
+     inputs=[
+         gr.Textbox(lines=7, label="Context"),
+         gr.Textbox(lines=1, label="Question")
+     ],
+     outputs="text",
+     title="Question Answering with Fine-Tuned Model"
+ )
+
+ # Launch the interface
+ iface.launch(share=True)
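
For a quick smoke test outside the Gradio UI, the handler can be called directly once the checkpoint has downloaded. A minimal sketch; the sample context and question are illustrative and not part of the commit:

context = (
    "Hugging Face Spaces host demo applications. "
    "Spaces run Gradio or Streamlit apps directly from a repository."
)
question = "What do Spaces run?"

# Calls the same function the Gradio interface wraps
print(answer_question(context, question))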
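
The same inference can also be done with the transformers pipeline helper, which handles tokenization, the argmax over the start/end logits, and mapping the span back to the original text. A sketch, assuming the Dofla/roberta_base checkpoint ships the fine-tuned question-answering head:

from transformers import pipeline

# High-level equivalent of answer_question above
qa = pipeline("question-answering", model="Dofla/roberta_base")
result = qa(
    question="What do Spaces run?",
    context="Spaces run Gradio or Streamlit apps directly from a repository.",
)
print(result["answer"], result["score"])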