BusinessDev committed
Commit 59e0faa · 1 Parent(s): 8879721

darinanina dina no

Files changed (1)
  1. app.py +32 -6
app.py CHANGED
@@ -1,12 +1,38 @@
+from transformers import MBartForConditionalGeneration, MBart50Tokenizer
 import gradio as gr
 
-def greet(name, intensity):
-    return "Hello, " + name + "!" * int(intensity)
+# Load the model and tokenizer
+model_name = "LocalDoc/mbart_large_qa_azerbaijan"
+tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="en_XX", tgt_lang="az_AZ")
+model = MBartForConditionalGeneration.from_pretrained(model_name)
+
+
+
+
+
+def answer_question(text, question):
+    # Prepare input text
+    input_text = f"context: {text} question: {question}"
+    inputs = tokenizer(input_text, return_tensors="pt", max_length=1024, truncation=True, padding="max_length")
+
+    # Generate answer
+    outputs = model.generate(
+        input_ids=inputs["input_ids"],
+        attention_mask=inputs["attention_mask"],
+        max_length=1024,
+        num_beams=5,
+        early_stopping=True
+    )
+
+    # Decode the answer
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return answer
 
 demo = gr.Interface(
-    fn=greet,
-    inputs=["text", "slider"],
-    outputs=["text"],
+    fn=answer_question,
+    inputs=["text", "text"],
+    outputs=["text"]
 )
 
-demo.launch()
+
+demo.launch()
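
A quick sanity check one could run in the same Python session after the definitions above have loaded (not part of this commit; the Azerbaijani context and question strings are made-up examples, and the expected output assumes the model answers as intended):

# Hypothetical example strings, not from the commit
sample_context = "Bakı Azərbaycanın paytaxtıdır və ölkənin ən böyük şəhəridir."
sample_question = "Azərbaycanın paytaxtı hansı şəhərdir?"
print(answer_question(sample_context, sample_question))  # expected: a short answer such as "Bakı"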