import gradio as gr
from transformers import pipeline, MBartForConditionalGeneration, MBart50TokenizerFast
# Load ASR model
asr = pipeline("automatic-speech-recognition", model="Subu19/whisper-small-nepali")
# Load translation model
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")

def translate_nepali_to_english(text):
    """Translate Nepali text to English with mBART-50."""
    tokenizer.src_lang = "ne_NP"
    encoded = tokenizer(text, return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

def translate_english_to_nepali(text):
    """Translate English text back to Nepali with mBART-50."""
    tokenizer.src_lang = "en_XX"
    encoded = tokenizer(text, return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.lang_code_to_id["ne_NP"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
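
# Illustrative use of the translation helpers (actual output depends on the model):
# translate_nepali_to_english("नमस्ते")  # expected to return something like "Hello"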

# Load summarizer (no checkpoint pinned, so transformers falls back to its default English summarization model)
summarizer = pipeline("summarization")

def summarize_text(text):
    """Summarize English text; very short inputs are returned unchanged."""
    word_count = len(text.split())
    if word_count < 25:
        return text
    # max_length/min_length count generated tokens; the input word count is used here as a rough budget
    summary = summarizer(text, max_length=word_count, min_length=int(word_count * 0.4), do_sample=False)
    return summary[0]['summary_text']

def pipeline_fn(audio):
    """Full pipeline: Nepali speech -> Nepali text -> English -> English summary -> Nepali summary."""
    result = asr(audio)["text"]
    english = translate_nepali_to_english(result)
    summary = summarize_text(english)
    nepali_summary = translate_english_to_nepali(summary)
    return result, english, summary, nepali_summary
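
# Quick local sanity check with a hypothetical recording; uncomment to try the pipeline outside the UI:
# transcript, english, summary, nepali_summary = pipeline_fn("sample_nepali.wav")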

gr.Interface(
    fn=pipeline_fn,
    inputs=gr.Audio(type="filepath", label="🎀 Speak Nepali"),  # filepath input so the ASR pipeline can read the recorded audio
    outputs=[
        gr.Textbox(label="🗣️ Transcribed Nepali"),
        gr.Textbox(label="📘 Translated English"),
        gr.Textbox(label="📝 English Summary"),
        gr.Textbox(label="🔁 Summarized Nepali"),
    ],
    title="Nepali Voice Summarizer",
    description="Speak Nepali → Get English & Nepali Summary",
).launch()