import gradio as gr
import torch
from transformers import T5ForConditionalGeneration, PreTrainedTokenizerFast

# Hugging Face Hub repo with the fine-tuned Cyrillic-to-Mongolian-script T5 checkpoint
checkpoint_dir = "onlysainaa/cyrillic_to_script-t5-model"

# Load the model and its tokenizer from the checkpoint and switch to inference mode
model = T5ForConditionalGeneration.from_pretrained(checkpoint_dir)
model.eval()
tokenizer = PreTrainedTokenizerFast.from_pretrained(checkpoint_dir)

# Use a GPU if one is available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def translate_text(input_text):
    # Tokenize the Cyrillic input and move the tensors to the model's device
    inputs = tokenizer(input_text, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items() if k in ['input_ids', 'attention_mask']}

    # Generate the transliteration without tracking gradients
    with torch.no_grad():
        outputs = model.generate(**inputs)

    # Decode the generated token ids back into traditional Mongolian script
    translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translated_text
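
# Note: model.generate(**inputs) above uses whatever generation length the checkpoint's
# generation config specifies (the library default is 20 tokens), so long inputs may come
# back truncated. If that happens, generate() accepts max_new_tokens, e.g. (illustrative
# value, not taken from this repo):
#     outputs = model.generate(**inputs, max_new_tokens=256)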

# Minimal Gradio UI: a single text box in, a single text box out
gr_interface = gr.Interface(
    fn=translate_text,
    inputs="text",
    outputs="text",
    title="Mongolian Cyrillic to Mongolian Script Model",
    description="Enter Mongolian Cyrillic text to convert it into traditional Mongolian script.",
)

# Launch the web interface
gr_interface.launch()
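# Optional: when running locally, launch(share=True) also creates a temporary public URL.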