import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer
from textwrap import fill

# Load the fine-tuned FLAN-T5 forecasting checkpoint and its tokenizer once
# at import time so every request reuses the same in-memory weights.
last_checkpoint = "Jyotiyadav/FLANT-5_Model_Forecasting"
finetuned_model = T5ForConditionalGeneration.from_pretrained(last_checkpoint)
tokenizer = T5Tokenizer.from_pretrained(last_checkpoint)


def answer_question(question):
    """Generate an answer to *question* with the fine-tuned T5 model.

    Parameters
    ----------
    question : str
        Free-form question text entered by the user.

    Returns
    -------
    str
        The model's decoded answer, line-wrapped to 80 columns for display.
    """
    # T5-style models expect an instruction prefix in front of the question.
    prompt = "Please answer this question: " + question
    inputs = tokenizer([prompt], return_tensors="pt")

    # FIX: the default generation length (max_length=20) silently truncates
    # answers; give the model explicit headroom for a full response.
    outputs = finetuned_model.generate(**inputs, max_new_tokens=128)

    # FIX: skip_special_tokens=True strips <pad>/</s> markers that the bare
    # decode() call previously leaked into the user-visible answer.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Wrap long single-line answers so they read cleanly in the text box.
    return fill(answer, width=80)


# Gradio UI: a single text input mapped to a single text output.
iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="Question Answering with T5 Model",
    description="Enter your question to get the answer.",
    examples=[
        ["On 2013-02-11, at store number 1 in Quito, Pichincha, under store type D and cluster 13, with 396 transactions recorded, and crude oil price at 97.01, what was the sales quantity of BABY CARE products (ID: 73063), considering whether they were on promotion (On Promotion: 0) in Ecuador during Carnaval (Transferred: False)?"]
    ],
)

# Launch the web app (blocking call; serves until interrupted).
iface.launch()