# Flan-T5-Large / app.py
# Uploaded by HafijulHoquenabid2 (commit 6f9c347, verified)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the fine-tuned Flan-T5 model and its tokenizer from the Hugging Face Hub.
# NOTE: this runs at import time and downloads the weights on first launch,
# so app startup can be slow the first time.
model_name = "HafijulHoquenabid2/T5_flanlarge_phase_1" # Replace with your model if needed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Define the function to generate answers
def generate_response(question, context):
    """Answer *question* grounded in *context* using the fine-tuned Flan-T5 model.

    Args:
        question: The user's question as plain text.
        context: Supporting passage the answer should be drawn from.

    Returns:
        The decoded answer string, with special tokens stripped.
    """
    # T5-style QA prompt: the "question: ... context: ..." format matches the
    # text-to-text convention the model was fine-tuned on.
    input_text = f"question: {question} context: {context}"

    # Tokenize; truncate to the 512-token encoder limit. Padding to max_length
    # is unnecessary for a single sequence — the attention mask already marks
    # the real tokens, and skipping padding avoids wasted compute.
    inputs = tokenizer(
        input_text,
        max_length=512,
        truncation=True,
        return_tensors="pt",
    )

    # Inference only: disable autograd to save memory and time.
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=150,
            num_beams=4,        # beam search for higher-quality answers
            early_stopping=True,
        )

    # Decode the best beam back to text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Define Gradio interface
# Build the web UI: two free-text inputs feed generate_response; output is text.
question_box = gr.Textbox(label="Question", placeholder="Enter your question here...")
context_box = gr.Textbox(label="Context", placeholder="Provide the context for the question...")
interface = gr.Interface(
    fn=generate_response,
    inputs=[question_box, context_box],
    outputs="text",
    title="T5 Smart Door Lock QA",
    description="Ask domain-specific questions related to smart door locks and receive precise answers.",
)
# Launch the app
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    interface.launch()