import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
import torch

# Load the model and tokenizer once and cache them across Streamlit reruns
@st.cache_resource
def load_model():
    # Raw string: otherwise Python parses "\U" in the Windows path as a
    # Unicode escape and raises a SyntaxError
    model_path = r'C:\Users\neeli\Downloads\bert-tensorflow2-uncased-tf2-qa-v1'
    # Note: if this directory holds only a TensorFlow checkpoint (as its
    # name suggests), pass from_tf=True to from_pretrained so transformers
    # can convert it to a PyTorch model
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer

model, tokenizer = load_model()

# Extract the answer span for a question from the given context
def get_answer(question, context):
    inputs = tokenizer(question, context, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # The answer runs from the highest-scoring start token to the
    # highest-scoring end token (inclusive, hence the +1 on the slice end)
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1
    answer = tokenizer.decode(
        inputs['input_ids'][0][answer_start:answer_end],
        skip_special_tokens=True,
    )
    return answer

# Streamlit UI
st.title("Question Answering Application")

question = st.text_input("Enter your question:")
context = st.text_area("Enter context text:", height=200)

if st.button("Get Answer"):
    if question and context:
        answer = get_answer(question, context)
        st.write(f"**Answer:** {answer}")
    else:
        st.warning("Please enter both a question and context.")
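
# Usage note: Streamlit apps are launched from the command line rather than
# with "python". Assuming the script is saved as app.py (the filename is an
# assumption, not part of the original), run it from the same environment
# where streamlit, transformers, and torch are installed:
#   streamlit run app.py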