import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
from streamlit_extras.let_it_rain import rain

from data.context_examples import contexts, questions

# Decorative falling-emoji animation rendered behind the app
rain(
    emoji="❔",
    font_size=54,
    falling_speed=5,
    animation_length="infinite",
)

# Load the extractive QA model and tokenizer once at startup
model_name = "deepset/roberta-base-squad2"
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def get_answer(context, question):
    # Build a question-answering pipeline and extract the answer span from the context
    nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
    QA_input = {"question": question, "context": context}
    res = nlp(QA_input)
    return res["answer"]


def main():
    st.title("Question Answering App :robot_face:")
    st.divider()
    st.markdown(
        "### **Enter the context and question, then click on "
        "':blue[Get Answer]' to retrieve the answer:**"
    )

    # Let the user pick one of the bundled example context/question pairs
    selected_index = st.selectbox("Select an example:", range(len(contexts)))
    context = st.text_area("**:blue[Context]**", contexts[selected_index])
    question = st.text_input("**:blue[Question]**", questions[selected_index])

    if st.button(":blue[**Get Answer**]"):
        if context.strip() == "" or question.strip() == "":
            st.warning("Please enter the context and question.")
        else:
            answer = get_answer(context, question)
            st.success(f"Answer: {answer}")


if __name__ == "__main__":
    main()
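
# --- Assumed contents of data/context_examples.py -------------------------
# The import at the top expects a small module exposing two parallel lists
# named `contexts` and `questions`. The original entries are not shown in the
# source; the lists below are a minimal illustrative sketch of the expected
# structure, not the real data.
#
# contexts = [
#     "The Amazon rainforest spans much of the Amazon basin in South America.",
#     "Python was created by Guido van Rossum and first released in 1991.",
# ]
# questions = [
#     "Where is the Amazon rainforest located?",
#     "Who created Python?",
# ]
#
# Run the app with:  streamlit run app.py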