import streamlit as st
from transformers import pipeline

# Set up the title and description of the app
st.title("My LLM Model: Dementia Knowledge Assistant")
st.markdown("""
This app uses a fine-tuned **Large Language Model (LLM)** to answer questions about dementia.
Simply input your query, and the model will provide contextually relevant answers!
""")
# Load the model once and cache it across Streamlit reruns
@st.cache_resource
def load_qa_pipeline():
    model_name = "rohitashva/dementia--chatbot-llm-model"  # Replace with your Hugging Face model repo name
    # "question-answering" matches how the pipeline is called below
    # (question/context in, answer/score out); "text-generation" does not.
    qa_pipeline = pipeline("question-answering", model=model_name)
    return qa_pipeline

qa_pipeline = load_qa_pipeline()
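# Note: the "question-answering" task assumes the checkpoint above is an
# extractive QA model. The pipeline accepts keyword arguments, e.g.
#   qa_pipeline(question="What is dementia?", context="Dementia is a decline ...")
# and returns a dict of the form
#   {"answer": str, "score": float, "start": int, "end": int}.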
# Input field for user query
st.header("Ask a Question")
question = st.text_input("Enter your question about dementia (e.g., 'What are the symptoms of early-stage dementia?'):")

# Context input for retrieval
st.text_area(
    "Provide context for the model to search:",
    placeholder="Include any relevant context about dementia here to get better results.",
    key="context"
)
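# Because the text area was given key="context", Streamlit mirrors its current
# value into st.session_state.context, which the button handler below reads.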
if st.button("Get Answer"):
    if not question:
        st.error("Please enter a question!")
    elif not st.session_state.context.strip():
        # Extractive QA needs a passage to pull the answer from
        st.error("Please provide some context for the model to search!")
    else:
        # Call the QA pipeline
        with st.spinner("Generating response..."):
            result = qa_pipeline(
                question=question,
                context=st.session_state.context,
            )
        answer = result.get("answer", "I don't know.")
        confidence = result.get("score", 0.0)

        # Display the result
        st.subheader("Answer")
        st.write(answer)
        st.subheader("Confidence Score")
        st.write(f"{confidence:.2f}")
# Footer
st.markdown("---")
st.markdown("Deployed on **Hugging Face Spaces** using [Streamlit](https://streamlit.io/).")