import os

import streamlit as st
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
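
# HuggingFaceHub authenticates via the HUGGINGFACEHUB_API_TOKEN environment
# variable; export it (or set it through os.environ) before launching the app.
# Note: on recent LangChain releases these classes live in submodules instead
# (langchain.prompts.PromptTemplate, langchain.chains.LLMChain,
# langchain_community.llms.HuggingFaceHub).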

st.title("Generating Response with HuggingFace Models")
st.markdown("## Model: `facebook/blenderbot-1B-distill`")


def get_response(question: str) -> dict:
    """
    Generate a response to a given question using the BlenderBot
    large language model.

    Args:
        question (str): The question to be answered.

    Returns:
        dict: A dictionary containing the response text and metadata.
    """
    template = """Question: {question}

    Answer: Let's think step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question"])

    # Wrap the hosted model in an LLMChain; model_kwargs are passed through
    # to the Hugging Face Inference API.
    llm_chain = LLMChain(
        prompt=prompt,
        llm=HuggingFaceHub(
            repo_id="facebook/blenderbot-1B-distill",
            model_kwargs={"temperature": 0, "max_length": 64},
        ),
    )

    # invoke() returns a dict with the chain's inputs plus the generated
    # text under the "text" key, e.g. {"question": "...", "text": "..."}.
    response = llm_chain.invoke(question)

    return response


question = st.text_area("Enter your question here...")

if st.button("Get Response") and question:
    with st.spinner("Generating Response..."):
        answer = get_response(question)
    if answer is not None:
        st.success('Great! Response generated successfully')
        st.write(answer)  # full response dict (question + generated text)
        st.write(answer["text"])  # just the generated answer
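

# To try this out, save the script (the filename below is just an example)
# and run it with Streamlit, with your Hugging Face token in the environment:
#
#   HUGGINGFACEHUB_API_TOKEN=<your-token> streamlit run app.py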