# NOTE(review): the "Spaces: / Sleeping" lines below the original header were
# Hugging Face Spaces UI status text captured during extraction, not code.
| ### Import Section ### | |
| import chainlit as cl | |
| import os | |
| from dotenv import load_dotenv | |
| from langchain_openai import ChatOpenAI | |
| from langchain_core.runnables.config import RunnableConfig | |
| from utilities.all_utilities import process_file | |
| from utilities.prompts import get_opening_content | |
################
# General code
################
# Pull environment variables from a local .env file so the OpenAI key is
# available without exporting it in the shell first.
load_dotenv()
openai_api_key = os.environ.get("OPENAI_API_KEY")
# ChatOpenAI Templates
# NOTE(review): decorator restored — without @cl.action_callback Chainlit never
# registers this handler, and the identically-named function below shadowed it.
@cl.action_callback("icelandic")
async def on_action_icelandic(action):
    """Handle the "icelandic" action button: switch the session language."""
    cl.user_session.set("language", "icelandic")
    await cl.Message(content=f"Changing to {action.name}").send()
    # Optionally remove the action button from the chatbot user interface
    # await action.remove()
# NOTE(review): decorator restored — without @cl.action_callback Chainlit never
# registers this handler; also renamed so it no longer collides with the
# icelandic handler defined above.
@cl.action_callback("english")
async def on_action_english(action):
    """Handle the "english" action button: switch the session language."""
    cl.user_session.set("language", "english")
    await cl.Message(content=f"Changing to {action.name}").send()
    # Optionally remove the action button from the chatbot user interface
    # await action.remove()
#############################################
### On Chat Start (Session Start) Section ###
#############################################
# NOTE(review): decorator restored — Chainlit only invokes this at session
# start when it is registered via @cl.on_chat_start.
@cl.on_chat_start
async def on_chat_start():
    """Initialize a chat session.

    Shows language-switch action buttons, asks whether to use prompt
    caching, waits for a PDF upload, builds the RAG chain/retriever via
    ``process_file``, and stores both in the Chainlit user session for the
    message handler to use.
    """
    # Language-switch buttons; clicks are handled by the action callbacks.
    actions = [
        cl.Action(name="icelandic", value="icelandic", description="Switch to Icelandic"),
        cl.Action(name="english", value="english", description="Switch to English"),
    ]
    await cl.Message(content="Languages", actions=actions).send()
    await cl.Message(content=get_opening_content()).send()

    # NOTE(review): the "β" in the labels looks like mojibake for an emoji —
    # confirm the intended characters against the original source.
    prompt_cache_input = await cl.AskActionMessage(
        content="Do you want to use Prompt Cache?",
        actions=[
            cl.Action(name="yes", value="yes", label="β Yes"),
            cl.Action(name="no", value="no", label="β No"),
        ],
    ).send()
    # Fix: AskActionMessage returns None on timeout; the original
    # unconditional .get("value") raised AttributeError in that case.
    prompt_cache = prompt_cache_input.get("value") if prompt_cache_input else "no"

    # Wait for the user to upload a file; loop until one arrives.
    files = None
    while not files:
        files = await cl.AskFileMessage(
            content="Please upload a .pdf file to begin processing!",
            accept=["application/pdf"],
            max_size_mb=20,
            timeout=180,
        ).send()
    file = files[0]

    msg = cl.Message(
        content=f"Processing `{file.name}`...", disable_human_feedback=True
    )
    await msg.send()

    # Build the RAG chain and retriever from the uploaded PDF.
    response = process_file(file, prompt_cache)
    rag_chain = response["chain"]
    retriever = response["retriever"]

    msg.content = f"Processing `{file.name}` is complete."
    await msg.update()
    msg.content = f"You can now ask questions about `{file.name}`."
    await msg.update()

    # Persist for the on-message handler.
    cl.user_session.set("chain", rag_chain)
    cl.user_session.set("retriever", retriever)
##########################
### On Message Section ###
##########################
# NOTE(review): decorator restored — registers this coroutine as the
# Chainlit message handler.
@cl.on_message
async def main(message: cl.Message):
    """Stream the RAG chain's answer to the user's question.

    Reads the chain and the preferred language from the user session and
    streams the LangChain response token-by-token into a single message.
    """
    # Fix: implement the guard the original comment promised — ignore
    # empty/None message content instead of forwarding it to the chain.
    if not message.content:
        return

    chain = cl.user_session.get("chain")
    # Fix: the chain is only set after a PDF was processed in on_chat_start;
    # previously a missing chain crashed with AttributeError on None.astream.
    if chain is None:
        await cl.Message(
            content="Please upload a PDF first so I can answer questions about it."
        ).send()
        return

    language = cl.user_session.get("language", "english")
    msg = cl.Message(content="")
    async for chunk in chain.astream(
        {"question": message.content, "language": language},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk.content)
    await msg.send()