# AI assistant with a RAG system to query information from the CAMELS cosmological simulations using LangChain
# Author: Pablo Villanueva Domingo
import gradio as gr
from langchain import hub
from langchain_chroma import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_mistralai import ChatMistralAI
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.rate_limiters import InMemoryRateLimiter
# Define a limiter to avoid rate limit issues with MistralAI
rate_limiter = InMemoryRateLimiter(
    requests_per_second=0.1,    # <-- MistralAI free tier: at most one request every 10 seconds
    check_every_n_seconds=0.01, # Wake up every 10 ms to check whether a request is allowed
    max_bucket_size=10,         # Controls the maximum burst size
)
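# The limiter is passed to ChatMistralAI below; LangChain blocks each request
# until the limiter grants a token, so bursts stay within the free-tier quota.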
# Read the list of source URLs, one per line
with open("urls.txt") as urlsfile:
    urls = [url.strip() for url in urlsfile.readlines()]
# Load the contents of the web pages
loader = WebBaseLoader(urls)
docs = loader.load()
print("Pages loaded:", len(docs))
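# WebBaseLoader returns one Document per URL; these are chunked and indexed
# inside RAG() below.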
# Join content pages for processing
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)
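# format_docs is piped after the retriever in the chain below, turning the
# list of retrieved Documents into a single context string for the prompt.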
# Create a RAG chain
def RAG(llm, docs, embeddings):
    # Split text into overlapping chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(docs)
    # Create vector store
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
    # Retrieve and generate using the relevant snippets of the documents
    retriever = vectorstore.as_retriever()
    # Prompt basis example for RAG systems, pulled from the LangChain Hub
    prompt = hub.pull("rlm/rag-prompt")
    # Create the chain
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain
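# Example (not executed): the chain can also be queried in one shot, e.g.
#   answer = rag_chain.invoke("What is CAMELS?")
# handle_prompt below uses .stream() instead to yield partial output.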
# LLM model
llm = ChatMistralAI(model="mistral-large-latest", rate_limiter=rate_limiter)
# Embeddings
embed_model = "sentence-transformers/multi-qa-distilbert-cos-v1"
# embed_model = "nvidia/NV-Embed-v2"
embeddings = HuggingFaceInstructEmbeddings(model_name=embed_model)
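# multi-qa-distilbert-cos-v1 is a lightweight sentence-transformers model tuned
# for semantic search; NV-Embed-v2 is kept above as a commented-out alternative.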
# RAG chain
rag_chain = RAG(llm, docs, embeddings)
# Function to handle a user prompt and query the RAG chain, streaming the answer
def handle_prompt(message, history):
    try:
        # Stream output as it is generated
        out = ""
        for chunk in rag_chain.stream(message):
            out += chunk
            yield out
    except Exception:
        raise gr.Error("Requests rate limit exceeded")
# Predefined messages and examples
description = "AI-powered assistant that answers any question related to the [CAMELS simulations](https://www.camel-simulations.org/)."
greetingsmessage = "Hi, I'm the CAMELS DocBot. I'm here to assist you with any question related to the CAMELS simulations."
example_questions = [
    "How can I read a halo file?",
    "Which simulation suites are included in CAMELS?",
    "Which are the largest volumes in CAMELS simulations?",
    "Write a complete snippet of code getting the power spectrum of a simulation",
]
# Define customized Gradio chatbot
chatbot = gr.Chatbot([{"role": "assistant", "content": greetingsmessage}],
                     type="messages",
                     avatar_images=["ims/userpic.png", "ims/camelslogo.jpg"],
                     height="60vh")
# Define Gradio interface
demo = gr.ChatInterface(handle_prompt,
                        type="messages",
                        title="CAMELS DocBot",
                        fill_height=True,
                        examples=example_questions,
                        theme=gr.themes.Soft(),
                        description=description,
                        cache_examples=False,
                        chatbot=chatbot)
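# launch() starts a local web server; on Hugging Face Spaces the app is served automatically.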
demo.launch()