## Setup

# Import the necessary libraries
import os
import uuid
import joblib
import json
import gradio as gr
import pandas as pd
from huggingface_hub import InferenceClient, CommitScheduler
from pathlib import Path
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from openai import OpenAI
# Create the OpenAI-compatible client pointing at the Anyscale Endpoints API
client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=os.environ['anyscale_api_key']
)

# model_name = 'mlabonne/NeuralHermes-2.5-Mistral-7B'
model_name = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
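
# A minimal sanity check of the client setup (hypothetical query; uncomment to run).
# Assumes the 'anyscale_api_key' environment variable is set and the endpoint is reachable:
# response = client.chat.completions.create(
#     model=model_name,
#     messages=[{'role': 'user', 'content': 'Say hello'}],
#     temperature=0
# )
# print(response.choices[0].message.content)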
# Define the embedding model and the vector store
embedding_model_name = 'thenlper/gte-large'
embedding_model = SentenceTransformerEmbeddings(model_name=embedding_model_name)

collection_name_qna = 'report_10K_db'
persisted_vectordb_location = './report_10K_db'

# Load the persisted vector DB
vectorstore_persisted = Chroma(
    collection_name=collection_name_qna,
    persist_directory=persisted_vectordb_location,
    embedding_function=embedding_model
)

vectorstore_retriever = vectorstore_persisted.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)
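
# A quick retrieval sanity check (hypothetical query; uncomment to run).
# Assumes the Chroma collection at ./report_10K_db was already built from the 10-K PDFs:
# sample_docs = vectorstore_retriever.get_relevant_documents("What were the key risk factors?")
# for doc in sample_docs:
#     print(doc.metadata.get('source'), doc.metadata.get('page'))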
# Prepare the logging functionality: each log record is appended as one JSON
# line to a uniquely named file under logs/
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

# The CommitScheduler pushes the contents of the log folder to the dataset
# repo every 2 minutes
scheduler = CommitScheduler(
    repo_id="RavikantSingh_reports_qna_logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)
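
# The logged records can later be read back for analysis, e.g. (a sketch, assuming
# the JSON-lines layout written by predict() below; uncomment to run):
# logs_df = pd.read_json(log_file, lines=True)
# print(logs_df[['user_input', 'model_response']].head())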
# Define the Q&A system message
qna_system_message = """
You are an assistant to a financial technology services firm that provides timely and accurate recommendations to its clients based on 10-K reports from various industry players.
The firm has expertise in investment management and financial planning.
User input will have the context required by you to answer user questions.
This context will begin with the token: ###Context.
The context contains references to specific portions of a document relevant to the user query.
User questions will begin with the token: ###Question.
When crafting your response:
1. Select only the context relevant to answering the question.
2. Include the source links in your response. Get the page number for the final response from the Source.
3. If the question is irrelevant to 10-K reports, respond with: "I am an assistant for 10-K reports. I can only help you with that".
Please adhere to the following guidelines:
- Start the answer under the section - Answer.
- Always quote the source when you use the context. Cite the relevant source at the end of your response under the section - Source:
- Your response should only be about the question asked and nothing else.
- Answer only using the context provided.
- If the answer is not found in the context, it is very important for you to respond with "I don't know. Please check the docs @ '/content/dataset/'"
- Do not make up sources. Use the links provided in the sources section of the context and nothing else. You are prohibited from providing other links/sources.
Please answer only using the context provided in the input. Do not mention anything about the context in your final answer.

Here is an example of how to structure your response:

Answer:
[Answer]

Source:
[Source]
"""
# Define the user message template
qna_user_message_template = """
###Context
Here are some documents and their source links that are relevant to the question mentioned below.
{context}

###Question
{question}
"""
# Define the predict function that runs when 'Submit' is clicked or when an API request is made
def predict(user_input, company):

    filter_company = "/content/dataset/" + company + "-10-k-2023.pdf"

    # Retrieve chunks restricted to the selected company's report.
    # Note: the retriever created above ignores per-call filter kwargs, so the
    # filtered search is done directly on the vector store
    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input,
        k=5,
        filter={"source": filter_company}
    )

    # Create context_for_query, keeping each chunk's source and page number so
    # the model can cite them as the system message requires
    context_list = [
        f"[Source: {d.metadata.get('source')}, Page: {d.metadata.get('page')}] {d.page_content}"
        for d in relevant_document_chunks
    ]
    context_for_query = "\n\n".join(context_list)

    # Create messages
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

    # Get a response from the LLM
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    # Once the prediction is made, log both the inputs and outputs to a local log file.
    # While writing to the log file, hold the commit scheduler's lock to avoid
    # parallel access
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
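
# Example invocation (hypothetical question; uncomment to run outside the Gradio UI):
# print(predict("What were the major acquisitions during the year?", "Meta"))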
# Set up the Gradio UI
# Add a text box and a radio button to the interface
# The radio button selects the company whose 10-K report the context is retrieved from
textbox = gr.Textbox(placeholder="Enter your query.", lines=6)
company = gr.Radio(["Meta", "aws", "google", "IBM", "msft"], label="Company Reports")

# Create the interface
# For the inputs parameter of Interface, provide [textbox, company]
demo = gr.Interface(
    fn=predict,
    inputs=[textbox, company],
    outputs="text",
    title="Insights from 10-K reports",
    description="AI for extraction, summarization, and analysis of information from 10-K reports",
    allow_flagging="auto",
    concurrency_limit=12
)
if __name__ == "__main__":
    demo.queue()
    demo.launch()