from dotenv import load_dotenv
import gradio as gr
import os
from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    VectorStoreIndex,
    SimpleDirectoryReader,
    ChatPromptTemplate,
    Settings,
)
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import firebase_admin
from firebase_admin import db, credentials
import datetime
import uuid
import random


def select_random_name():
    names = ['Clara', 'Lily']
    return random.choice(names)


# Load environment variables
load_dotenv()

# Authenticate to Firebase
cred = credentials.Certificate("redfernstech-fd8fe-firebase-adminsdk-g9vcn-0537b4efd6.json")
firebase_admin.initialize_app(cred, {"databaseURL": "https://redfernstech-fd8fe-default-rtdb.firebaseio.com/"})

# Configure LlamaIndex settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="facebook/rag-token-nq",
    tokenizer_name="facebook/rag-token-nq",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# Define directories for storage and data
PERSIST_DIR = "db"
PDF_DIRECTORY = 'data'  # Directory containing PDFs

# Ensure directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)

# Variable to store the current chat conversation
current_chat_history = []


def data_ingestion_from_directory():
    # Read every PDF in the directory, build a vector index, and persist it
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    storage_context = StorageContext.from_defaults()
    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
    index.storage_context.persist(persist_dir=PERSIST_DIR)


def handle_query(query):
    chat_text_qa_msgs = [
        (
            "user",
            """You're Clara, working in customer care at RedfernsTech.
Continue the conversation flow, giving responses within 10-15 words only.
Convert all questions into company-related inquiries.
Use the entire conversation context to craft responses, ensuring each answer
relates to previous questions and answers.
If you don't know the answer, say, 'You can directly contact us at
+91 7972628566 or email us at contactus@redfernstech.com'
{context_str}
Question: {query_str}
"""
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)

    # Load the index from storage
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)

    # Build a transcript of past turns so the model can use the conversation
    context_str = ""
    for past_query, response in reversed(current_chat_history):
        if past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"

    # `as_query_engine` has no `context_str` parameter; the {context_str}
    # placeholder in the template is filled with retrieved document context.
    # Prepend the chat history to the query text instead so it reaches the LLM.
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
    query_with_history = f"{context_str}\n{query}" if context_str else query
    answer = query_engine.query(query_with_history)

    if hasattr(answer, 'response'):
        response = answer.response
    elif isinstance(answer, dict) and 'response' in answer:
        response = answer['response']
    else:
        response = "Sorry, I couldn't find an answer."

    # Update the current chat history
    current_chat_history.append((query, response))
    return response


def predict(message, history):
    logo_html = '''