from dotenv import load_dotenv
import gradio as gr
import os
from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    VectorStoreIndex,
    SimpleDirectoryReader,
    ChatPromptTemplate,
    Settings,
)
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Load environment variables (expects HF_TOKEN in a .env file)
load_dotenv()

# Configure the LlamaIndex global settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# Directories for persistent storage and the source PDFs
PERSIST_DIR = "db"
PDF_DIRECTORY = "data"  # Directory containing the PDF files

# Ensure both directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)

# Record of the current chat conversation
current_chat_history = []


def data_ingestion_from_directory():
    """Read every file in PDF_DIRECTORY, build a vector index, and persist it."""
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)


def handle_query(message, history):
    # Build the QA prompt. The {context_str} and {query_str} placeholders are
    # filled in by the query engine with the retrieved document chunks and the
    # user's question, respectively.
    chat_text_qa_msgs = [
        (
            "user",
            "You are the RedFerns Tech chatbot. Answer the user's question "
            "using only the context below.\n\n"
            "Context:\n{context_str}\n\nQuestion:\n{query_str}",
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)

    # Load the persisted index from storage
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)

    # Query the index with the custom QA prompt
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
    answer = query_engine.query(message)

    if hasattr(answer, "response"):
        response = answer.response
    elif isinstance(answer, dict) and "response" in answer:
        response = answer["response"]
    else:
        response = "Sorry, I couldn't find an answer."

    # Record the current interaction
    current_chat_history.append((message, response))
    return response


# Ingest the PDFs once at startup
print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
data_ingestion_from_directory()

# Create and launch the Gradio chat interface
interface = gr.ChatInterface(
    fn=handle_query,
    examples=["hello", "hola", "merhaba"],
    title="RedfernsTech Q&A Chatbot",
    description="Ask me anything about the uploaded documents.",
)
interface.launch()