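"""RedfernsTech Q&A chatbot.

Ingests PDFs from a local directory into a persisted LlamaIndex vector
store and answers questions about them through a Gradio chat interface,
using a Hugging Face Inference API LLM and a BGE embedding model.
"""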
from dotenv import load_dotenv
import gradio as gr
import os
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# Load environment variables
load_dotenv()
# Configure the LlamaIndex settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)
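# Note: the same embedding model is used both when building the index and
# when embedding queries, so changing it requires re-ingesting the documents.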
# Define the directory for persistent storage and data
PERSIST_DIR = "db"
PDF_DIRECTORY = 'data'  # Directory containing the source PDFs
# Ensure directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)
def data_ingestion_from_directory():
    # Read every file in PDF_DIRECTORY, build a vector index over the
    # documents, and persist it to PERSIST_DIR for later reloading
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
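# Optional: a minimal sketch of how the unconditional re-ingestion at startup
# (below) could be skipped when an index has already been persisted.
# `ensure_index` is a hypothetical helper, not part of the original flow; it
# assumes an empty PERSIST_DIR means no index has been built yet.
def ensure_index():
    if not os.listdir(PERSIST_DIR):
        data_ingestion_from_directory()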
def handle_query(message, history):
    # Gradio supplies the prior conversation in `history`; only the current
    # message is sent to the query engine below.
    # Prompt template for answer synthesis: LlamaIndex fills {context_str}
    # with the retrieved document chunks and {query_str} with the question.
    chat_text_qa_msgs = [
        (
            "user",
            "You are now the RedFerns Tech chatbot. Answer the user's "
            "question using only the context provided below.\n\n"
            "Context:\n{context_str}\n\nQuestion:\n{query_str}",
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
    # Load the persisted index from storage
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    # Query the index with the custom prompt
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
    answer = query_engine.query(message)
    if hasattr(answer, "response"):
        return answer.response
    return "Sorry, I couldn't find an answer."
# Ingest the PDFs at startup
print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
data_ingestion_from_directory()
# Create the Gradio interface
interface = gr.ChatInterface(
    fn=handle_query,
    examples=["hello", "hola", "merhaba"],
    title="RedfernsTech Q&A Chatbot",
    description="Ask me anything about the ingested PDF documents.",
)
# Launch the Gradio interface
interface.launch()