from dotenv import load_dotenv
import gradio as gr
import os
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import datetime
import uuid
import random
def select_random_name():
    names = ['Clara', 'Lily']
    return random.choice(names)
# Example usage
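# bot_name = select_random_name()  # illustrative only; the helper is not referenced elsewhere in this script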
# Load environment variables
load_dotenv()
# Configure the Llama index settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3000,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)
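# Note: HF_TOKEN must be present in the environment (for example as a Space secret or a .env entry
# picked up by load_dotenv) for the Inference API calls configured above to authenticate.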
# Define the directory for persistent storage and data
PERSIST_DIR = "db"
PDF_DIRECTORY = 'data'  # Directory containing the PDF files to ingest
# Ensure directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)
# Variable to store current chat conversation
current_chat_history = []
def data_ingestion_from_directory():
    # Read every PDF in the data directory, embed it, and persist the vector index to disk
    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    storage_context = StorageContext.from_defaults()
    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
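# Optional sketch, not called anywhere below: reuse an already-persisted index instead of
# re-ingesting every PDF on each startup. The helper name is illustrative, and it assumes
# PERSIST_DIR contains a valid index once data_ingestion_from_directory() has run at least once.
def load_or_build_index():
    if os.listdir(PERSIST_DIR):
        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        return load_index_from_storage(storage_context)
    data_ingestion_from_directory()
    return load_index_from_storage(StorageContext.from_defaults(persist_dir=PERSIST_DIR))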
def handle_query(query):
    chat_text_qa_msgs = [
        (
            "user",
            """
            You are Clara, the Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Keep every response clear, concise, and within 10-15 words.
            {context_str}
            Question:
            {query_str}
            """
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
    # Load the persisted index from storage
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    # Fold earlier turns into a context string so the model can draw on the chat history
    context_str = ""
    for past_query, response in reversed(current_chat_history):
        if past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
    answer = query_engine.query(query)
    # The query engine may return a Response object or a plain dict
    if hasattr(answer, 'response'):
        response = answer.response
    elif isinstance(answer, dict) and 'response' in answer:
        response = answer['response']
    else:
        response = "Sorry, I couldn't find an answer."
    # Update current chat history
    current_chat_history.append((query, response))
    return response
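# Illustrative call with a hypothetical question (not taken from the company's data):
# print(handle_query("What services does Redfernstech offer?"))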
# Example usage: Process PDF ingestion from directory
print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
data_ingestion_from_directory()
def predict(message, history):
    logo_html = '''
    <div class="circle-logo">
        <img src="https://rb.gy/8r06eg" alt="FernAi">
    </div>
    '''
    response = handle_query(message)
    response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
    return response_with_logo
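# Note: predict() is not wired up below; passing fn=predict instead of fn=chat_interface to
# gr.ChatInterface would return the same answer wrapped in the logo HTML defined above.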
def chat_interface(message, history):
    try:
        # Process the user message and generate a response
        # (handle_query already records the turn in current_chat_history, so it is not appended again here)
        response = handle_query(message)
        return response
    except Exception as e:
        return str(e)
# Custom CSS for styling
css = '''
.circle-logo {
    display: inline-block;
    width: 40px;
    height: 40px;
    border-radius: 50%;
    overflow: hidden;
    margin-right: 10px;
    vertical-align: middle;
}
.circle-logo img {
    width: 100%;
    height: 100%;
    object-fit: cover;
}
.response-with-logo {
    display: flex;
    align-items: center;
    margin-bottom: 10px;
}
footer {
    display: none !important;
    background-color: #F8D7DA;
}
.svelte-1ed2p3z p {
    font-size: 24px;
    font-weight: bold;
    line-height: 1.2;
    color: #111;
    margin: 20px 0;
}
label.svelte-1b6s6s {display: none}
div.svelte-rk35yg {display: none;}
div.progress-text.svelte-z7cif2.meta-text {display: none;}
'''
# Define JavaScript for redirection
js = '''
<script>
    function redirectToPage() {
        window.location.href = "https://example.com"; // Replace with your target URL
    }
</script>
<button onclick="redirectToPage()">Redirect to another page</button>
'''
# ChatInterface supplies its own input and output components, so only fn, css, and description are passed
gr.ChatInterface(
    fn=chat_interface,
    css=css,
    description=js
).launch()