Commit: ca16a7c
Author: Srinivasulu kethanaboina
Parent(s): bb0cb25
Message: Update app.py

app.py CHANGED
@@ -1,18 +1,24 @@
+from dotenv import load_dotenv
 import gradio as gr
 import os
-from http.cookies import SimpleCookie
-from dotenv import load_dotenv
 from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 import datetime
-
-import
+import uuid
+import random
+import flask
+from flask import request, redirect, make_response
+
+def select_random_name():
+    names = ['Clara', 'Lily']
+    return random.choice(names)
 
+# Example usage
 # Load environment variables
 load_dotenv()
 
-# Configure the Llama index settings
+# Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
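Note: this hunk removes a bare `import` statement that made the old file a syntax error, but `import flask` duplicates the `from flask import ...` line, and the visible context never shows the Hugging Face token loaded from `.env` reaching the LLM client. A minimal sketch of passing it through explicitly, assuming the variable is named `HF_TOKEN` in `.env`, that the unchanged tail of this call does not already pass one, and that the installed llama-index version accepts a `token` argument:

import os
from llama_index.core import Settings
from llama_index.llms.huggingface import HuggingFaceInferenceAPI

# Assumption: load_dotenv() has put HF_TOKEN into the environment.
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    token=os.getenv("HF_TOKEN"),  # hypothetical variable name
)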
@@ -27,33 +33,29 @@ Settings.embed_model = HuggingFaceEmbedding(
 
 # Define the directory for persistent storage and data
 PERSIST_DIR = "db"
-PDF_DIRECTORY = 'data'
+PDF_DIRECTORY = 'data'  # Changed to the directory containing PDFs
 
 # Ensure directories exist
 os.makedirs(PDF_DIRECTORY, exist_ok=True)
 os.makedirs(PERSIST_DIR, exist_ok=True)
 
-#
-
-
-
-
-
-
-
-
-
-
-    })
-    cookies['chat_history'] = str(history_list)
-
-def handle_query(query, cookies=None):
+# Variable to store current chat conversation
+current_chat_history = []
+
+def data_ingestion_from_directory():
+    # Use SimpleDirectoryReader on the directory containing the PDF files
+    documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
+    storage_context = StorageContext.from_defaults()
+    index = VectorStoreIndex.from_documents(documents)
+    index.storage_context.persist(persist_dir=PERSIST_DIR)
+
+def handle_query(query):
     chat_text_qa_msgs = [
         (
             "user",
             """
-
-
+            You are the Clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. give response within 10-15 words only
+            {context_str}
             Question:
             {query_str}
             """
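Note: `data_ingestion_from_directory` persists the index to `PERSIST_DIR` but never returns it, and the `storage_context` it builds is unused, so `handle_query` must rely on an `index` defined elsewhere in the file. A minimal sketch of reloading the persisted index before querying, using only imports the file already has:

from llama_index.core import StorageContext, load_index_from_storage

PERSIST_DIR = "db"

# Rebuild the index object from what data_ingestion_from_directory() wrote to disk.
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)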
@@ -67,12 +69,9 @@ def handle_query(query, cookies=None):
 
     # Use chat history to enhance response
     context_str = ""
-
-
-
-    for entry in reversed(history_list):
-        if entry["query"].strip():
-            context_str += f"User asked: '{entry['query']}'\nBot answered: '{entry['response']}'\n"
+    for past_query, response in reversed(current_chat_history):
+        if past_query.strip():
+            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
     query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
     answer = query_engine.query(query)
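Note: the rewritten loop reads `current_chat_history`, a module-level list, so every visitor to the Space shares one conversation history. A sketch of building the same context string from the per-session history that Gradio chat callbacks receive (in the default tuple format); `build_context` is a hypothetical helper name, while the loop body mirrors the committed code:

def build_context(history):
    # history is the list of (user_message, bot_response) pairs that
    # Gradio passes to a chat callback for the current session only.
    context_str = ""
    for past_query, past_response in reversed(history or []):
        if past_query and past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{past_response}'\n"
    return context_str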
@@ -84,68 +83,45 @@ def handle_query(query, cookies=None):
     else:
         response = "Sorry, I couldn't find an answer."
 
-    # Update current chat history
-
-    save_chat_history_to_cookies(chat_id, query, response, cookies)
+    # Update current chat history
+    current_chat_history.append((query, response))
 
     return response
 
-#
-
-
-
-
-
-
-
-
-
-    )
-
-
-
-    Chat history:
-    {history_str}
-    """
-
-    # Call the Gradio API for summarization
-    result = client.predict(
-        message=message,
-        system_prompt="Summarize the text and provide client interest in 30-40 words in bullet points.",
-        temperature=0.8,
-        max_new_tokens=1024,
-        top_p=1,
-        top_k=20,
-        penalty=1.2,
-        api_name="/chat"
-    )
-
-    # Print the result for debugging
-    print(f"Summary: {result}")
-
-    # Prepare the data to be sent via POST request
-    data = {
-        "summary": result,
-        "timestamp": str(datetime.datetime.now())
-    }
-
-    # Send the result to the URL
-    response = requests.post("https://redfernstech.com/api/receive_result", json=data)
-
-    # Debugging response status
-    print(f"POST request response: {response.status_code}, {response.text}")
-
-    return response.status_code, response.text
-
-# Define your Gradio chat interface function
+# Example usage: Process PDF ingestion from directory
+print("Processing PDF ingestion from directory:", PDF_DIRECTORY)
+data_ingestion_from_directory()
+
+def predict(message, history):
+    logo_html = '''
+    <div class="circle-logo">
+        <img src="https://rb.gy/8r06eg" alt="FernAi">
+    </div>
+    '''
+    response = handle_query(message)
+    response_with_logo = f'<div class="response-with-logo">{logo_html}<div class="response-text">{response}</div></div>'
+    return response_with_logo
+
 def chat_interface(message, history):
-    cookies = {}  # You might need to get cookies from the request in a real implementation
     try:
-        #
-
-
-        #
-
+        # Generate a unique session ID for this chat session
+        session_id = str(uuid.uuid4())
+
+        # Process the user message and generate a response (your chatbot logic)
+        response = handle_query(message)
+
+        # Capture the message data
+        message_data = {
+            "sender": "user",
+            "message": message,
+            "response": response,
+            "timestamp": datetime.datetime.now().isoformat()  # Use a library like datetime
+        }
+
+        # Store chat history in cookies (for demo purposes)
+        resp = make_response(response)
+        resp.set_cookie('chat_history', str(current_chat_history))
+        return resp
     except Exception as e:
         return str(e)
 
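Note: the new `chat_interface` calls Flask's `make_response` and `set_cookie` inside a Gradio callback, where no Flask request context exists, so the call raises at runtime and the `except` branch returns the error string instead of the answer; `session_id` and `message_data` are also built but never used. A sketch of a Gradio-native version that simply returns text and leaves cookie handling to a real Flask/FastAPI layer, which this app does not set up:

def chat_interface(message, history):
    # Gradio renders whatever string the callback returns.
    try:
        return handle_query(message)
    except Exception as e:
        return f"Error: {e}"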
@@ -174,26 +150,25 @@ footer {
     display: none !important;
     background-color: #F8D7DA;
 }
+.svelte-1ed2p3z p {
+    font-size: 24px;
+    font-weight: bold;
+    line-height: 1.2;
+    color: #111;
+    margin: 20px 0;
+}
 label.svelte-1b6s6s {display: none}
 div.svelte-rk35yg {display: none;}
-div.svelte-1rjryqp{display: none;}
 div.progress-text.svelte-z7cif2.meta-text {display: none;}
 '''
-
-#
-
-
-
-
-
-
-
-
-
-custom_button.click(fn=retrieve_history_and_redirect, inputs=[gr.State()])
-
-# Add a JavaScript function to handle redirection after the Gradio event is processed
-custom_button.click(fn=None, js="() => { window.open('https://redfernstech.com/chat-bot-test', '_blank'); }")
-
-# Launch the Gradio interface
-demo.launch()
+def redirect_page():
+    return redirect("https://example.com")  # Replace with your target URL
+
+gr.Interface(
+    fn=chat_interface,
+    inputs="text",
+    outputs="html",
+    live=True,
+    css=css,
+    description="<button onclick='window.location.href=\"https://example.com\"'>Redirect to another page</button>"
+).launch()
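Note: `gr.Interface(fn=chat_interface, inputs="text", ...)` supplies a single input to a function that expects `(message, history)`, so every call fails with a missing-argument error, `live=True` additionally re-runs the function on each keystroke, and `redirect_page` is never wired to any route. A sketch using `gr.ChatInterface`, which matches the two-argument signature and accepts the same `css`:

# Assumes chat_interface(message, history) and the css string defined above.
demo = gr.ChatInterface(fn=chat_interface, css=css)
demo.launch()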