Commit · 31b8e6f
Parent(s): 7a1e3f0

fixing the none output issue

app.py CHANGED
@@ -36,33 +36,80 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], templa
 
 OPENAI_API_KEY = ''
 
-def slow_echo(usr_message, chat_history):
-    global OPENAI_API_KEY
-
-    # Check if the API key is set
-    if not OPENAI_API_KEY:
-        error_message = "OpenAI API key not set. Please provide the key first."
-        print(error_message)
-        return error_message, chat_history
-
-    try:
-        chat_model = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
-
-        # customized memory
-        memory = ConversationBufferMemory(
-            return_messages=True,
-            output_key='result'
-        )
-
-        answer_chain = RetrievalQA.from_chain_type(
-            chat_model,
-            retriever=VectorStore.as_retriever(search_type="similarity", k=6),
-            memory = memory,
-            chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
-            return_source_documents=True
-        )
-
-        # Get a response from the OpenAI model
+# def slow_echo(usr_message, chat_history):
+#     global OPENAI_API_KEY
+
+#     # Check if the API key is set
+#     if not OPENAI_API_KEY:
+#         error_message = "OpenAI API key not set. Please provide the key first."
+#         print(error_message)
+#         return error_message, chat_history
+
+#     try:
+#         chat_model = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
+
+#         # customized memory
+#         memory = ConversationBufferMemory(
+#             return_messages=True,
+#             output_key='result'
+#         )
+
+#         answer_chain = RetrievalQA.from_chain_type(
+#             chat_model,
+#             retriever=VectorStore.as_retriever(search_type="similarity", k=6),
+#             memory = memory,
+#             chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
+#             return_source_documents=True
+#         )
+
+#         # Get a response from the OpenAI model
+#         bot_result = answer_chain({"query": usr_message})
+#         bot_response = bot_result['result']
+#         source_doc = [bot_result['source_documents'][i].metadata['title'] for i in range(len(bot_result))]
+#         source_page = [str(bot_result['source_documents'][i].metadata['page']+1) for i in range(len(bot_result))]
+
+#         # formated output
+#         source_print = {}
+#         for i in range(len(source_doc)):
+#             if source_doc[i] in source_print:
+#                 source_print[source_doc[i]] = source_print[source_doc[i]] + ', ' + source_page[i]
+#             else:
+#                 source_print[source_doc[i]] = 'page: '+ source_page[i]
+
+#         bot_response += '\n Source:'
+#         for doc, page in source_print.items():
+#             bot_response += '\n' + doc + ': ' + page
+
+#         chat_history.append((usr_message, bot_response))
+
+#         time.sleep(1)
+#         yield "", chat_history
+
+#     except openai.error.OpenAIError as e:
+#         # Handle OpenAI-specific errors
+#         error_message = f"An openAI API Error: {e}"
+#         return error_message, chat_history
+
+#     except Exception as e:
+#         # Handle other unexpected errors
+#         error_message = f"An unexpected error: {e}"
+#         return error_message, chat_history
+
+# def get_opeanai_key(openai_key):
+#     global OPENAI_API_KEY
+#     OPENAI_API_KEY=openai_key
+
+#     return {chatbot_col: gr.Column(visible=True)}
+
+
+def slow_echo(usr_message, chat_history):
+    global request_count
+    try:
+        # Check if request count has reached the limit
+        if request_count >= 10:
+            return "You have reached the maximum number of allowed requests. Please exit the chat."
+
+        # Attempt to get a response from the OpenAI model
         bot_result = answer_chain({"query": usr_message})
         bot_response = bot_result['result']
         source_doc = [bot_result['source_documents'][i].metadata['title'] for i in range(len(bot_result))]
@@ -76,30 +123,26 @@ def slow_echo(usr_message, chat_history):
             else:
                 source_print[source_doc[i]] = 'page: '+ source_page[i]
 
-        bot_response += '\n Source:'
+        bot_response = bot_response + '\n Source:'
         for doc, page in source_print.items():
             bot_response += '\n' + doc + ': ' + page
 
         chat_history.append((usr_message, bot_response))
-
+
+
         time.sleep(1)
+
         yield "", chat_history
 
     except openai.error.OpenAIError as e:
         # Handle OpenAI-specific errors
-        error_message = f"An openAI API Error: {e}"
-        return error_message, chat_history
+        error_message = f"OpenAI API Error: {e}"
+        print(error_message)
 
     except Exception as e:
         # Handle other unexpected errors
-        error_message = f"An unexpected error: {e}"
-        return error_message, chat_history
-
-def get_opeanai_key(openai_key):
-    global OPENAI_API_KEY
-    OPENAI_API_KEY=openai_key
-
-    return {chatbot_col: gr.Column(visible=True)}
 
 
 with gr.Blocks() as demo:
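
A note on the new request limit: the rewritten slow_echo declares `global request_count`, yet neither the counter's definition nor its increment appears in the hunks above, and the limit branch returns a plain string from a function that also yields. A minimal sketch of how the counter would presumably be maintained, assuming a module-level integer defined near OPENAI_API_KEY (the increment and the yielded notice are assumptions, not part of this commit):

    request_count = 0  # assumed module-level counter; its definition is not shown in this commit

    def slow_echo(usr_message, chat_history):
        global request_count
        if request_count >= 10:
            # Yield the notice rather than returning it: slow_echo contains
            # `yield`, so it is a generator and a returned string never
            # reaches the UI (demonstrated below).
            chat_history.append((usr_message, "You have reached the maximum number of allowed requests. Please exit the chat."))
            yield "", chat_history
            return
        request_count += 1  # assumed: advance the counter once per request
        # ... answer_chain query and source formatting as in the diff above ...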
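On the "none output" itself: a Python function that contains `yield` anywhere in its body is a generator, and a `return value` inside it merely attaches the value to the StopIteration that ends iteration; a caller looping over the generator, as Gradio does for streaming handlers, never sees it. That is why the old except blocks' `return error_message, chat_history` surfaced nothing, and why this commit replaces them with `print(error_message)`. The same caveat still applies to the surviving `return` on the request-limit branch. A self-contained demonstration:

    def limited():
        return "limit reached"    # becomes StopIteration("limit reached")
        yield "never produced"    # this yield makes the whole function a generator

    print(list(limited()))        # prints [] -- the returned string is dropped

For context, a generator handler like slow_echo is typically wired up inside the Blocks section along these lines (the component names below are assumptions; the actual wiring falls outside the hunks shown):

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()  # assumed names; the file also references a chatbot_col column
        msg = gr.Textbox()
        # Gradio iterates the generator, rendering each yielded
        # ("", chat_history) pair as a streaming update.
        msg.submit(slow_echo, [msg, chatbot], [msg, chatbot])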