Update app.py
app.py CHANGED
@@ -38,6 +38,15 @@ stage_analyzer_chain = LLMChain(
     verbose=False,
     output_key="stage_number")
 
+user_response_prompt = load_prompt("./templates/user_response_prompt.json")
+llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.0)
+user_response_chain = LLMChain(
+    llm=llm,
+    prompt=user_response_prompt,
+    verbose=True,  # whether to print the chain's intermediate steps
+    output_key="user_responses"
+)
+
 df = pd.read_json('./data/unified_wine_data.json', encoding='utf-8', lines=True)
 
 loader =DataFrameLoader(data_frame=df, page_content_column='name')
@@ -167,7 +176,7 @@ conversation_stages_dict = {
     "1": "Start: Start the conversation by introducing yourself. Be polite and respectful while maintaining a professional tone of conversation.",
     "2": "Analyze: Identify the user's preferences in order to make wine recommendations. Ask questions to understand the preferences of your users in order to make wine recommendations. Ask only one question at a time. The wine database tool is not available here.",
     "3": "Recommendation: Recommend the right wine based on the user's preferences identified. Recommendations must be limited to wines in wine database, and you can use tools to do this.",
-    "4": "After recommendation: After making a wine recommendation, it asks if the user likes the wine you recommended, and if they do, it provides a link to it. Otherwise, it takes you back to the recommendation stage.",
+    "4": "After recommendation: After making a wine recommendation, it asks if the user likes the wine you recommended, and if they do, it provides a link and image to it. Otherwise, it takes you back to the recommendation stage.",
     "5": "Close: When you're done, say goodbye to the user.",
     "6": "Question and Answering: This is where you answer the user's questions. To answer user question, you can use the search tool or the wine database tool.",
     "7": "Not in the given steps: This step is for when none of the steps between 1 and 6 apply.",
@@ -242,7 +251,7 @@ agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, ve
 import gradio as gr
 
 # user_response, stage_history, conversation_history, pre_conversation_history = "", "", """""", """"""
-
+
 stage_description = ""
 for key, value in conversation_stages_dict.items():
     stage_description += f"{key}.{value}\n"
@@ -261,12 +270,17 @@ with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:
 
     chatbot = gr.Chatbot()
     msg = gr.Textbox(label='User input')
+    init_examples = [["이번 주에 친구들과 모임이 있는데, 훌륭한 와인 한 병을 추천해줄래?"], ["입문자에게 좋은 와인을 추천해줄래?"], ["보르도와 부르고뉴 와인의 차이점은 뭐야?"]]
+    user_response_examples = gr.Dataset(samples=init_examples, components=[msg])
     stage_history = gr.Textbox(value="stage history: ", interactive=False, label='stage history')
     submit_btn = gr.Button("전송")
-    clear_btn = gr.ClearButton([msg, chatbot, stage_history])
+    clear_btn = gr.ClearButton([msg, chatbot, stage_history, ])
    stage_info = gr.Textbox(value=stage_description, interactive=False, label='stage description')
 
-    def answer(user_response, chat_history, stage_history):
+    def load_example(example):
+        return example[0]
+
+    def answer(user_response, chat_history, stage_history, user_response_examples):
        chat_history = chat_history or []
        stage_history = stage_history or ""
        pre_conversation_history = ""
@@ -278,27 +292,21 @@ with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:
         stage_number = stage_number[-1]
         stage_history += stage_number if stage_history == "stage history: " else ", " + stage_number
         response = agent_executor.run({'input':user_response, 'conversation_history': pre_conversation_history, 'stage_number': stage_number})
-        conversation_history += "이우선: " + response + "\n"
+        # conversation_history += "이우선: " + response + "\n"
         response = response.split('<END_OF_TURN>')[0]
-        print(stage_history)
-        print(conversation_history)
         chat_history.append((user_response, response))
+        user_response_examples = []
+        for user_response_example in user_response_chain.run({'conversation_history': conversation_history}).split('|'):
+            user_response_examples.append([user_response_example])
 
-        return "", chat_history, stage_history
-
-    def user(user_message, history):
-        return gr.update(value="", interactive=False), history + [[user_message, None]]
-
-    # def clear(*args):
-    #     global conversation_history, pre_conversation_history, stage_history, answer_token
-    #     answer_token = ''
-    #     conversation_history, pre_conversation_history, stage_history = """""", """""", ""
-    def clear():
-        pass
+        return "", chat_history, stage_history, gr.Dataset.update(samples=user_response_examples)
 
-
+    def clear(user_response_examples):
+        return gr.Dataset.update(samples=init_examples)
 
-
-
+    clear_btn.click(fn=clear, inputs=[user_response_examples], outputs=[user_response_examples])
+    user_response_examples.click(load_example, inputs=[user_response_examples], outputs=[msg])
+    submit_btn.click(answer, [msg, chatbot, stage_history, user_response_examples], [msg, chatbot, stage_history, user_response_examples])
+    msg.submit(answer, [msg, chatbot, stage_history, user_response_examples], [msg, chatbot, stage_history, user_response_examples])
 
 demo.launch()
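
The first hunk adds a user_response_chain that generates suggested follow-up questions from the conversation history. Its prompt is loaded from ./templates/user_response_prompt.json, which is not included in this diff, so the sketch below uses a hypothetical inline PromptTemplate as a stand-in; the only contract app.py relies on is that the chain takes conversation_history and returns a '|'-separated string of suggestions.

# Minimal sketch of the follow-up-question chain, with a hypothetical template
# standing in for ./templates/user_response_prompt.json (not part of this diff).
from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI

user_response_prompt = PromptTemplate(
    input_variables=["conversation_history"],
    template=(
        "Given the conversation so far:\n{conversation_history}\n"
        "Suggest three short follow-up questions the user might ask next, "
        "separated by '|'."
    ),
)

llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.0)
user_response_chain = LLMChain(
    llm=llm,
    prompt=user_response_prompt,
    verbose=True,
    output_key="user_responses",
)

# app.py splits the raw output on '|' and wraps each item in a list for gr.Dataset:
suggestions = [[q.strip()] for q in
               user_response_chain.run({'conversation_history': "User: hi"}).split('|')]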
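The Gradio hunks wire those suggestions into a gr.Dataset: clicking a sample copies it into the textbox, each call to answer() refreshes the samples with the chain's output, and the clear handler resets them to init_examples. The stand-alone sketch below shows the same wiring with the agent and suggestion chain replaced by stubs; it assumes Gradio 3.x, where gr.Dataset.update() and gr.ClearButton are available.

# Stand-alone sketch of the gr.Dataset wiring introduced in this commit (Gradio 3.x assumed).
import gradio as gr

init_examples = [["Recommend a wine for a beginner."],
                 ["What pairs well with steak?"]]

def load_example(example):
    # A Dataset row is a list of component values; the textbox takes the first one.
    return example[0]

def answer(user_response, chat_history):
    chat_history = chat_history or []
    reply = f"(stub reply to: {user_response})"  # stands in for agent_executor.run(...)
    chat_history.append((user_response, reply))
    raw = "Tell me more about this wine|Show me a cheaper option"  # stands in for user_response_chain.run(...)
    suggestions = [[s] for s in raw.split('|')]
    return "", chat_history, gr.Dataset.update(samples=suggestions)

def clear_examples(_):
    # Reset the suggestions to the initial examples when the chat is cleared.
    return gr.Dataset.update(samples=init_examples)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label='User input')
    examples = gr.Dataset(samples=init_examples, components=[msg])
    clear_btn = gr.ClearButton([msg, chatbot])

    examples.click(load_example, inputs=[examples], outputs=[msg])
    msg.submit(answer, [msg, chatbot], [msg, chatbot, examples])
    clear_btn.click(clear_examples, inputs=[examples], outputs=[examples])

demo.launch()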