mickkhaw committed
Commit bb8f798 · 1 Parent(s): 5180403

Merge changes from main

Files changed (4)
  1. app.py +0 -51
  2. chainlit.md +11 -1
  3. utils/graph.py +23 -9
  4. utils/graph_chains.py +5 -3
app.py CHANGED
@@ -7,7 +7,6 @@ from chainlit.element import ElementBased
 from dotenv import load_dotenv
 
 # modules for audio processing
-import httpx
 from langchain.schema.runnable.config import RunnableConfig
 from langchain_openai.chat_models import ChatOpenAI
 from openai import AsyncOpenAI
@@ -19,11 +18,6 @@ client = AsyncOpenAI()
 # ---- ENV VARIABLES ---- #
 load_dotenv()
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
-#QDRANT_CLOUD_KEY = os.environ.get("QDRANT_CLOUD_KEY")
-#QDRANT_CLOUD_URL = "https://30591e3d-7092-41c4-95e1-4d3c7ef6e894.us-east4-0.gcp.cloud.qdrant.io"
-#ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
-#ELEVENLABS_VOICE_ID = os.environ.get("ELEVENLABS_VOICE_ID")
-
 
 # -- AUGMENTED -- #
 
@@ -112,35 +106,6 @@ async def generate_text_answer(transcription):
     return msg.content
 
 
-# Text-to-Speech Function: Take the text answer generated and convert it to an audio file
-# @cl.step(type="tool")
-# async def text_to_speech(text: str, mime_type: str):
-#     CHUNK_SIZE = 2048  # try 4096 or 8192 if getting read timeout error. the bigger the chunk size, the fewer API calls but longer wait time
-#     url = f"https://api.elevenlabs.io/v1/text-to-speech/{ELEVENLABS_VOICE_ID}"
-#     headers = {"Accept": mime_type, "Content-Type": "application/json", "xi-api-key": ELEVENLABS_API_KEY}
-#     data = {
-#         "text": text,
-#         "model_id": "eleven_monolingual_v1",
-#         "voice_settings": {"stability": 0.5, "similarity_boost": 0.5},
-#     }
-
-#     # make an async HTTP POST request to the ElevenLabs API to convert text to speech and return an audio file
-#     async with httpx.AsyncClient(timeout=60.0) as client:
-#         response = await client.post(url, json=data, headers=headers)
-#         response.raise_for_status()  # Ensure we notice bad responses
-#         buffer = BytesIO()
-#         buffer.name = f"output_audio.{mime_type.split('/')[1]}"
-#         async for chunk in response.aiter_bytes(chunk_size=CHUNK_SIZE):
-#             if chunk:
-#                 buffer.write(chunk)
-
-#     buffer.seek(0)
-#     return buffer.name, buffer.read()
-
-
-# ---- AUDIO PROCESSING ---- #
-
-
 # Audio Chunk Function: Process audio chunks as they arrive from the user's microphone
 @cl.on_audio_chunk
 async def on_audio_chunk(chunk: cl.AudioChunk):
@@ -182,19 +147,3 @@ async def on_audio_end(elements: list[ElementBased]):
         pprint(f"================== Node: '{key}':")
 
     await msg.send()
-
-    # text_answer = await generate_text_answer(
-    #     transcription
-    # )  # need to change this to generate answer based on base_rag_chain
-
-    # output_name, output_audio = await text_to_speech(text_answer, audio_mime_type)
-
-    # output_audio_el = cl.Audio(
-    #     name=output_name,
-    #     auto_play=True,
-    #     mime=audio_mime_type,
-    #     content=output_audio,
-    # )
-    # answer_message = await cl.Message(content="").send()
-    # answer_message.elements = [output_audio_el]
-    # await answer_message.update()
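
Note on the audio flow that remains: with the ElevenLabs text-to-speech path deleted, app.py's voice input still has to be transcribed before the graph can answer. For reference, a minimal sketch of that speech-to-text step using the module's `AsyncOpenAI` client; the helper name `speech_to_text`, the tuple shape, and the `whisper-1` model choice are assumptions for illustration, not code from this commit:

```python
import chainlit as cl
from openai import AsyncOpenAI

client = AsyncOpenAI()  # mirrors the module-level client in app.py


@cl.step(type="tool")
async def speech_to_text(audio_file: tuple[str, bytes, str]) -> str:
    # audio_file is (filename, raw_bytes, mime_type), the in-memory shape
    # the OpenAI transcription endpoint accepts
    response = await client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    return response.text
```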
chainlit.md CHANGED
@@ -1,9 +1,19 @@
+---
+title: RAGalicious App (AIE3)
+emoji: 👩‍🍳
+colorFrom: purple
+colorTo: blue
+sdk: docker
+pinned: false
+license: mit
+---
+
 # 😋 Welcome to RAGalicious!
 
 RAGalicious is a chatbot designed to help users discover delectable recipes from the NYTimes Cooking section. It simplifies the recipe discovery process, providing users inspiration based on ingredients, diet, occasion or equipment you have.
 
 ## Example prompts
 
-- **Plan your daily meals:** E.g, "Give me ideas for making an easy weeknight dinner."
+- **Plan your daily meals:** E.g. "Give me ideas for making an easy weeknight dinner."
 - **Get ready to host occasions:** E.g. "What are good dishes to make for Rosh Hashanah?"
 - **Get scrappy with ingredients you already have:** E.g. "What can I make with pasta, lemon and chickpeas?"
utils/graph.py CHANGED
@@ -186,6 +186,7 @@ def generate_workflow(base_llm, power_llm):
         shortlisted_recipes = state["shortlisted_recipes"]
         messages = state["messages"]
         last_message = messages[-1] if messages else ""
+        question_type = state["question_type"]
 
         # LLM with tool and validation
         base_rag_prompt_template = """\
@@ -207,7 +208,20 @@ def generate_workflow(base_llm, power_llm):
         chain = base_rag_prompt | power_llm
 
         full_response = ""
+        thumbnail_url = ""
         cl_msg = config["configurable"]["cl_msg"]
+        if state["question_type"] == "show_specific_recipe":
+            selected_recipe = state.get("selected_recipe")
+            if selected_recipe and selected_recipe.get("thumbnail"):
+                thumbnail_url = selected_recipe["thumbnail"]
+                image = cl.Image(url=thumbnail_url, name="thumbnail", display="inline", size="large")
+
+                # Attach the image to the message
+                await cl.Message(
+                    content="",
+                    elements=[image],
+                ).send()
+
         async for chunk in chain.astream(
             {
                 "question": question,
@@ -220,9 +234,9 @@ def generate_workflow(base_llm, power_llm):
             await cl_msg.stream_token(chunk.content)
             full_response += chunk.content
 
-        selected_recipe = get_selected_recipe(base_llm, question, shortlisted_recipes, messages)
-
-        return {"messages": [full_response], "selected_recipe": selected_recipe}
+        return {
+            "messages": [full_response],
+        }
 
     async def _node_single_recipe_qa(state: AgentState, config):
         print("--- Q&A with SINGLE RECIPE ---")
@@ -271,7 +285,7 @@ def generate_workflow(base_llm, power_llm):
         selected_recipe = state.get("selected_recipe")
         messages = state["messages"]
         last_message = messages[-1] if messages else ""
-
+        cl_msg = config["configurable"]["cl_msg"]
         # LLM with tool and validation
         base_rag_prompt_template = """\
         You are a friendly AI assistant.
@@ -303,15 +317,18 @@ def generate_workflow(base_llm, power_llm):
         )
 
         print("message", message)
-
+        tool_arguments = json.loads(message.additional_kwargs["function_call"]["arguments"])
         action = ToolInvocation(
            tool=message.additional_kwargs["function_call"]["name"],
-            tool_input=json.loads(message.additional_kwargs["function_call"]["arguments"]),
+            tool_input=tool_arguments,
         )
 
         response = tool_executor.invoke(action)
 
         function_message = FunctionMessage(content=str(response), name=action.tool)
+        await cl_msg.stream_token(
+            f"""Sure! I've sent a text to {tool_arguments['number']} with the following: \n\n{tool_arguments['text']}"""
+        )
 
         return {"messages": [function_message]}
 
@@ -335,9 +352,6 @@ def generate_workflow(base_llm, power_llm):
         shortlisted_recipes = state.get("shortlisted_recipes")
         selected_recipe = state.get("selected_recipe")
 
-        # if not shortlisted_recipes or len(shortlisted_recipes) == 0:
-        #     print("going to retrieve since no shortlisted_recipes")
-        #     return "retrieve"
         if question_type == "asking_for_recipe_suggestions":
             return "retrieve"
         if question_type in ["referring_to_shortlisted_recipes", "show_specific_recipe"]:
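
Note on the thumbnail change: the new `show_specific_recipe` branch sends the recipe image as its own Chainlit message before the answer streams. Pulled out of the node for clarity, the pattern is roughly the following; `send_recipe_thumbnail` is a hypothetical helper name, while the field names and `cl.Image` arguments come straight from the diff:

```python
import chainlit as cl


async def send_recipe_thumbnail(selected_recipe: dict | None) -> None:
    # Guard against a missing recipe or a recipe without a thumbnail,
    # as the node above does before building the image element
    if not (selected_recipe and selected_recipe.get("thumbnail")):
        return
    image = cl.Image(
        url=selected_recipe["thumbnail"],
        name="thumbnail",
        display="inline",
        size="large",
    )
    # Send the image as a standalone message so it renders before the streamed text
    await cl.Message(content="", elements=[image]).send()
```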
utils/graph_chains.py CHANGED
@@ -125,14 +125,14 @@ def get_question_type_chain(llm_model):
         description="The likelihood / chance that the User Question is asking for recipe suggestions based on some criteria, integers from 1 to 100"
     )
     referring_to_specific_recipe: int = Field(
-        description="The likelihood / chance that the User Question is asking questions about one specific full recipe, integers from 1 to 100"
+        description="The likelihood / chance that the User Question is asking specific questions about a single specific recipe, integers from 1 to 100"
     )
     referring_to_shortlisted_recipes: int = Field(
-        description="The likelihood / chance that the User Question is asking generally about shortlisted recipes provided in the last message, integers from 1 to 100"
+        description="The likelihood / chance that the User Question is asking generally about more than one recipe provided in the last message, integers from 1 to 100"
     )
 
     show_specific_recipe: int = Field(
-        description="The likelihood / chance that the User Question is asking to show a specific recipe, integers from 1 to 100"
+        description="The likelihood / chance that the User Question is asking to show the full recipe for a specific recipe, integers from 1 to 100"
     )
     send_text: int = Field(
         description="The likelihood / chance that the User Question is to send a SMS or text, integers from 1 to 100"
@@ -155,6 +155,8 @@ def get_question_type_chain(llm_model):
     it is highly likely that the user is asking questions referring to shortlisted recipes.
     If the last message was a full single recipe, it is generally likely that the user
     is asking questions referring to specific recipe.
+    If the user is asking to show the full recipe, it is highly likely that they are asking
+    to show a specific recipe and less likely that they are asking for anything else.
 
     {format_instructions}
 
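
Note on the schema: every field is an LLM-reported likelihood from 1 to 100, so sharper, mutually exclusive descriptions (the point of this change) directly improve routing. A minimal sketch of how such a schema is typically consumed, assuming an argmax over the parsed scores; the class name `QuestionTypeLikelihoods` and the helper `pick_question_type` are illustrative, not this repo's exact names:

```python
from pydantic import BaseModel, Field


class QuestionTypeLikelihoods(BaseModel):
    # Field names mirror the schema above; each value is a 1-100 likelihood
    asking_for_recipe_suggestions: int = Field(ge=1, le=100)
    referring_to_specific_recipe: int = Field(ge=1, le=100)
    referring_to_shortlisted_recipes: int = Field(ge=1, le=100)
    show_specific_recipe: int = Field(ge=1, le=100)
    send_text: int = Field(ge=1, le=100)


def pick_question_type(scores: QuestionTypeLikelihoods) -> str:
    # Route on the highest-scoring question type (model_dump() is pydantic v2;
    # use .dict() on pydantic v1)
    return max(scores.model_dump().items(), key=lambda kv: kv[1])[0]


# Example: "Show me the full recipe" should now score show_specific_recipe highest
scores = QuestionTypeLikelihoods(
    asking_for_recipe_suggestions=10,
    referring_to_specific_recipe=20,
    referring_to_shortlisted_recipes=5,
    show_specific_recipe=90,
    send_text=1,
)
assert pick_question_type(scores) == "show_specific_recipe"
```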