kastan committed on
Commit
e20da41
·
1 Parent(s): 3842618

fix chat history formatting by keeping the chat history strip list comprehension

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -2,10 +2,10 @@ import os
2
 
3
  import gradio as gr
4
  import retrieval
5
- from text_generation import Client, InferenceAPIClient
6
-
7
  # UNCOMMENT ONLY WHEN RUNNING LOCALLY (not on Spaces)
8
  # from dotenv import load_dotenv
 
 
9
  # load API keys from globally-availabe .env file
10
  # SECRETS_FILEPATH = "/mnt/project/chatbotai/huggingface_cache/internal_api_keys.env"
11
  # load_dotenv(dotenv_path=SECRETS_FILEPATH, override=True)
@@ -124,12 +124,16 @@ def predict(
124
  chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
125
  yield chat, history, None, None, None, []
126
 
 
 
127
  # Pinecone context retrieval
128
  top_context_list = ta.retrieve_contexts_from_pinecone(user_question=inputs, topk=NUM_ANSWERS_GENERATED)
 
129
  yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], []
130
 
131
  # run CLIP
132
  images_list = ta.clip_text_to_image(inputs)
 
133
  yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
134
 
135
 
 
2
 
3
  import gradio as gr
4
  import retrieval
 
 
5
  # UNCOMMENT ONLY WHEN RUNNING LOCALLY (not on Spaces)
6
  # from dotenv import load_dotenv
7
+ from text_generation import Client, InferenceAPIClient
8
+
9
  # load API keys from globally-availabe .env file
10
  # SECRETS_FILEPATH = "/mnt/project/chatbotai/huggingface_cache/internal_api_keys.env"
11
  # load_dotenv(dotenv_path=SECRETS_FILEPATH, override=True)
 
124
  chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
125
  yield chat, history, None, None, None, []
126
 
127
+ chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
128
+
129
  # Pinecone context retrieval
130
  top_context_list = ta.retrieve_contexts_from_pinecone(user_question=inputs, topk=NUM_ANSWERS_GENERATED)
131
+ # yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], []
132
  yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], []
133
 
134
  # run CLIP
135
  images_list = ta.clip_text_to_image(inputs)
136
+ # yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
137
  yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
138
 
139