Update app.py
app.py CHANGED
@@ -121,7 +121,7 @@ def read_file_content(file,max_length):
         return ""
 
 
-def chat_with_model(prompt, document_section):
+def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     model = model_choice
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
@@ -132,7 +132,7 @@ def chat_with_model(prompt, document_section):
     return response['choices'][0]['message']['content']
 
 
-def chat_with_file_contents(prompt, file_content):
+def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
     if len(file_content)>0:
@@ -154,7 +154,7 @@ def main():
     if filename is not None:
         transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
         st.write(transcription)
-        gptOutput = chat_with_model(transcription, '') # *************************************
+        gptOutput = chat_with_model(transcription, '', model_choice) # *************************************
         filename = generate_filename(transcription, choice)
         create_file(filename, transcription, gptOutput)
         st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
@@ -190,7 +190,7 @@ def main():
         else:
             if st.button(f"Chat about Section {i+1}"):
                 st.write('Reasoning with your inputs...')
-                response = chat_with_model(user_prompt, section) # *************************************
+                response = chat_with_model(user_prompt, section, model_choice) # *************************************
                 st.write('Response:')
                 st.write(response)
                 document_responses[i] = response
@@ -200,7 +200,7 @@ def main():
 
     if st.button('💬 Chat'):
         st.write('Reasoning with your inputs...')
-        response = chat_with_model(user_prompt, ''.join(list(document_sections))) # *************************************
+        response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # *************************************
         st.write('Response:')
         st.write(response)
 
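The hunks above only show the new signatures and a few surrounding lines, so for context, here is a minimal sketch of what the updated chat_with_model could look like in full. It assumes the legacy openai-python ChatCompletion API (consistent with the response['choices'][0]['message']['content'] access in the diff); the handling of document_section is an assumption and is not shown in this commit.

import openai

def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
    # The new default lets existing two-argument callers keep working,
    # while call sites in main() can forward the user's model_choice.
    model = model_choice
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section) > 0:
        # Assumption: a non-empty document section is passed along as extra context.
        conversation.append({'role': 'assistant', 'content': document_section})
    # Legacy (pre-1.0) openai-python chat call; the response supports dict-style access.
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']

With the default in place, a call such as chat_with_model(transcription, '', model_choice) uses whatever model the user selected, while older two-argument calls fall back to gpt-3.5-turbo; chat_with_file_contents gets the same treatment.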
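The diff also does not show where model_choice is defined in main(); in a Streamlit app of this kind it is typically a sidebar control. A hedged sketch, assuming st.sidebar.radio and treating the option names as placeholders rather than values taken from this commit:

import streamlit as st

# Assumption: model_choice is chosen once near the top of main() and then
# forwarded to chat_with_model / chat_with_file_contents, as the new
# third argument at each call site suggests.
model_choice = st.sidebar.radio("Model:", ('gpt-3.5-turbo', 'gpt-4'))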