ProfessorLeVesseur committed on
Commit 83b7841
1 Parent(s): 3ec3939

Update app.py

Files changed (1)
  1. app.py +31 -201
app.py CHANGED
@@ -148,213 +148,43 @@ placeholders = [
  'Example: Does this resource provide guidance on implementation fidelity'
  ]

- # Select a random placeholder from the list
- if 'placeholder' not in st.session_state:
- st.session_state.placeholder = random.choice(placeholders)
-
- q = st.text_input(label='Ask a question or make a request ', value='', placeholder=st.session_state.placeholder)
- # q = st.text_input(label='Ask a question or make a request ', value='')
-
- if q:
- with st.spinner('Thinking...'):
- answer = ask_with_memory(vector_store, q, st.session_state.history)
-
- # Display the response in a text area
- # st.text_area('Response: ', value=answer, height=400, key="response_text_area")
- import time
- import random
-
- def stream_data():
- for word in answer.split(" "):
- yield word + " "
- # time.sleep(0.02)
- time.sleep(random.uniform(0.03, 0.08))
-
- st.write(stream_data)
-
- st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
-
- # Prepare chat history text for display
- # history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in st.session_state.history)
- # Prepare chat history text for display in reverse order
- history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in reversed(st.session_state.history))
-
- # Display chat history
- st.text_area('Chat History', value=history_text, height=800)
-
-
-
-
-
- # import streamlit as st
- # import pinecone
- # from langchain.embeddings.openai import OpenAIEmbeddings
- # from langchain.vectorstores import Pinecone, Chroma
- # from langchain.chains import RetrievalQA
- # from langchain.chat_models import ChatOpenAI
- # import tiktoken
- # import random
-
- # # Fetch the OpenAI API key from Streamlit secrets
- # openai_api_key = st.secrets["openai_api_key"]
-
- # # Fetch Pinecone API key and environment from Streamlit secrets
- # pinecone_api_key = st.secrets["pinecone_api_key"]
- # pinecone_environment = st.secrets["pinecone_environment"]
-
- # # Initialize Pinecone
- # pinecone.init(api_key=pinecone_api_key, environment=pinecone_environment)
-
- # # Define the name of the Pinecone index
- # index_name = 'mi-resource-qa'
-
- # # Initialize the OpenAI embeddings object with the hardcoded API key
- # embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
-
- # # Define functions
- # def insert_or_fetch_embeddings(index_name):
- # if index_name in pinecone.list_indexes():
- # vector_store = Pinecone.from_existing_index(index_name, embeddings)
- # return vector_store
- # else:
- # raise ValueError(f"Index {index_name} does not exist. Please create it before fetching.")
-
- # # Initialize or fetch Pinecone vector store
- # vector_store = insert_or_fetch_embeddings(index_name)
-
- # # calculate embedding cost using tiktoken
- # def calculate_embedding_cost(text):
- # import tiktoken
- # enc = tiktoken.encoding_for_model('text-embedding-ada-002')
- # total_tokens = len(enc.encode(text))
- # # print(f'Total Tokens: {total_tokens}')
- # # print(f'Embedding Cost in USD: {total_tokens / 1000 * 0.0004:.6f}')
- # return total_tokens, total_tokens / 1000 * 0.0004
-
-
- # def ask_with_memory(vector_store, query, chat_history=[]):
- # from langchain.chains import ConversationalRetrievalChain
- # from langchain.chat_models import ChatOpenAI
-
- # llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=1, openai_api_key=openai_api_key)
-
- # # The retriever is created with metadata filter directly in search_kwargs
- # # retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3, 'filter': {'source': {'$eq': 'https://mimtsstac.org/sites/default/files/session-documents/Intensifying%20Literacy%20Instruction%20-%20Essential%20Practices%20%28NATIONAL%29.pdf'}}})
- # retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3, 'filter': {'source':'https://mimtsstac.org/sites/default/files/session-documents/Intensifying%20Literacy%20Instruction%20-%20Essential%20Practices%20%28NATIONAL%29.pdf'}})
-
- # chain= ConversationalRetrievalChain.from_llm(llm, retriever)
- # result = chain({'question': query, 'chat_history': st.session_state['history']})
- # # Append to chat history as a dictionary
- # st.session_state['history'].append((query, result['answer']))
-
- # return (result['answer'])
-
- # # Initialize chat history
- # if 'history' not in st.session_state:
- # st.session_state['history'] = []
-
- # # # STREAMLIT APPLICATION SETUP WITH PASSWORD
-
- # # Define the correct password
- # # correct_password = "MiBLSi"
-
- # #Add the image with a specified width
- # image_width = 300 # Set the desired width in pixels
- # st.image('MTSS.ai_Logo.png', width=image_width)
- # st.subheader('Ink QA™ | Dynamic PDFs')
-
- # # Using Markdown for formatted text
- # st.markdown("""
- # Resource: **Intensifying Literacy Instruction: Essential Practices**
- # """, unsafe_allow_html=True)
-
- # with st.sidebar:
- # # Password input field
- # # password = st.text_input("Enter Password:", type="password")
-
- # st.image('mimtss.png', width=200)
- # st.image('Literacy_Cover.png', width=200)
- # st.link_button("View | Download", "https://mimtsstac.org/sites/default/files/session-documents/Intensifying%20Literacy%20Instruction%20-%20Essential%20Practices%20%28NATIONAL%29.pdf")
-
- # Audio_Header_text = """
- # **Tune into Dr. St. Martin's introduction**"""
- # st.markdown(Audio_Header_text)
-
- # # Path or URL to the audio file
- # audio_file_path = 'Audio_Introduction_Literacy.m4a'
- # # Display the audio player widget
- # st.audio(audio_file_path, format='audio/mp4', start_time=0)
-
- # # Citation text with Markdown formatting
- # citation_Content_text = """
- # **Citation**
- # St. Martin, K., Vaughn, S., Troia, G., Fien, & H., Coyne, M. (2023). *Intensifying literacy instruction: Essential practices, Version 2.0*. Lansing, MI: MiMTSS Technical Assistance Center, Michigan Department of Education.
-
- # **Table of Contents**
- # * **Introduction**: pg. 1
- # * **Intensifying Literacy Instruction: Essential Practices**: pg. 4
- # * **Purpose**: pg. 4
- # * **Practice 1**: Knowledge and Use of a Learning Progression for Developing Skilled Readers and Writers: pg. 6
- # * **Practice 2**: Design and Use of an Intervention Platform as the Foundation for Effective Intervention: pg. 13
- # * **Practice 3**: On-going Data-Based Decision Making for Providing and Intensifying Interventions: pg. 16
- # * **Practice 4**: Adaptations to Increase the Instructional Intensity of the Intervention: pg. 20
- # * **Practice 5**: Infrastructures to Support Students with Significant and Persistent Literacy Needs: pg. 24
- # * **Motivation and Engagement**: pg. 28
- # * **Considerations for Understanding How Students' Learning and Behavior are Enhanced**: pg. 28
- # * **Summary**: pg. 29
- # * **Endnotes**: pg. 30
- # * **Acknowledgment**: pg. 39
- # """
- # st.markdown(citation_Content_text)
-
- # # if password == correct_password:
- # # Define a list of possible placeholder texts
- # placeholders = [
- # 'Example: Summarize the article in 200 words or less',
- # 'Example: What are the essential practices?',
- # 'Example: I am a teacher, why is this resource important?',
- # 'Example: How can this resource support my instruction in reading and writing?',
- # 'Example: Does this resource align with the learning progression for developing skilled readers and writers?',
- # 'Example: How does this resource address the needs of students scoring below the 20th percentile?',
- # 'Example: Are there assessment tools included in this resource to monitor student progress?',
- # 'Example: Does this resource provide guidance on data collection and analysis for monitoring student outcomes?',
- # "Example: How can this resource be used to support students' social-emotional development?",
- # "Example: How does this resource align with the district's literacy goals and objectives?",
- # 'Example: What research and evidence support the effectiveness of this resource?',
- # 'Example: Does this resource provide guidance on implementation fidelity'
- # ]
-
- # # Select a random placeholder from the list
- # if 'placeholder' not in st.session_state:
- # st.session_state.placeholder = random.choice(placeholders)
-
- # q = st.text_input(label='Ask a question or make a request ', value='', placeholder=st.session_state.placeholder)
- # # q = st.text_input(label='Ask a question or make a request ', value='')
-
- # k = 3 # Set k to 3
-
- # # # Initialize chat history if not present
- # # if 'history' not in st.session_state:
- # # st.session_state.history = []
-
- # if q:
- # with st.spinner('Thinking...'):
- # answer = ask_with_memory(vector_store, q, st.session_state.history)
-
- # # Display the response in a text area
- # st.text_area('Response: ', value=answer, height=400, key="response_text_area")
-
- # st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
-
- # # # Prepare chat history text for display
- # # history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in st.session_state.history)
- # # Prepare chat history text for display in reverse order
- # history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in reversed(st.session_state.history))
-
- # # Display chat history
- # st.text_area('Chat History', value=history_text, height=800)
-
-
-

+ # CLEAR THE TEXT BOX
+ with st.form("Question",clear_on_submit=True):
+ q = st.text_input(label='Ask a Question | Send a Prompt', placeholder=st.session_state.placeholder, value='', )
+ submitted = st.form_submit_button("Submit")

+ st.divider()

+ if submitted:
+ with st.spinner('Thinking...'):
+ answer = ask_with_memory(vector_store, q, st.session_state.history)

+ # st.write(q)
+ st.write(f"**{q}**")

+ import time
+ import random
+
+ def stream_answer():
+ for word in answer.split(" "):
+ yield word + " "
+ # time.sleep(0.02)
+ time.sleep(random.uniform(0.03, 0.08))

+ st.write(stream_answer)

+ # Display the response in a text area
+ # st.text_area('Response: ', value=answer, height=400, key="response_text_area")
+ # OR to display as Markdown (interprets Markdown formatting)
+ # st.markdown(answer)
+
+ st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
+
+ st.divider()

+ # # Prepare chat history text for display
+ history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in reversed(st.session_state.history))

+ # Display chat history
+ st.text_area('Chat History', value=history_text, height=800)
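
The word-by-word streaming added in this commit works by turning the answer string into a generator and handing it to st.write. For reference, below is a minimal, self-contained sketch of the same pattern; it is illustrative only: the answer string is a stand-in for the real ask_with_memory output, and it uses st.write_stream, Streamlit's dedicated streaming helper in recent releases, instead of st.write.

import random
import time

import streamlit as st

# Stand-in text; in app.py this would be the answer returned by ask_with_memory().
answer = "Streaming the response word by word makes the wait feel shorter."

def stream_answer():
    # Yield one word at a time with a small randomized pause to mimic typing.
    for word in answer.split(" "):
        yield word + " "
        time.sleep(random.uniform(0.03, 0.08))

# st.write_stream consumes the generator function and renders the text incrementally.
st.write_stream(stream_answer)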