ProfessorLeVesseur committed on
Commit
2e4aaee
1 Parent(s): f71d7b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +174 -168
app.py CHANGED
@@ -1,3 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # import streamlit as st
2
  # import pinecone
3
  # from langchain.embeddings.openai import OpenAIEmbeddings
@@ -170,171 +344,3 @@
170
 
171
 
172
 
173
- import streamlit as st
174
- import openai
175
- import random
176
-
177
- # Fetch the OpenAI API key from Streamlit secrets
178
- openai_api_key = st.secrets["openai_api_key"]
179
-
180
- # Initialize the OpenAI service with API key
181
- openai.api_key = openai_api_key
182
-
183
- # Fetch Pinecone API key and environment from Streamlit secrets
184
- pinecone_api_key = st.secrets["pinecone_api_key"]
185
- # pinecone_environment = st.secrets["pinecone_environment"]
186
-
187
- # AUTHENTICATE/INITIALIZE PINCONE SERVICE
188
- from pinecone import Pinecone
189
- # pc = Pinecone(api_key=pinecone_api_key)
190
- # pc = Pinecone (api_key= 'YOUR_API_KEY')
191
-
192
- # Define the name of the Pinecone index
193
- index_name = 'mimtssinkqa'
194
-
195
- # Initialize the OpenAI embeddings object
196
- from langchain_openai import OpenAIEmbeddings
197
- embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
198
-
199
- # LOAD VECTOR STORE FROM EXISTING INDEX
200
- from langchain_community.vectorstores import Pinecone
201
- vector_store = Pinecone.from_existing_index(index_name='mimtssinkqa', embedding=embeddings)
202
-
203
- def ask_with_memory(vector_store, query, chat_history=[]):
204
- from langchain_openai import ChatOpenAI
205
- from langchain.chains import ConversationalRetrievalChain
206
- from langchain.memory import ConversationBufferMemory
207
-
208
- from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
209
-
210
- llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.5)
211
-
212
- retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3})
213
-
214
- memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
215
-
216
- system_template = r'''
217
- Use the following pieces of context to answer the user's question. The title of the article is Intensifying literacy Instruction: Essential Practices. Do not mention the Header unless asked.
218
- ----------------
219
- Context: ```{context}```
220
- '''
221
-
222
- user_template = '''
223
- Question: ```{question}```
224
- Chat History: ```{chat_history}```
225
- '''
226
-
227
- messages= [
228
- SystemMessagePromptTemplate.from_template(system_template),
229
- HumanMessagePromptTemplate.from_template(user_template)
230
- ]
231
-
232
- qa_prompt = ChatPromptTemplate.from_messages (messages)
233
-
234
- chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory,chain_type='stuff', combine_docs_chain_kwargs={'prompt': qa_prompt}, verbose=False
235
- )
236
-
237
- result = chain.invoke({'question': query, 'chat_history': st.session_state['history']})
238
- # Append to chat history as a dictionary
239
- st.session_state['history'].append((query, result['answer']))
240
-
241
- return (result['answer'])
242
-
243
- # Initialize chat history
244
- if 'history' not in st.session_state:
245
- st.session_state['history'] = []
246
-
247
- # # STREAMLIT APPLICATION SETUP WITH PASSWORD
248
-
249
- # Define the correct password
250
- # correct_password = "MiBLSi"
251
-
252
- #Add the image with a specified width
253
- image_width = 300 # Set the desired width in pixels
254
- st.image('MTSS.ai_Logo.png', width=image_width)
255
- st.subheader('Ink QA™ | Dynamic PDFs')
256
-
257
- # Using Markdown for formatted text
258
- st.markdown("""
259
- Resource: **Intensifying Literacy Instruction: Essential Practices**
260
- """, unsafe_allow_html=True)
261
-
262
- with st.sidebar:
263
- # Password input field
264
- # password = st.text_input("Enter Password:", type="password")
265
-
266
- st.image('mimtss.png', width=200)
267
- st.image('Literacy_Cover.png', width=200)
268
- st.link_button("View | Download", "https://mimtsstac.org/sites/default/files/session-documents/Intensifying%20Literacy%20Instruction%20-%20Essential%20Practices%20%28NATIONAL%29.pdf")
269
-
270
- Audio_Header_text = """
271
- **Tune into Dr. St. Martin's introduction**"""
272
- st.markdown(Audio_Header_text)
273
-
274
- # Path or URL to the audio file
275
- audio_file_path = 'Audio_Introduction_Literacy.m4a'
276
- # Display the audio player widget
277
- st.audio(audio_file_path, format='audio/mp4', start_time=0)
278
-
279
- # Citation text with Markdown formatting
280
- citation_Content_text = """
281
- **Citation**
282
- St. Martin, K., Vaughn, S., Troia, G., Fien, & H., Coyne, M. (2023). *Intensifying literacy instruction: Essential practices, Version 2.0*. Lansing, MI: MiMTSS Technical Assistance Center, Michigan Department of Education.
283
-
284
- **Table of Contents**
285
- * **Introduction**: pg. 1
286
- * **Intensifying Literacy Instruction: Essential Practices**: pg. 4
287
- * **Purpose**: pg. 4
288
- * **Practice 1**: Knowledge and Use of a Learning Progression for Developing Skilled Readers and Writers: pg. 6
289
- * **Practice 2**: Design and Use of an Intervention Platform as the Foundation for Effective Intervention: pg. 13
290
- * **Practice 3**: On-going Data-Based Decision Making for Providing and Intensifying Interventions: pg. 16
291
- * **Practice 4**: Adaptations to Increase the Instructional Intensity of the Intervention: pg. 20
292
- * **Practice 5**: Infrastructures to Support Students with Significant and Persistent Literacy Needs: pg. 24
293
- * **Motivation and Engagement**: pg. 28
294
- * **Considerations for Understanding How Students' Learning and Behavior are Enhanced**: pg. 28
295
- * **Summary**: pg. 29
296
- * **Endnotes**: pg. 30
297
- * **Acknowledgment**: pg. 39
298
- """
299
- st.markdown(citation_Content_text)
300
-
301
- # if password == correct_password:
302
- # Define a list of possible placeholder texts
303
- placeholders = [
304
- 'Example: Summarize the article in 200 words or less',
305
- 'Example: What are the essential practices?',
306
- 'Example: I am a teacher, why is this resource important?',
307
- 'Example: How can this resource support my instruction in reading and writing?',
308
- 'Example: Does this resource align with the learning progression for developing skilled readers and writers?',
309
- 'Example: How does this resource address the needs of students scoring below the 20th percentile?',
310
- 'Example: Are there assessment tools included in this resource to monitor student progress?',
311
- 'Example: Does this resource provide guidance on data collection and analysis for monitoring student outcomes?',
312
- "Example: How can this resource be used to support students' social-emotional development?",
313
- "Example: How does this resource align with the district's literacy goals and objectives?",
314
- 'Example: What research and evidence support the effectiveness of this resource?',
315
- 'Example: Does this resource provide guidance on implementation fidelity'
316
- ]
317
-
318
- # Select a random placeholder from the list
319
- if 'placeholder' not in st.session_state:
320
- st.session_state.placeholder = random.choice(placeholders)
321
-
322
- q = st.text_input(label='Ask a question or make a request ', value='', placeholder=st.session_state.placeholder)
323
- # q = st.text_input(label='Ask a question or make a request ', value='')
324
-
325
- if q:
326
- with st.spinner('Thinking...'):
327
- answer = ask_with_memory(vector_store, q, st.session_state.history)
328
-
329
- # Display the response in a text area
330
- st.text_area('Response: ', value=answer, height=400, key="response_text_area")
331
-
332
- st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
333
-
334
- # Prepare chat history text for display
335
- # history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in st.session_state.history)
336
- # Prepare chat history text for display in reverse order
337
- history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in reversed(st.session_state.history))
338
-
339
- # Display chat history
340
- st.text_area('Chat History', value=history_text, height=800)
 
1
+ import streamlit as st
2
+ import openai
3
+ import random
4
+
5
+ # Fetch the OpenAI API key from Streamlit secrets
6
+ openai_api_key = st.secrets["openai_api_key"]
7
+
8
+ # Initialize the OpenAI service with API key
9
+ openai.api_key = openai_api_key
10
+
11
+ # Fetch Pinecone API key and environment from Streamlit secrets
12
+ # pinecone_api_key = st.secrets["pinecone_api_key"]
13
+ pinecone_api_key = '555c0e70-331d-4b43-aac7-5b3aac5078d6'
14
+ # pinecone_environment = st.secrets["pinecone_environment"]
15
+
16
+ # AUTHENTICATE/INITIALIZE PINCONE SERVICE
17
+ from pinecone import Pinecone
18
+ pc = Pinecone(api_key=pinecone_api_key)
19
+ # pc = Pinecone (api_key= 'YOUR_API_KEY')
20
+
21
+ # Define the name of the Pinecone index
22
+ index_name = 'mimtssinkqa'
23
+
24
+ # Initialize the OpenAI embeddings object
25
+ from langchain_openai import OpenAIEmbeddings
26
+ embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
27
+
28
+ # LOAD VECTOR STORE FROM EXISTING INDEX
29
+ from langchain_community.vectorstores import Pinecone
30
+ vector_store = Pinecone.from_existing_index(index_name='mimtssinkqa', embedding=embeddings)
31
+
32
+ def ask_with_memory(vector_store, query, chat_history=[]):
33
+ from langchain_openai import ChatOpenAI
34
+ from langchain.chains import ConversationalRetrievalChain
35
+ from langchain.memory import ConversationBufferMemory
36
+
37
+ from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
38
+
39
+ llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.5)
40
+
41
+ retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': 3})
42
+
43
+ memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
44
+
45
+ system_template = r'''
46
+ Use the following pieces of context to answer the user's question. The title of the article is Intensifying literacy Instruction: Essential Practices. Do not mention the Header unless asked.
47
+ ----------------
48
+ Context: ```{context}```
49
+ '''
50
+
51
+ user_template = '''
52
+ Question: ```{question}```
53
+ Chat History: ```{chat_history}```
54
+ '''
55
+
56
+ messages= [
57
+ SystemMessagePromptTemplate.from_template(system_template),
58
+ HumanMessagePromptTemplate.from_template(user_template)
59
+ ]
60
+
61
+ qa_prompt = ChatPromptTemplate.from_messages (messages)
62
+
63
+ chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory,chain_type='stuff', combine_docs_chain_kwargs={'prompt': qa_prompt}, verbose=False
64
+ )
65
+
66
+ result = chain.invoke({'question': query, 'chat_history': st.session_state['history']})
67
+ # Append to chat history as a dictionary
68
+ st.session_state['history'].append((query, result['answer']))
69
+
70
+ return (result['answer'])
71
+
72
+ # Initialize chat history
73
+ if 'history' not in st.session_state:
74
+ st.session_state['history'] = []
75
+
76
+ # # STREAMLIT APPLICATION SETUP WITH PASSWORD
77
+
78
+ # Define the correct password
79
+ # correct_password = "MiBLSi"
80
+
81
+ #Add the image with a specified width
82
+ image_width = 300 # Set the desired width in pixels
83
+ st.image('MTSS.ai_Logo.png', width=image_width)
84
+ st.subheader('Ink QA™ | Dynamic PDFs')
85
+
86
+ # Using Markdown for formatted text
87
+ st.markdown("""
88
+ Resource: **Intensifying Literacy Instruction: Essential Practices**
89
+ """, unsafe_allow_html=True)
90
+
91
+ with st.sidebar:
92
+ # Password input field
93
+ # password = st.text_input("Enter Password:", type="password")
94
+
95
+ st.image('mimtss.png', width=200)
96
+ st.image('Literacy_Cover.png', width=200)
97
+ st.link_button("View | Download", "https://mimtsstac.org/sites/default/files/session-documents/Intensifying%20Literacy%20Instruction%20-%20Essential%20Practices%20%28NATIONAL%29.pdf")
98
+
99
+ Audio_Header_text = """
100
+ **Tune into Dr. St. Martin's introduction**"""
101
+ st.markdown(Audio_Header_text)
102
+
103
+ # Path or URL to the audio file
104
+ audio_file_path = 'Audio_Introduction_Literacy.m4a'
105
+ # Display the audio player widget
106
+ st.audio(audio_file_path, format='audio/mp4', start_time=0)
107
+
108
+ # Citation text with Markdown formatting
109
+ citation_Content_text = """
110
+ **Citation**
111
+ St. Martin, K., Vaughn, S., Troia, G., Fien, & H., Coyne, M. (2023). *Intensifying literacy instruction: Essential practices, Version 2.0*. Lansing, MI: MiMTSS Technical Assistance Center, Michigan Department of Education.
112
+
113
+ **Table of Contents**
114
+ * **Introduction**: pg. 1
115
+ * **Intensifying Literacy Instruction: Essential Practices**: pg. 4
116
+ * **Purpose**: pg. 4
117
+ * **Practice 1**: Knowledge and Use of a Learning Progression for Developing Skilled Readers and Writers: pg. 6
118
+ * **Practice 2**: Design and Use of an Intervention Platform as the Foundation for Effective Intervention: pg. 13
119
+ * **Practice 3**: On-going Data-Based Decision Making for Providing and Intensifying Interventions: pg. 16
120
+ * **Practice 4**: Adaptations to Increase the Instructional Intensity of the Intervention: pg. 20
121
+ * **Practice 5**: Infrastructures to Support Students with Significant and Persistent Literacy Needs: pg. 24
122
+ * **Motivation and Engagement**: pg. 28
123
+ * **Considerations for Understanding How Students' Learning and Behavior are Enhanced**: pg. 28
124
+ * **Summary**: pg. 29
125
+ * **Endnotes**: pg. 30
126
+ * **Acknowledgment**: pg. 39
127
+ """
128
+ st.markdown(citation_Content_text)
129
+
130
+ # if password == correct_password:
131
+ # Define a list of possible placeholder texts
132
+ placeholders = [
133
+ 'Example: Summarize the article in 200 words or less',
134
+ 'Example: What are the essential practices?',
135
+ 'Example: I am a teacher, why is this resource important?',
136
+ 'Example: How can this resource support my instruction in reading and writing?',
137
+ 'Example: Does this resource align with the learning progression for developing skilled readers and writers?',
138
+ 'Example: How does this resource address the needs of students scoring below the 20th percentile?',
139
+ 'Example: Are there assessment tools included in this resource to monitor student progress?',
140
+ 'Example: Does this resource provide guidance on data collection and analysis for monitoring student outcomes?',
141
+ "Example: How can this resource be used to support students' social-emotional development?",
142
+ "Example: How does this resource align with the district's literacy goals and objectives?",
143
+ 'Example: What research and evidence support the effectiveness of this resource?',
144
+ 'Example: Does this resource provide guidance on implementation fidelity'
145
+ ]
146
+
147
+ # Select a random placeholder from the list
148
+ if 'placeholder' not in st.session_state:
149
+ st.session_state.placeholder = random.choice(placeholders)
150
+
151
+ q = st.text_input(label='Ask a question or make a request ', value='', placeholder=st.session_state.placeholder)
152
+ # q = st.text_input(label='Ask a question or make a request ', value='')
153
+
154
+ if q:
155
+ with st.spinner('Thinking...'):
156
+ answer = ask_with_memory(vector_store, q, st.session_state.history)
157
+
158
+ # Display the response in a text area
159
+ st.text_area('Response: ', value=answer, height=400, key="response_text_area")
160
+
161
+ st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
162
+
163
+ # Prepare chat history text for display
164
+ # history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in st.session_state.history)
165
+ # Prepare chat history text for display in reverse order
166
+ history_text = "\n\n".join(f"Q: {entry[0]}\nA: {entry[1]}" for entry in reversed(st.session_state.history))
167
+
168
+ # Display chat history
169
+ st.text_area('Chat History', value=history_text, height=800)
170
+
171
+
172
+
173
+
174
+
175
  # import streamlit as st
176
  # import pinecone
177
  # from langchain.embeddings.openai import OpenAIEmbeddings
 
344
 
345
 
346