Joshua Sundance Bailey committed
Commit 85880a1
2 Parent(s): e4344c4 47c2ffc

Merge pull request #34 from joshuasundance-swca/summarize

This merge adds two document-chat modes, Summarization and Q&A Generation, backed by the new summarize.py and qagen.py modules and a reworked chain dispatch in app.py.
.idea/.name CHANGED
@@ -1 +1 @@
-langchain-streamlit-demo
+langchain-streamlit-demo
.idea/inspectionProfiles/Project_Default.xml CHANGED
@@ -18,4 +18,4 @@
   </inspection_tool>
   <inspection_tool class="PyShadowingNamesInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
  </profile>
-</component>
+</component>
.idea/inspectionProfiles/profiles_settings.xml CHANGED
@@ -3,4 +3,4 @@
   <option name="USE_PROJECT_PROFILE" value="false" />
   <version value="1.0" />
  </settings>
-</component>
+</component>
.idea/kubernetes-settings.xml CHANGED
@@ -3,4 +3,4 @@
   <component name="KubernetesSettings">
     <option name="contextName" value="swca-aks" />
   </component>
-</project>
+</project>
.idea/langchain-streamlit-demo.iml CHANGED
@@ -5,4 +5,4 @@
     <orderEntry type="jdk" jdkName="Remote Python 3.11.4 Docker (&lt;none&gt;:&lt;none&gt;) (5)" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
-</module>
+</module>
.idea/misc.xml CHANGED
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="ProjectRootManager" version="2" project-jdk-name="Remote Python 3.11.4 Docker (&lt;none&gt;:&lt;none&gt;) (5)" project-jdk-type="Python SDK" />
-</project>
+</project>
.idea/modules.xml CHANGED
@@ -5,4 +5,4 @@
       <module fileurl="file://$PROJECT_DIR$/.idea/langchain-streamlit-demo.iml" filepath="$PROJECT_DIR$/.idea/langchain-streamlit-demo.iml" />
     </modules>
   </component>
-</project>
+</project>
.idea/vcs.xml CHANGED
@@ -3,4 +3,4 @@
   <component name="VcsDirectoryMappings">
     <mapping directory="$PROJECT_DIR$" vcs="Git" />
   </component>
-</project>
+</project>
langchain-streamlit-demo/app.py CHANGED
@@ -1,29 +1,33 @@
 import os
 from datetime import datetime
 from tempfile import NamedTemporaryFile
-from typing import Union
+from typing import Tuple, List, Dict, Any, Union

 import anthropic
 import langsmith.utils
 import openai
 import streamlit as st
-from langchain import LLMChain
 from langchain.callbacks import StreamlitCallbackHandler
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.callbacks.tracers.langchain import LangChainTracer, wait_for_all_tracers
 from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
 from langchain.chains import RetrievalQA
+from langchain.chains.llm import LLMChain
 from langchain.chat_models import ChatOpenAI, ChatAnyscale, ChatAnthropic
 from langchain.document_loaders import PyPDFLoader
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.memory import ConversationBufferMemory, StreamlitChatMessageHistory
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain.schema.document import Document
 from langchain.schema.retriever import BaseRetriever
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import FAISS
 from langsmith.client import Client
 from streamlit_feedback import streamlit_feedback

+from qagen import get_qa_gen_chain, combine_qa_pair_lists
+from summarize import get_summarization_chain
+
 __version__ = "0.0.6"

 # --- Initialization ---
@@ -46,6 +50,7 @@ st_init_null(
     "document_chat_chain_type",
     "llm",
     "ls_tracer",
+    "provider",
    "retriever",
     "run",
     "run_id",
@@ -120,11 +125,11 @@ DEFAULT_CHUNK_OVERLAP = 0


 @st.cache_data
-def get_retriever(
+def get_texts_and_retriever(
     uploaded_file_bytes: bytes,
     chunk_size: int = DEFAULT_CHUNK_SIZE,
     chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
-) -> BaseRetriever:
+) -> Tuple[List[Document], BaseRetriever]:
     with NamedTemporaryFile() as temp_file:
         temp_file.write(uploaded_file_bytes)
         temp_file.seek(0)
@@ -138,7 +143,7 @@ def get_retriever(
     texts = text_splitter.split_documents(documents)
     embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
     db = FAISS.from_documents(texts, embeddings)
-    return db.as_retriever()
+    return texts, db.as_retriever()


 # --- Sidebar ---
@@ -152,10 +157,12 @@ with sidebar:
         index=SUPPORTED_MODELS.index(DEFAULT_MODEL),
     )

-    provider = MODEL_DICT[model]
+    st.session_state.provider = MODEL_DICT[model]

-    provider_api_key = PROVIDER_KEY_DICT.get(provider) or st.text_input(
-        f"{provider} API key",
+    provider_api_key = PROVIDER_KEY_DICT.get(
+        st.session_state.provider,
+    ) or st.text_input(
+        f"{st.session_state.provider} API key",
         type="password",
     )

@@ -170,7 +177,7 @@ with sidebar:

     openai_api_key = (
         provider_api_key
-        if provider == "OpenAI"
+        if st.session_state.provider == "OpenAI"
         else OPENAI_API_KEY
         or st.sidebar.text_input("OpenAI API Key: ", type="password")
     )
@@ -210,7 +217,14 @@ with sidebar:
     )
     document_chat_chain_type = st.selectbox(
         label="Document Chat Chain Type",
-        options=["stuff", "refine", "map_reduce", "map_rerank"],
+        options=[
+            "stuff",
+            "refine",
+            "map_reduce",
+            "map_rerank",
+            "Q&A Generation",
+            "Summarization",
+        ],
         index=0,
         help=chain_type_help,
         disabled=not document_chat,
@@ -218,7 +232,10 @@ with sidebar:

     if uploaded_file:
         if openai_api_key:
-            st.session_state.retriever = get_retriever(
+            (
+                st.session_state.texts,
+                st.session_state.retriever,
+            ) = get_texts_and_retriever(
                 uploaded_file_bytes=uploaded_file.getvalue(),
                 chunk_size=chunk_size,
                 chunk_overlap=chunk_overlap,
@@ -280,7 +297,7 @@ with sidebar:

 # --- LLM Instantiation ---
 if provider_api_key:
-    if provider == "OpenAI":
+    if st.session_state.provider == "OpenAI":
         st.session_state.llm = ChatOpenAI(
             model=model,
             openai_api_key=provider_api_key,
@@ -288,7 +305,7 @@ if provider_api_key:
             streaming=True,
             max_tokens=max_tokens,
         )
-    elif provider == "Anthropic":
+    elif st.session_state.provider == "Anthropic":
         st.session_state.llm = ChatAnthropic(
             model_name=model,
             anthropic_api_key=provider_api_key,
@@ -296,7 +313,7 @@ if provider_api_key:
             streaming=True,
             max_tokens_to_sample=max_tokens,
         )
-    elif provider == "Anyscale Endpoints":
+    elif st.session_state.provider == "Anyscale Endpoints":
         st.session_state.llm = ChatAnyscale(
             model=model,
             anyscale_api_key=provider_api_key,
@@ -321,18 +338,18 @@ for msg in STMEMORY.messages:
 if st.session_state.llm:
     # --- Document Chat ---
     if st.session_state.retriever:
-        # st.session_state.doc_chain = ConversationalRetrievalChain.from_llm(
-        #     st.session_state.llm,
-        #     st.session_state.retriever,
-        #     memory=MEMORY,
-        # )
-
-        st.session_state.doc_chain = RetrievalQA.from_chain_type(
-            llm=st.session_state.llm,
-            chain_type=document_chat_chain_type,
-            retriever=st.session_state.retriever,
-            memory=MEMORY,
-        )
+        if document_chat_chain_type == "Summarization":
+            st.session_state.doc_chain = "summarization"
+        elif document_chat_chain_type == "Q&A Generation":
+            st.session_state.doc_chain = get_qa_gen_chain(st.session_state.llm)
+
+        else:
+            st.session_state.doc_chain = RetrievalQA.from_chain_type(
+                llm=st.session_state.llm,
+                chain_type=document_chat_chain_type,
+                retriever=st.session_state.retriever,
+                memory=MEMORY,
+            )

     else:
         # --- Regular Chat ---
@@ -375,17 +392,62 @@ if st.session_state.llm:
         )

         try:
+            full_response: Union[str, None]
             if use_document_chat:
-                st_handler = StreamlitCallbackHandler(st.container())
-                callbacks.append(st_handler)
-                full_response = st.session_state.doc_chain(
-                    {"query": prompt},
-                    callbacks=callbacks,
-                    tags=["Streamlit Chat"],
-                    return_only_outputs=True,
-                )[st.session_state.doc_chain.output_key]
-                st_handler._complete_current_thought()
-                st.markdown(full_response)
+                if document_chat_chain_type == "Summarization":
+                    st.session_state.doc_chain = get_summarization_chain(
+                        st.session_state.llm,
+                        prompt,
+                    )
+                    full_response = st.session_state.doc_chain.run(
+                        st.session_state.texts,
+                        callbacks=callbacks,
+                        tags=["Streamlit Chat"],
+                    )
+
+                    st.markdown(full_response)
+                elif document_chat_chain_type == "Q&A Generation":
+                    config: Dict[str, Any] = dict(
+                        callbacks=callbacks,
+                        tags=["Streamlit Chat"],
+                    )
+                    if st.session_state.provider == "Anthropic":
+                        config["max_concurrency"] = 5
+                    raw_results = st.session_state.doc_chain.batch(
+                        [
+                            {"input": doc.page_content, "prompt": prompt}
+                            for doc in st.session_state.texts
+                        ],
+                        config,
+                    )
+                    results = combine_qa_pair_lists(raw_results).QuestionAnswerPairs
+
+                    def _to_str(idx, qap):
+                        question_piece = f"{idx}. **Q:** {qap.question}"
+                        whitespace = " " * (len(str(idx)) + 2)
+                        answer_piece = f"{whitespace}**A:** {qap.answer}"
+                        return f"{question_piece}\n{answer_piece}"
+
+                    output_text = "\n\n".join(
+                        [
+                            _to_str(idx, qap)
+                            for idx, qap in enumerate(results, start=1)
+                        ],
+                    )
+
+                    st.markdown(output_text)
+
+                else:
+                    st_handler = StreamlitCallbackHandler(st.container())
+                    callbacks.append(st_handler)
+                    full_response = st.session_state.doc_chain(
+                        {"query": prompt},
+                        callbacks=callbacks,
+                        tags=["Streamlit Chat"],
+                        return_only_outputs=True,
+                    )[st.session_state.doc_chain.output_key]
+                    st_handler._complete_current_thought()
+                    st.markdown(full_response)
             else:
                 message_placeholder = st.empty()
                 stream_handler = StreamHandler(message_placeholder)
@@ -399,7 +461,7 @@ if st.session_state.llm:
                 message_placeholder.markdown(full_response)
         except (openai.error.AuthenticationError, anthropic.AuthenticationError):
             st.error(
-                f"Please enter a valid {provider} API key.",
+                f"Please enter a valid {st.session_state.provider} API key.",
                 icon="❌",
             )
             full_response = None
@@ -468,4 +530,4 @@ if st.session_state.llm:
                 st.warning("Invalid feedback score.")

 else:
-    st.error(f"Please enter a valid {provider} API key.", icon="❌")
+    st.error(f"Please enter a valid {st.session_state.provider} API key.", icon="❌")
langchain-streamlit-demo/qagen.py ADDED
@@ -0,0 +1,75 @@
+from functools import reduce
+from typing import List
+
+from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+)
+from langchain.schema.language_model import BaseLanguageModel
+from langchain.schema.runnable import RunnableSequence
+from pydantic import BaseModel, field_validator, Field
+
+
+class QuestionAnswerPair(BaseModel):
+    question: str = Field(..., description="The question that will be answered.")
+    answer: str = Field(..., description="The answer to the question that was asked.")
+
+    @field_validator("question")
+    def validate_question(cls, v: str) -> str:
+        if not v.endswith("?"):
+            raise ValueError("Question must end with a question mark.")
+        return v
+
+
+class QuestionAnswerPairList(BaseModel):
+    QuestionAnswerPairs: List[QuestionAnswerPair]
+
+
+PYDANTIC_PARSER: PydanticOutputParser = PydanticOutputParser(
+    pydantic_object=QuestionAnswerPairList,
+)
+
+
+templ1 = """You are a smart assistant designed to help college professors come up with reading comprehension questions.
+Given a piece of text, you must come up with question and answer pairs that can be used to test a student's reading comprehension abilities.
+Generate as many question/answer pairs as you can.
+When coming up with the question/answer pairs, you must respond in the following format:
+{format_instructions}
+
+Do not provide additional commentary and do not wrap your response in Markdown formatting. Return RAW, VALID JSON.
+"""
+templ2 = """{prompt}
+Please create question/answer pairs, in the specified JSON format, for the following text:
+----------------
+{input}"""
+CHAT_PROMPT = ChatPromptTemplate.from_messages(
+    [
+        ("system", templ1),
+        ("human", templ2),
+    ],
+).partial(format_instructions=PYDANTIC_PARSER.get_format_instructions)
+
+
+def combine_qa_pair_lists(
+    qa_pair_lists: List[QuestionAnswerPairList],
+) -> QuestionAnswerPairList:
+    def reducer(
+        accumulator: QuestionAnswerPairList,
+        current: QuestionAnswerPairList,
+    ) -> QuestionAnswerPairList:
+        return QuestionAnswerPairList(
+            QuestionAnswerPairs=accumulator.QuestionAnswerPairs
+            + current.QuestionAnswerPairs,
+        )
+
+    return reduce(
+        reducer,
+        qa_pair_lists,
+        QuestionAnswerPairList(QuestionAnswerPairs=[]),
+    )
+
+
+def get_qa_gen_chain(llm: BaseLanguageModel) -> RunnableSequence:
+    return (
+        CHAT_PROMPT | llm | OutputFixingParser.from_llm(llm=llm, parser=PYDANTIC_PARSER)
+    )
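A hedged usage sketch of the Q&A generation pipeline, mirroring the .batch() call app.py makes over the split documents; llm and texts below are placeholders standing in for st.session_state.llm and st.session_state.texts.

# Hedged sketch, not part of the commit: batch the chain over document chunks,
# then merge the per-chunk results into one QuestionAnswerPairList.
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key="sk-...")  # placeholder
chain = get_qa_gen_chain(llm)
raw_results = chain.batch(
    [{"input": doc.page_content, "prompt": "Focus on key terms."} for doc in texts],
)
qa_pairs = combine_qa_pair_lists(raw_results).QuestionAnswerPairs
for idx, pair in enumerate(qa_pairs, start=1):
    print(f"{idx}. Q: {pair.question}\n   A: {pair.answer}")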
langchain-streamlit-demo/summarize.py ADDED
@@ -0,0 +1,51 @@
+from langchain.chains.base import Chain
+from langchain.chains.summarize import load_summarize_chain
+from langchain.prompts import PromptTemplate
+from langchain.schema.language_model import BaseLanguageModel
+
+prompt_template = """Write a concise summary of the following text, based on the user input.
+User input: {query}
+Text:
+```
+{text}
+```
+CONCISE SUMMARY:"""
+
+refine_template = (
+    "You are iteratively crafting a summary of the text below based on the user input\n"
+    "User input: {query}"
+    "We have provided an existing summary up to a certain point: {existing_answer}\n"
+    "We have the opportunity to refine the existing summary"
+    "(only if needed) with some more context below.\n"
+    "------------\n"
+    "{text}\n"
+    "------------\n"
+    "Given the new context, refine the original summary.\n"
+    "If the context isn't useful, return the original summary.\n"
+    "If the context is useful, refine the summary to include the new context.\n"
+    "Your contribution is helping to build a comprehensive summary of a large body of knowledge.\n"
+    "You do not have the complete context, so do not discard pieces of the original summary."
+)
+
+
+def get_summarization_chain(
+    llm: BaseLanguageModel,
+    prompt: str,
+) -> Chain:
+    _prompt = PromptTemplate.from_template(
+        prompt_template,
+        partial_variables={"query": prompt},
+    )
+    refine_prompt = PromptTemplate.from_template(
+        refine_template,
+        partial_variables={"query": prompt},
+    )
+    return load_summarize_chain(
+        llm=llm,
+        chain_type="refine",
+        question_prompt=_prompt,
+        refine_prompt=refine_prompt,
+        return_intermediate_steps=False,
+        input_key="input_documents",
+        output_key="output_text",
+    )
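Finally, a hedged sketch of the refine-style summarization entry point, matching the doc_chain.run(st.session_state.texts) call in app.py; llm and texts are placeholders. The user's chat prompt is baked into both the question and refine prompts via partial_variables, and the chain then folds over the document chunks with chain_type="refine".

# Hedged sketch, not part of the commit.
chain = get_summarization_chain(llm, "Summarize the key findings.")  # llm: any chat model
summary = chain.run(texts)  # texts: List[Document] from get_texts_and_retriever
print(summary)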