diff --git "a/9dFJT4oBgHgl3EQfoixI/content/tmp_files/load_file.txt" "b/9dFJT4oBgHgl3EQfoixI/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/9dFJT4oBgHgl3EQfoixI/content/tmp_files/load_file.txt" @@ -0,0 +1,795 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf,len=794 +page_content='ThoughtSource: A central hub for large language model reasoning data Simon Ott 1*,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Konstantin Hebenstreit 1*,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Valentin Liévin 2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Christoffer Egeberg Hother 4,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Milad Moradi 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Maximilian Mayrhauser 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Robert Praas 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Ole Winther 2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Matthias Samwald 1 1) Institute of Artificial Intelligence,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Medical University of Vienna,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Vienna,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Austria 2) Section for Cognitive Systems,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Technical University of Denmark,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Lyngby,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Denmark 3) School of Electrical Engineering and Computer Science,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' The Royal Institute of Technology (KTH),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Stockholm,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Sweden 4) Department of Clinical Immunology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Copenhagen University Hospital,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Copenhagen,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Denmark equal contribution Corresponding author: Matthias Samwald (matthias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='samwald [at] meduniwien.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='at) Abstract Large language models (LLMs) such as GPT-3 and ChatGPT have recently demonstrated impressive results across a wide range of tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' LLMs are still limited, however, in that they frequently fail at complex reasoning, their reasoning processes are opaque, they are prone to ‘hallucinate’ facts, and there are concerns about their underlying biases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Letting models verbalize reasoning steps as natural language, a technique known as chain-of-thought prompting, has recently been proposed as a way to address some of these issues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Here we present the first release of ThoughtSource, a meta-dataset and so�ware library for chain-of-thought (CoT) reasoning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' The goal of ThoughtSource is to improve future artificial intelligence systems by facilitating qualitative understanding of CoTs, enabling empirical evaluations, and providing training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' This first release of ThoughtSource integrates six scientific/medical, three general-domain and five math word question answering datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Background & Summary The most recent generation of large language models (LLMs) has produced impressive results across a wide range of tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Examples of such models include T0 1, GPT-3 2, InstructGPT 3 and ChatGPT (https://openai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='com/blog/chatgpt/).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' These models demonstrated remarkable ability to ThoughtSource: a central hub for large language model reasoning data | 2 generate text that is both realistic and coherent, as well as good performance on a broad spectrum of tasks, despite not explicitly being trained on them 3.' 
However, despite this ability, LLMs are also limited in several ways. They often fail to produce accurate predictions due to their inability to accomplish complex reasoning, such as solving mathematical problems or question answering tasks requiring multi-hop reasoning. Furthermore, they tend to be black boxes, making it difficult to understand how and why predictions are generated. These limitations severely restrict the application domains of LLMs and have the potential to cause harm, as lack of explainability and robustness can lead to critical failures and biases when these models are deployed in practice.

One recently proposed method for enabling complex reasoning and generating explanations with LLMs is to force models to explicitly verbalize reasoning steps as natural language, a technique known as chain-of-thought prompting 4,5. This method improved performance on a variety of tasks and sparked the active development of further refinements 6, such as decomposing problems and structuring reasoning (e.g., least-to-most prompting 7, ReAct 8, self-ask 9, maieutic prompting 10, successive prompting 11) and/or extending LLM capabilities by leveraging external services for tasks like information retrieval (e.g., self-ask 9, IRCoT 12, DSP 13). The terminology surrounding these rapidly evolving techniques is not settled, hence in this document we refer to all approaches that result in a linear sequence of reasoning steps as 'chain-of-thought' (CoT).
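To make the idea concrete, the following is a minimal illustration of the zero-shot variant of CoT prompting described by Kojima et al. 5, in which a trigger phrase is appended to the question so that the model produces a reasoning chain before the answer. The question and wording below are chosen only for illustration and are not taken from the integrated datasets.

question = (
    "A juggler can juggle 16 balls. Half of the balls are golf balls, "
    "and half of the golf balls are blue. How many blue golf balls are there?"
)

# Zero-shot chain-of-thought prompting: append a reasoning trigger to the question.
cot_prompt = f"Q: {question}\nA: Let's think step by step."

# The model is expected to respond with a reasoning chain such as
# "There are 16 balls in total. Half of them, 8, are golf balls. Half of the
#  golf balls, 4, are blue.", followed by a separate answer-extraction prompt.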
Meta-datasets (datasets of datasets) that are easily accessible and standardized have proven useful for training and evaluating versatile LLMs. Examples include SuperGLUE 14 for general-domain language model tasks, BigBIO 15 and BLURB 16 for biomedical tasks, or Pile 17 and ROOTS 18 as text corpora for LLM pre-training. Datasets can be complemented by tools such as PromptSource, which was used to convert a large number of datasets into prompts fit for training and interrogating LLMs. PromptSource facilitated training the highly performant T0 model 1.

Here we present the first release of ThoughtSource, a meta-dataset and software library for chain-of-thought reasoning in LLMs (https://github.com/OpenBioLink/ThoughtSource). The goals of ThoughtSource are to:

— Facilitate qualitative understanding of CoTs generated by LLMs under various conditions (e.g., across tasks, models and prompts).
— Enable empirical and quantitative evaluation.
— Provide a library of diverse CoT training data for improving performance, robustness, explainability and value-alignment of future LLM-based AI systems.

Methods

We selected NLP benchmarks for question answering and natural language inference for which pre-existing data for constructing CoTs was available. For some of the datasets, one or multiple additional datasets were used as sources for additional CoTs, allowing for the comparison of different CoT generation methodologies. We created data loader scripts compatible with the Hugging Face datasets library 19 for all datasets. Additionally, we collected metadata on attributes such as descriptions, websites and licenses. We contacted dataset providers and encouraged them to choose an open source/open data license if licensing information was unavailable or unclear.
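Because the loaders follow the Hugging Face datasets convention, an integrated dataset can be loaded with the standard datasets API. A minimal sketch follows; the loader path and configuration name are placeholders for illustration, not the exact names used in the repository.

from datasets import load_dataset  # Hugging Face datasets library

# Placeholder loader script and config name; the actual loader scripts live in the
# ThoughtSource GitHub repository, one per dataset.
worldtree = load_dataset(
    "dataloaders/worldtree/worldtree.py",   # data loader script (path assumed)
    name="worldtree_thoughtsource",         # config emitting the ThoughtSource schema (name assumed)
)

example = worldtree["train"][0]
print(example["question"])
print(example["cot"])   # reference chain-of-thought as a list of steps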
We implemented two kinds of schemas: 1) source dataset schemas, which are unique to each dataset and provide data close to their original format; and 2) a standardized ThoughtSource schema, which maps all datasets into a common format. The ThoughtSource schema was created by extending the question answering schema of the BigBIO project 15. We implemented tailored algorithms for converting each dataset because the collected datasets provide explanations in different ways, such as math expressions or structured graph-based explanations. Furthermore, we performed preprocessing such as capitalization and punctuation correction. To recover standard formatted text from pre-tokenized datasets, we reversed the tokenization. This preprocessing was performed only on data in the ThoughtSource schema, while data in the source schemas was left in its original formatting. All code for running these conversions is available in our GitHub repository.

We developed a suite of Python libraries and tools for generating novel CoTs and answers by calling LLM APIs, as well as tools for evaluating, comparing and annotating datasets. We built upon the LangChain library (https://github.com/hwchase17/langchain/) for interfacing with a wide variety of external LLM APIs.
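As an illustration of that interface, here is a minimal sketch of generating a CoT and an answer through LangChain's OpenAI wrapper. The model name, sampling settings, question and prompt wording are assumptions made for illustration, not the exact configuration used by the ThoughtSource tools.

from langchain.llms import OpenAI  # LangChain wrapper around the OpenAI completion API

llm = OpenAI(model_name="text-davinci-002", temperature=0, max_tokens=256)

question = "Would a pear sink in water? Answer yes or no."
cot_prompt = f"{question}\nAnswer: Let's think step by step."

cot = llm(cot_prompt)                                           # generated chain-of-thought
answer = llm(f"{cot_prompt}{cot}\nTherefore, the answer is")    # answer extraction step
print(cot, answer)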
Data Records

This first release of ThoughtSource integrates six scientific/medical, three general-domain and five math word question answering datasets (Table 1). For every dataset except PubmedQA and MedQA we provide 'reference CoTs', created by converting rationales provided by the original datasets into reasoning chains. These rationales, depending on the dataset, were created by human experts or obtained from crowdsourcing. For MedQA, MedMCQA and PubmedQA, we generated and integrated CoTs with the AI systems text-davinci-002 3 and code-davinci-002 20 (described in detail by co-authors Liévin et al. in a separate manuscript 21). Furthermore, we extended CommonsenseQA and StrategyQA with AI-generated CoTs created by few-shot 4 and zero-shot 5 prompting. Since current LLMs are still prone to errors, it should be noted that AI-generated CoTs may contain faulty reasoning.

Table 1: Integrated datasets. For some core datasets, additional datasets were used as sources for additional CoTs.

Dataset                                              License
Scientific and medical question answering
  WorldTree V2 22                                    AI2 Mercury license
  EntailmentBank 23                                  CC BY 4.0
  OpenBookQA 24                                      Apache License 2.0
  MedQA (USMLE) 25
    core dataset                                     MIT
    CoT source: few-shot from Liévin et al. 21       CC-BY 4.0
  MedMCQA 26
    core dataset                                     MIT
    CoT source: few-shot from Liévin et al. 21       CC-BY 4.0
  PubmedQA 27
    core dataset                                     MIT
    CoT source: few-shot from Liévin et al. 21       CC-BY 4.0
General-domain question answering
  CommonsenseQA 28
    core dataset                                     MIT
    CoT source: ECQA³                                Community Data License Agreement Sharing 1.0
    CoT source: few-shot from Wei et al. 4,
      zero-shot from Kojima et al. 5                 unspecified
  StrategyQA 29
    core dataset                                     MIT
    CoT source: few-shot from Wei et al. 4,
      zero-shot from Kojima et al. 5                 unspecified
  QED 30                                             CC BY-SA 3.0
Math word problems
  AQUA-RAT 31                                        Apache 2.0
  ASDiv 32                                           CC BY-NC 4.0
  GSM8K 33                                           MIT
  MAWPS 34                                           MIT
  SVAMP 35                                           MIT

Scientific/medical question answering datasets

WorldTree V2 22 is one of the most detailed multi-hop science question answering datasets available. Finding the right multiple-choice answer requires a multi-hop inference combining between 1 and 16 facts (average: 6). It contains explanations created by experts in the form of multiple facts. We concatenated these facts and applied a set of rules to improve style and grammaticality to yield reference CoTs that are close to natural language.

EntailmentBank 23 contains open-domain science exam questions and answers, along with systematic explanations that show how the correct answer is reached through a series of steps. These steps are organized into a tree structure, known as an entailment tree, which starts with known facts and progresses through intermediate conclusions until the final answer is reached. These entailment trees are also serialized into text-based proofs by traversing the trees. We applied a set of rules to improve style and grammaticality in these proofs to yield reference CoTs that are close to natural language.
OpenBookQA 24 contains questions modeled after open-book exams of elementary-level science. They require multi-step reasoning, commonsense knowledge, and a diverse application of core science facts to find the correct answer. The dataset provides over 1,300 core science facts and a mapping of these facts to all of the questions. By design, questions in OpenBookQA are answered incorrectly by both retrieval-based and word co-occurrence algorithms. The dataset contains a single-fact explanation of the correct answer for each question, which we adopted to create reference CoTs.

MedQA 25 is a free-form multiple-choice OpenQA dataset containing questions from medical board exams in the US (USMLE), Mainland China and Taiwan. We imported the English-language USMLE subset. Reference CoTs are not provided.

MedMCQA 26 is a multiple-choice question answering dataset containing real-world medical entrance exam questions from the All India Institute of Medical Sciences (AIIMS PG) and the National Eligibility cum Entrance Test (NEET PG). Answer rationales authored by human experts were integrated as reference CoTs.

PubmedQA 27 is a question answering dataset containing biomedical questions extracted from PubMed abstracts that can be answered with yes/no/maybe. In addition to the short answer, each question comes with a longer answer, which can be used as a reference CoT. For MedQA, MedMCQA and PubmedQA we added CoTs generated with the AI systems text-davinci-002 3 and code-davinci-002 20.
General-domain question answering datasets

CommonsenseQA 28 is a collection of multiple-choice questions that test a wide range of general knowledge. We created reference CoTs for the train and validation sets, derived from the crowd-sourced ECQA dataset³. We also added AI-generated reasoning chains produced with few-shot 4 and zero-shot 5 prompting, which are available for the validation split.

StrategyQA 29 is a question answering dataset that tests the ability to reason through open-domain questions and provide Yes/No answers. Each example includes a question, a decomposition of the question into reasoning steps, and evidence paragraphs from Wikipedia. The dataset was created through a crowdsourcing process to gather creative and diverse questions. Human-generated free-text reasoning chains are part of the train split of the original dataset and were used as reference CoTs; the accompanying Wikipedia paragraphs were not included in our CoTs. We extended the StrategyQA dataset with AI-generated CoTs created through few-shot 4 and zero-shot 5 prompting, which are available for the train split.

QED 30 is a collection of expert-annotated structured explanations for answers to questions, built upon a subset of the Google Natural Questions dataset. Given a question and a passage from Wikipedia, QED uses linguistic information to represent explanations as a series of interpretable steps, such as referential equality, sentencehood, and entailment. Structured reasoning chains by experts are provided for all examples.
To create reference CoTs, we extracted the sentence that entails the answer; statements about referential equality in QED were converted to natural language and added as additional steps in the CoTs (e.g. "The noun phrase […] in the sentence and the noun phrase […] in the question refer to the same thing.").

Math word problem datasets

Algebra Question Answering with Rationales (AQUA-RAT) 31 is a large-scale multiple-choice dataset containing algebraic word problems. Each problem consists of a question with five possible answers and a rationale, a step-by-step natural language explanation of the solution. We used these natural language explanations as reference CoTs.

The Academia Sinica Diverse (ASDiv) math word problem (MWP) dataset 32 aims to provide more diverse language patterns and problem types than previous datasets. It covers most of the math topics taught in elementary school. Each MWP is labeled with its grade level (indicating difficulty) and the needed math operation (e.g. division), and includes a short explanation of the solution. ASDiv provides these explanations in the form of nested math expressions using common operators such as addition, subtraction, division and multiplication. We generated reference CoTs by converting these math expressions into natural language explanation chains using a rule-based method, as sketched below.
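The following is a minimal sketch of the idea behind such a rule-based conversion, using Python's ast module to walk a nested arithmetic expression; the actual templates and expression format used for ASDiv, MAWPS and SVAMP may differ.

import ast

# Hypothetical step templates; the wording used in ThoughtSource may differ.
TEMPLATES = {
    ast.Add:  "Adding {a} and {b} gives {r}.",
    ast.Sub:  "Subtracting {b} from {a} gives {r}.",
    ast.Mult: "Multiplying {a} by {b} gives {r}.",
    ast.Div:  "Dividing {a} by {b} gives {r}.",
}

def expression_to_cot(expression):
    """Convert a nested arithmetic expression into a chain of natural-language steps."""
    steps = []

    def evaluate(node):
        if isinstance(node, ast.Constant):   # a plain number
            return node.value
        a, b = evaluate(node.left), evaluate(node.right)
        op = type(node.op)
        result = {ast.Add: a + b, ast.Sub: a - b,
                  ast.Mult: a * b, ast.Div: a / b}[op]
        steps.append(TEMPLATES[op].format(a=a, b=b, r=result))
        return result

    evaluate(ast.parse(expression, mode="eval").body)
    return steps

# expression_to_cot("(8 - 3) * 2")
# -> ["Subtracting 3 from 8 gives 5.", "Multiplying 5 by 2 gives 10."]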
Grade School Math 8K (GSM8K) 33 contains grade school math word problems. Despite their conceptual simplicity, these problems are more challenging to process than those of earlier datasets due to their linguistic diversity. The creators of GSM8K instructed crowd workers to write solutions to the problems in free-text format, which we used as reference CoTs in ThoughtSource, omitting any additional arithmetic specifications.

Math Word Problems (MAWPS) 34 is an online platform that provides a collection of math word problems. The problems have simple one- or two-line explanations of their solutions. MAWPS includes datasets from various sources, offers tools for automatically creating datasets with specific characteristics, and allows lexical and template overlap to be tuned. We converted the explanatory math expressions to reference CoTs with an approach similar to the one used for ASDiv.

Simple Variations on Arithmetic Math Word Problems (SVAMP) 35 was created by applying carefully chosen variations to examples from existing datasets, such as ASDiv and MAWPS. These variations make it difficult for language models to solve the problems using simple heuristics and instead require deeper understanding and reasoning ability. We converted math expressions to reference CoTs with an approach similar to the one used for ASDiv.

Dataset schema

Tables 2–5 provide descriptions and datatypes of the various fields of the ThoughtSource schema. Running a task on a sample yields a generated CoT and an answer to the question; annotations can be added programmatically or through an annotator tool.
Table 2: Fields of the 'sample' object.

Field          Description                                                             Datatype
id             Unique identifier of the object                                         string
ref_id         Identifier of external objects such as documents or other resources    string
question       Question of the task                                                    string
type           Type of the question answering task, currently one of
               ["multiplechoice", "text", "number", "collection"]                      string
choices        Set of multiple options containing the gold answer                      list(string)
context        Additional context for answering the question                           string
cot            Reference CoT, often human-generated                                    list(string)
answer         Gold answer of the task; can contain multiple elements if type
               is "collection"                                                         list(string)
generated_cot  List of generated_cot objects                                           list(generated_cot_object)

Table 3: Fields of the 'generated_cot' object.

Field                 Description                                                      Datatype
id                    Unique identifier of the object                                  string
templates_version     Version of the fragments.json file                               string
instruction           Identifier of the instruction fragment stored in fragments.json  string
cot_trigger           Identifier of the CoT trigger fragment stored in fragments.json  string
cot_trigger_template  Template specifying the structure of the prompt text             string
prompt_text           Full text of the prompt used for the CoT generation step         string
answers               List of generated answer objects                                 list(answer_object)
cot                   Generated chain-of-thought                                       string
author                Name of the author                                               string
date                  Date of the chain-of-thought generation                          string
api_service           Identification of the API service used                           string
model                 Identification of the language model used                        string
comment               Comment                                                          string
annotation            List of annotation objects                                       list(annotation_object)

Table 4: Fields of the 'answer' object.

Field                       Description                                                Datatype
id                          Unique identifier of the object                            string
answer_extraction           Identifier of the answer extraction fragment stored in
                            fragments.json                                             string
answer_extraction_template  Template specifying the structure of the prompt text       string
answer_extraction_text      Full text of the prompt used for the answer
                            extraction step                                            string
answer                      Extracted answer                                           string
correct_answer              True if the extracted answer is equal to the gold
                            answer, else false                                         bool

Table 5: Fields of the 'annotation' object.

Field    Description                              Datatype
author   Name of the author                       string
date     Date of the creation of the annotation   string
key      Label of the annotation                  string
value    Value of the annotation                  string
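To make the schema concrete, the following hand-written example shows a single sample populated according to Tables 2–5. All values, identifiers and fragment names are invented for illustration, and some prompt-template fields are omitted for brevity.

sample = {
    "id": "worldtree_train_42",        # invented identifier
    "ref_id": "",
    "question": "Which form of energy is produced when a rubber band vibrates?",
    "type": "multiplechoice",
    "choices": ["sound", "light", "heat", "electricity"],
    "context": "",
    "cot": [                           # reference CoT (here: expert-written facts)
        "A rubber band vibrating is a kind of mechanical vibration.",
        "Mechanical vibrations produce sound.",
    ],
    "answer": ["sound"],               # gold answer
    "generated_cot": [{
        "id": "0",
        "cot_trigger": "zero-shot-trigger-01",   # invented fragment identifier
        "cot": "When a rubber band vibrates it makes a noise, so it produces sound energy.",
        "answers": [{
            "id": "0",
            "answer_extraction": "multiple-choice-extraction-01",  # invented fragment identifier
            "answer": "sound",
            "correct_answer": True,
        }],
        "author": "",
        "date": "2023-01-27 12:00:00",
        "api_service": "openai",
        "model": "text-davinci-002",
        "comment": "",
        "annotation": [],
    }],
}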
Table 6 shows the example counts, CoT counts and answer types of each dataset. The majority of datasets in the current collection are of the multiple-choice answer type. The medical dataset MedMCQA is the largest among all datasets.

Table 6: Statistics and answer types for all datasets. Note that generated CoTs are not available for all examples, and multiple CoTs might have been generated for any given example. [Link: Notebook used for generating stats and graphs]

Dataset ID        Examples   Reference CoTs   Examples w. generated CoTs   Generated CoTs   Answer type
aqua              97,975     97,975           0                            0                multiple choice
asdiv             1,218      1,218            0                            0                number
commonsense_qa    12,102     10,962           1,221                        2,437            multiple choice
entailment_bank   1,840      1,840            0                            0                text
gsm8k             8,792      8,792            0                            0                number
mawps             1,921      1,921            0                            0                number
med_qa (USMLE)    12,723     0                1,273                        133,660          multiple choice
medmc_qa          193,155    161,558          1,000                        104,987          multiple choice
open_book_qa      5,957      5,957            0                            0                multiple choice
pubmed_qa         1,000      0                500                          2,500            multiple choice
qed               6,175      6,175            0                            0                collection
strategy_qa       2,780      2,290            2,289                        4,532            bool
svamp             1,000      1,000            0                            0                number
worldtree         4,367      4,365            0                            0                multiple choice

We analyzed the distribution of question and CoT field lengths (Fig. 1). MedQA has the longest median question length, while PubmedQA has the longest median CoT length. Several datasets contain outlier CoTs with extremely long text lengths. Context fields were only filled for the PubmedQA and QED datasets, with mean context lengths of 116 and 56 tokens, respectively.
Figure 1: Distribution of question and CoT field lengths. [Box plots per dataset: number of tokens in the question (left panel) and number of tokens in the CoT (right panel).]

Technical Validation

The datasets were reviewed by three team members and issues were tracked on the issue tracker of the associated GitHub repository. To characterize potential overlaps and relations between datasets, we calculated mutual n-gram overlap using n=3 (Fig. 2). To quantify the overlap between two sets of n-grams we use the Szymkiewicz–Simpson coefficient (overlap coefficient), which can be interpreted as the proportion of n-grams of the smaller dataset that are contained in the bigger dataset.
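As a formula, overlap(A, B) = |A ∩ B| / min(|A|, |B|). A small sketch of computing this coefficient over word trigrams follows; the whitespace tokenization used here is an assumption and may differ from the exact procedure used for the paper.

def trigram_set(texts, n=3):
    """Collect the set of word n-grams over a list of texts (whitespace tokenization)."""
    grams = set()
    for text in texts:
        tokens = text.lower().split()
        grams.update(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    return grams

def overlap_coefficient(a, b):
    """Szymkiewicz-Simpson coefficient: |A intersection B| / min(|A|, |B|)."""
    return len(a & b) / min(len(a), len(b))

# e.g. overlap_coefficient(trigram_set(worldtree_questions), trigram_set(entailment_bank_questions))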
There is an overlap of 1.0 between the sets of questions in WorldTree V2 and EntailmentBank: the QA pairs in EntailmentBank were taken from the WorldTree V2 dataset 23, so all questions in EntailmentBank are a subset of WorldTree V2. Furthermore, there is significant overlap between the questions contained in ASDiv and SVAMP, and between those in ASDiv and MAWPS. ASDiv and SVAMP have overlapping questions because a subset of examples from ASDiv was used as seed examples for the creation of SVAMP. For MAWPS and ASDiv, questions were crawled from web resources.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' The overlap could be due to examples being crawled from the same web resources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Besides overlaps in questions, we also identified overlaps in CoTs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' WorldTree v2 provided an initial pool of atomic facts that the annotators could use to construct an explanation tree in EntailmentBank (in addition to creating their own facts).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' This explains the high overlap of n-grams of CoTs in WorldTree v2 and EntailmentBank.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Similarly, a subset of WorldTree v2 facts was used for the creation of explanations in OpenbookQA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Figure 2: n-gram overlap in questions and CoTs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Overlap is measured by mutual n-gram overlap using n=3, values <0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='01 are omitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Usage Notes Python libraries for accessing and working with data can be downloaded from the Github repository and installed with the pip tool.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 3 demonstrates how to load a dataset, randomly sample from the pre-populated data in the dataset, call an external LLM API to generate novel CoTs and answers, automatically evaluate the accuracy of generated answers, and finally save all generated data to a JSON file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 4 depicts an excerpt of the resulting JSON file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Question CoT aqua1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='00 asdiv 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='02 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='00 asdiv 1.' 
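To make the measure concrete, the following is a minimal sketch of one way a mutual 3-gram overlap between two question collections could be computed. The tokenization and the normalization by the smaller n-gram pool are assumptions for illustration; the exact implementation used for Fig. 2 may differ.

def ngrams(text, n=3):
    # Lower-case, whitespace-tokenize and collect all n-grams of a text.
    tokens = text.lower().split()
    return {tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)}

def mutual_ngram_overlap(texts_a, texts_b, n=3):
    # Pool the n-grams of each collection and report the shared fraction,
    # normalized by the smaller pool (an assumption made for this sketch).
    grams_a = set().union(*(ngrams(t, n) for t in texts_a))
    grams_b = set().union(*(ngrams(t, n) for t in texts_b))
    if not grams_a or not grams_b:
        return 0.0
    return len(grams_a & grams_b) / min(len(grams_a), len(grams_b))

# Toy example: a question fully contained in a longer variant yields an overlap of 1.0.
print(mutual_ngram_overlap(
    ["which gas do plants absorb from the air"],
    ["which gas do plants absorb from the air during photosynthesis"],
))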
Usage Notes

Python libraries for accessing and working with data can be downloaded from the Github repository and installed with the pip tool. Fig. 3 demonstrates how to load a dataset, randomly sample from the pre-populated data in the dataset, call an external LLM API to generate novel CoTs and answers, automatically evaluate the accuracy of generated answers, and finally save all generated data to a JSON file. Fig. 4 depicts an excerpt of the resulting JSON file.

from cot import Collection

# Load a dataset
collection_worldtree = Collection(["worldtree"])

# Randomly sample 10 rows of train split
collection_worldtree_10 = collection_worldtree.select(split="train", number_samples=10)

# Create a config for calling the OpenAI API to generate new CoTs and answers.
config = {
    "instruction_keys": ["qa-01"],             # Determines which instructions are used
    "cot_trigger_keys": ["kojima-01"],         # Determines which cot triggers are used
    "answer_extraction_keys": ["kojima-A-D"],  # Determines which answer extraction prompts are used
    "author": "your_name",                     # Name of the person responsible for generation
    "api_service": "openai",                   # Name of the API called ("openai", "huggingface_hub", or a mock for testing: "mock_api")
    "engine": "text-davinci-002",              # Name of the engine used
    "temperature": 0,                          # Level of randomness in the generated output
    "max_tokens": 512,                         # Maximum length of output generated by the model
    "api_time_interval": 1.0,                  # Pause between two api calls in seconds
    "verbose": False,                          # Determines whether the progress of the generation is printed
    "warn": True,                              # Determines whether warnings that external APIs will be called are printed
}

# Generate novel chains-of-thought and answer extractions
collection_worldtree_10.generate(config=config)

# Evaluate accuracy of model predictions
collection_worldtree_10.evaluate()
# Example output: {'accuracy': {'qa-01_kojima-01_kojima-A-D': 0.86}}

# Save all data (including evaluation data) to JSON file
collection_worldtree_10.dump("worldtree_10_evaluate.json")

Figure 3: Demonstration of the ThoughtSource API. Basic functionalities of the data loader, generator and evaluator modules are demonstrated.
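As the example output suggests, the accuracy entry is keyed by the combination of instruction, CoT trigger and answer extraction used during generation. The dumped JSON file (an excerpt of which is shown in Fig. 4 below) can also be post-processed with standard tools. The following minimal sketch is not part of the ThoughtSource API; it only assumes the per-question fields shown in Fig. 4 and tallies how many extracted answers were marked correct.

import json

def iter_items(node):
    # Recursively yield question entries (dicts with a "generated_cot" field),
    # so the sketch does not depend on the exact nesting of the dumped file.
    if isinstance(node, dict):
        if "generated_cot" in node:
            yield node
        else:
            for value in node.values():
                yield from iter_items(value)
    elif isinstance(node, list):
        for value in node:
            yield from iter_items(value)

with open("worldtree_10_evaluate.json") as f:
    data = json.load(f)

correct = total = 0
for item in iter_items(data):
    for generated in item["generated_cot"]:
        for answer in generated["answers"]:
            total += 1
            correct += bool(answer["correct_answer"])

print(f"{correct}/{total} extracted answers marked as correct")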
{
  "id": "1242",
  "ref_id": "",
  "question": "Which is a characteristic of a sperm cell but not of an egg cell?",
  "type": "multiplechoice",
  "choices": [
    "round shape",
    "presence of a tail",
    "contains genetic information",
    "involved in sexual reproduction"
  ],
  "context": "",
  "cot": [
    "A part of something means a characteristic of something.",
    "A tail is not part of an egg cell.",
    "A tail is a part of a sperm cell.",
    "A part of something is present in that something."
  ],
  "answer": ["presence of a tail"],
  "generated_cot": [
    {
      "id": "738b54ba-9a20-47e6-b8ff-7cb876103b92",
      "fragments_version": "0.01",
      "api_service": "openai",
      "model": "{'name': 'text-davinci-002', 'temperature': 0, 'max_tokens': 512}",
      "instruction": "qa-01",
      "cot_trigger": "kojima-01",
      "cot": "We know that both sperm and egg cells are involved in sexual reproduction, so we can eliminate (D). \n\nWe also know that both sperm and egg cells contain genetic information, so we can eliminate (C). \n\nThat leaves us with (A) and (B). \n\nWe know that sperm cells have a tail, but egg cells do not. Therefore, the correct answer is (B).",
      "answers": [
        {
          "id": "7f7cc26f-a3b3-4b59-9af7-35980514d0c3",
          "answer_extraction": "kojima-A-D",
          "answer": "B.",
          "correct_answer": true
        }
      ],
      "author": "your_name",
      "date": "2023/01/12 14:18:57",
      "comment": "",
      "annotation": []
    }
  ]
}

Figure 4: An excerpt of data generated by running the example code. Data for a single question from WorldTree v2 are shown, including the human-authored reference CoT, the gold-standard answer, an AI-generated CoT and extracted answer, as well as evaluation results. Some fields were omitted for legibility.

In a zero-shot setup, specific text fragments can be used to prompt question answering and CoT reasoning in LLMs. ThoughtSource includes a curated list of text fragments that can be used to generate novel CoTs (Fig. 5). Where possible, we also mapped individual CoTs in pre-existing CoT datasets to the text fragments that were used in their creation.
"instructions": {
  "qa-01": "Answer the following question through step-by-step reasoning.",
  "qa-02": "Answer the following question through careful, concise step-by-step reasoning.",
  "qa-03": "Answer the following question through careful, concise step-by-step reasoning. Avoid making up wrong statements. If the question does not make sense or cannot be answered, write \"I cannot answer the question\". If you do not have a good answer, write \"I do not have a good answer\". If you are uncertain, write \"I am uncertain about this\".",
  [...]
},
"cot_triggers": {
  "kojima-01": "Answer: Let's think step by step.",
  "kojima-02": "Answer: We should think about this step by step.",
  "kojima-03": "Answer: First,",
  "kojima-04": "Answer: Before we dive into the answer,",
  [...]
  "lievin-01": "Answer: Let's derive the differential diagnosis step by step.",
  "lievin-02": "Answer: Let's use step by step inductive reasoning, given the medical nature of the question.",
  [...]
  "lievin-26": "Answer: Let's follow a Bayesian step by step approach.",
  "lievin-27": "Answer: Let's reflect on each option from the least likely to the most likely.",
  "lievin-28": "Answer: Let's use step by step Bayesian reasoning, given the medical nature of the question."
},
"answer_extractions": {
  "kojima-01": "Therefore, the answer is",
  "kojima-02": "Therefore,",
  "kojima-03": "The answer is",
  "kojima-numerals": "Therefore, the answer (arabic numerals) is",
  "kojima-yes-no": "Therefore, the answer (Yes or No) is",
  "kojima-A-C": "Therefore, among A through C, the answer is",
  "kojima-A-D": "Therefore, among A through D, the answer is",
  [...]
}

Figure 5: An excerpt of the collection of prompt fragments. These fragments can be used to build prompts for interacting with LLMs, allowing for empirical testing of how different prompts affect model performance.
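As a rough illustration of how these fragments fit together in the two-stage zero-shot CoT procedure of Kojima et al. 5, the sketch below assembles an instruction, a question with its answer choices, and a CoT trigger into a reasoning prompt, and then appends an answer-extraction fragment to the generated CoT. The exact templates (spacing, choice formatting) used by the ThoughtSource generator may differ, and call_llm is a hypothetical stand-in for an external API call.

# Fragments as in Fig. 5
instruction = "Answer the following question through step-by-step reasoning."   # "qa-01"
cot_trigger = "Answer: Let's think step by step."                               # "kojima-01"
answer_extraction = "Therefore, among A through D, the answer is"               # "kojima-A-D"

# Example question from Fig. 4
question = "Which is a characteristic of a sperm cell but not of an egg cell?"
choices = ["round shape", "presence of a tail",
           "contains genetic information", "involved in sexual reproduction"]
choice_block = "\n".join(f"{letter}) {text}" for letter, text in zip("ABCD", choices))

# Stage 1: elicit a reasoning chain.
cot_prompt = f"{instruction}\n\n{question}\n{choice_block}\n\n{cot_trigger}"
# generated_cot = call_llm(cot_prompt)          # hypothetical LLM API call

# Stage 2: append the generated CoT and the answer-extraction fragment
# so the model returns a short, easily parsed final answer.
# answer_prompt = f"{cot_prompt} {generated_cot}\n{answer_extraction}"
# final_answer = call_llm(answer_prompt)

print(cot_prompt)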
We provide two web-based interfaces for exploring and annotating ThoughtSource data, the Dataset Viewer and the Annotator. The Dataset Viewer is a simple interface for exploring dataset contents. The Annotator (Fig. 6) allows you to upload specific subsets of a dataset, provides convenience functions for highlighting similarities between different generated CoTs and the correctness of generated answers, and allows you to annotate individual CoTs interactively. The Annotator facilitates identifying strengths and weaknesses of different CoTs. Annotations can be used for downstream model evaluation and for further improving the capabilities of AI models through fine-tuning / reinforcement learning.

Figure 6: The ThoughtSource Annotator. The web-based interface allows for convenient inspection and annotation of reasoning chains and answers. Text that is similar between CoTs can be automatically highlighted based on an easily adjustable similarity threshold, facilitating a better understanding of similarities and differences of different reasoning chains.

[Figure 6 screenshot: the Annotator showing a medical question on malaria prophylaxis in pregnancy, three generated CoTs side by side with their extracted answers, an adjustable tf-idf visualisation threshold, and annotation labels such as "Incorrect reasoning", "Insufficient knowledge", "Incorrect reading comprehension" and "Too verbose".]
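The interface indicates that the similarity highlighting is based on tf-idf with an adjustable visualisation threshold. A minimal sketch of this idea, using scikit-learn rather than the Annotator's actual code and toy sentences rather than real CoTs, could look as follows.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Sentences from two generated CoTs (toy example)
cot_a = ["Sperm cells have a tail.", "Egg cells do not have a tail."]
cot_b = ["A tail is a part of a sperm cell.", "A tail is not part of an egg cell."]

# Fit a shared tf-idf vocabulary and compare every pair of sentences.
vectorizer = TfidfVectorizer().fit(cot_a + cot_b)
similarity = cosine_similarity(vectorizer.transform(cot_a), vectorizer.transform(cot_b))

threshold = 0.3  # plays the role of the adjustable visualisation threshold in the UI
for i, row in enumerate(similarity):
    for j, score in enumerate(row):
        if score >= threshold:
            print(f"CoT A sentence {i} is similar to CoT B sentence {j} (cosine {score:.2f})")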
All tools and libraries, as well as more detailed demonstration notebooks, can be found on the project Github page. We plan to add more datasets and generated CoTs to the ThoughtSource repository, and we welcome outside contributions. Novel CoTs for existing core datasets can be generated and shared through the ThoughtSource APIs and JSON files. Completely new datasets can also be added, as described in the Github repository's contribution guide.

Code Availability

All code, data and tools are openly available at https://github.com/OpenBioLink/ThoughtSource. Our code and data are licensed under an MIT license, while data adapted from existing datasets are available under the licenses of their respective sources.

Acknowledgements

We thank the primary dataset contributors who assisted with assembling the ThoughtSource meta-dataset.

Author contributions

S.O. and K.H. wrote the code for accessing, converting, generating and analysing datasets, and wrote parts of the manuscript and documentation. V.L., C.E. and O.W. generated and analysed CoT data for medical datasets. M.Ma. wrote the code of the annotator software. M.Mo. wrote a first prototype of code for accessing and converting datasets. R.P. contributed to improving code and documentation quality. M.S. conceived and supervised the project and wrote parts of the manuscript and documentation. All authors have read and approved the final manuscript.

Competing interests

The authors declare that there are no conflicts of interest.

References

1. Sanh, V. et al. Multitask Prompted Training Enables Zero-Shot Task Generalization. arXiv (2021).
2. Brown, T. B. et al. Language Models are Few-Shot Learners. arXiv (2020).
3. Ouyang, L. et al. Training language models to follow instructions with human feedback. arXiv (2022) doi:10.48550/arxiv.2203.02155.
4. Wei, J. et al. Chain of Thought Prompting Elicits Reasoning in Large Language Models. arXiv (2022) doi:10.48550/arxiv.2201.11903.
5. Kojima, T., Gu, S. S., Reid, M., Matsuo, Y. & Iwasawa, Y. Large Language Models are Zero-Shot Reasoners. arXiv (2022) doi:10.48550/arxiv.2205.11916.
6. Huang, J. & Chang, K. C.-C. Towards Reasoning in Large Language Models: A Survey. Preprint at https://doi.org/10.48550/arXiv.2212.10403 (2022).
7. Zhou, D. et al. Least-to-Most Prompting Enables Complex Reasoning in Large Language Models. arXiv (2022) doi:10.48550/arxiv.2205.10625.
8. Yao, S. et al. ReAct: Synergizing Reasoning and Acting in Language Models. arXiv (2022) doi:10.48550/arxiv.2210.03629.
9. Press, O. et al. Measuring and Narrowing the Compositionality Gap in Language Models. arXiv (2022) doi:10.48550/arxiv.2210.03350.
10. Jung, J. et al. Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations. arXiv (2022) doi:10.48550/arxiv.2205.11822.
11. Dua, D., Gupta, S., Singh, S. & Gardner, M. Successive Prompting for Decomposing Complex Questions. Preprint at https://doi.org/10.48550/arXiv.2212.04092 (2022).
12. Trivedi, H., Balasubramanian, N., Khot, T. & Sabharwal, A. Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions. Preprint at https://doi.org/10.48550/arXiv.2212.10509 (2022).
13. Khattab, O. et al. Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP. Preprint at https://doi.org/10.48550/arXiv.2212.14024 (2023).
14. Wang, A. et al. SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems. in Advances in Neural Information Processing Systems (eds. Wallach, H. et al.) vol. 32 3266–3280 (Curran Associates, Inc., 2019).
15. Fries, J. A. et al. BigBIO: A Framework for Data-Centric Biomedical Natural Language Processing. in (arXiv, 2022). doi:10.48550/arXiv.2206.15076.
16. Gu, Y. et al. Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing. ACM Trans. Comput. Healthc. 3, 2:1-2:23 (2021).
17. Gao, L. et al. The Pile: An 800GB Dataset of Diverse Text for Language Modeling. arXiv (2020).
18. Laurençon, H. et al. The BigScience ROOTS Corpus: A 1.6TB Composite Multilingual Dataset. in (2022).
19. Lhoest, Q. et al. Datasets: A Community Library for Natural Language Processing. in Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations 175–184 (Association for Computational Linguistics, 2021). doi:10.18653/v1/2021.emnlp-demo.21.
20. Chen, M. et al. Evaluating Large Language Models Trained on Code. Preprint at https://doi.org/10.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='48550/arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='2107.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='03374 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Liévin, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Hother, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Winther, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Can large language models reason about medical questions?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' arXiv (2022) doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='48550/arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='2207.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='08143.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Xie, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' WorldTree V2: A Corpus of Science-Domain Structured Explanations and Inference Patterns supporting Multi-Hop Inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the Twel�h Language Resources and Evaluation Conference 5456–5473 (European Language Resources Association, 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Dalvi, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Explaining Answers with Entailment Trees.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Preprint at http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='org/abs/2104.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='08661 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Mihaylov, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Clark, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Khot, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Sabharwal, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Can a Suit of Armor Conduct Electricity?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' A New Dataset for Open Book Question Answering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing 2381–2391 (Association for Computational Linguistics, 2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/D18-1260.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Jin, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' What Disease Does This Patient Have?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' A Large-Scale Open Domain Question ThoughtSource: a central hub for large language model reasoning data | 18 Answering Dataset from Medical Exams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 11, 6421 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 26.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Pal, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Umapathi, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Sankarasubbu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' MedMCQA: A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the Conference on Health, Inference, and Learning 248–260 (PMLR, 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Jin, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Dhingra, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Liu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Cohen, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Lu, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' PubMedQA: A Dataset for Biomedical Research Question Answering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP) 2567–2577 (Association for Computational Linguistics, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/D19-1259.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Talmor, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Herzig, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Lourie, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Berant, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers) 4149–4158 (Association for Computational Linguistics, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/N19-1421.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Geva, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Did Aristotle Use a Laptop?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' A Question Answering Benchmark with Implicit Reasoning Strategies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Assoc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Comput.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Linguist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 9, 346–361 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Lamm, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' QED: A Framework and Dataset for Explanations in Question Answering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Assoc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Comput.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Linguist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 9, 790–806 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Ling, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Yogatama, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Dyer, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Blunsom, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) 158–167 (Association for Computational Linguistics, 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/P17-1015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Miao, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Liang, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Su, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics 975–984 (Association for Computational Linguistics, 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='acl-main.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Cobbe, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Training Verifiers to Solve Math Word Problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Preprint at https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='48550/arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='2110.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='14168 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Koncel-Kedziorski, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Roy, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Amini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Kushman, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Hajishirzi, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' MAWPS: A Math Word Problem Repository.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies 1152–1157 (Association for Computational Linguistics, 2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/N16-1136.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' 35.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='Patel, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=', Bhattamishra, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' & Goyal, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' Are NLP Models really able to Solve Simple Math Word Problems?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' in Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies 2080–2094 (Association for Computational Linguistics, 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='18653/v1/2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='naacl-main.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'} +page_content='168.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9dFJT4oBgHgl3EQfoixI/content/2301.11596v1.pdf'}