diff --git a/.gitattributes b/.gitattributes
index c7d9f3332a950355d5a77d85000f05e6f45435ea..9bbd104d040800638d5a9b31631fa4978b1f04ac 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoints/open-llama/7B/open_llama_7b_preview_300bt_easylm filter=lfs diff=lfs merge=lfs -text
+data/alpaca/alpaca_data_cleaned_archive_origin.json filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e84385252ea8e87ab9c4aab5d6dc265e858d50c3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+__pycache__
+.idea
+.DS_Store
+*.egg-info
+build
+
+# data
+data
+checkpoints
+out
+!data/shakespeare/prepare.py
+wandb
+
+# downloaded by our tests
+original_model.py
+original_adapter.py
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..fe60df99e7ff3db486c7722fe98e7739614e7a0f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2023] Lightning AI
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index 09f57832c8954e8c173e36a80408c72d9b921525..767b1052633bbb32fc72d1b7b24b3c2468a0a848 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,187 @@
----
-license: openrail
----
+
+
+
+
+
+
+![cpu-tests](https://github.com/lightning-AI/lit-llama/actions/workflows/cpu-tests.yml/badge.svg) [![Build Status](https://dev.azure.com/Lightning-AI/lit%20Models/_apis/build/status%2FLightning-AI.lit-LLaMA?branchName=main)](https://dev.azure.com/Lightning-AI/lit%20Models/_build/latest?definitionId=49&branchName=main) [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Lightning-AI/lit-llama/blob/master/LICENSE) [![Discord](https://img.shields.io/discord/1077906959069626439?style=plastic)](https://discord.gg/VptPCZkGNa)
+
+
+
+
+
+# ⚡ Lit-LLaMA ️
+Independent implementation of [LLaMA](https://github.com/facebookresearch/llama) that is fully open source under the **Apache 2.0 license.**
+
+This implementation builds on [nanoGPT](https://github.com/karpathy/nanoGPT).
+
+The original LLaMA weights are distributed by Meta under a [research-only license](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md#model-details).
+
+New Apache 2.0 licensed weights are being released as part of the [Open LLaMA project](https://github.com/openlm-research/open_llama). Both can be [loaded in Lit-LLaMA](howto/download_weights.md).
+
+## Why?
+
+We believe that AI should be fully open source and part of the collective knowledge.
+
+The original [LLaMA code](https://github.com/facebookresearch/llama) is [GPL licensed](https://github.com/facebookresearch/llama/blob/main/LICENSE) which means any project using it must also be released under GPL.
+
+This "taints" any other code and prevents integration with the rest of the ecosystem.
+
+**Lit-LLaMA solves that for good.**
+
+
+
+## Design principles
+**Lit-LLaMA** is:
+
+- **Simple:** Single-file implementation without boilerplate.
+- **Correct:** Numerically equivalent to the original model.
+- **Optimized:** Runs on consumer hardware or at scale.
+- **Open-source:** No strings attached.
+
+## Get involved!
+[Join our Discord](https://discord.gg/VptPCZkGNa) to build high-performance, truly open-source models for the common benefit of the community.
+
+
+
+## Setup
+
+Clone the repo
+
+```bash
+git clone https://github.com/Lightning-AI/lit-llama
+cd lit-llama
+```
+
+Install dependencies
+
+```bash
+pip install -r requirements.txt
+```
+
+You are all set! 🎉
+
+
+
+## Use the model
+
+To generate text predictions, you need to download the model weights. **If you don't have them, check out our [guide](howto/download_weights.md).**
+
+Run inference:
+
+```bash
+python generate.py --prompt "Hello, my name is"
+```
+
+This will run the 7B model and require ~26 GB of GPU memory (A100 GPU).
+
+[Full guide for generating samples from the model](howto/inference.md).
+
+### Run Lit-LLaMA on consumer devices
+
+On GPUs with `bfloat16` support, the `generate.py` script will automatically convert the weights and consume about 14 GB.
+For GPUs with less memory, or ones that don't support `bfloat16`, enable quantization (`--quantize llm.int8`):
+
+```bash
+python generate.py --quantize llm.int8 --prompt "Hello, my name is"
+```
+
+See `python generate.py --help` for more options.
+
+You can also use GPTQ-style int4 quantization, but this requires converting the weights first:
+
+```bash
+python quantize.py --checkpoint_path lit-llama.pth --tokenizer_path tokenizer.model --output_path llama-7b-gptq.4bit.pth --dtype bfloat16 --quantize gptq.int4
+```
+
+With the generated quantized checkpoint, generation works as usual with `--quantize gptq.int4`, bringing GPU memory usage down to about 5 GB. Since only the weights of the Linear layers are quantized, it is still useful to use `--dtype bfloat16` even with quantization enabled.
+
+[Full guide for generating samples from the model](howto/inference.md).
+
+## Finetune the model
+
+We provide simple training scripts in `finetune_lora.py` and `finetune_adapter.py` that instruction-tune a pretrained model on the [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset using the techniques of [LoRA](https://arxiv.org/abs/2106.09685) and [Adapter](https://arxiv.org/abs/2303.16199).
+
+1. Download the data and generate an instruction tuning dataset:
+
+ ```bash
+ python scripts/prepare_alpaca.py
+ ```
+
+2. Run the finetuning script
+
+ ```bash
+ python finetune_lora.py
+ ```
+ or
+ ```bash
+ python finetune_adapter.py
+ ```
+
+It is expected that you have downloaded the pretrained weights as described above.
+The finetuning requires at least one GPU with ~24 GB memory (RTX 3090). Follow the instructions in the script to efficiently fit your GPU memory.
+Note: For some GPU models you might need to set `torch.backends.cuda.enable_flash_sdp(False)` (see comments at the top of the script).
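+
+A minimal sketch of that workaround, assuming it is applied at the top of the finetuning script before the model is instantiated (the flag is a standard PyTorch 2.x toggle, not something specific to this repo):
+
+```python
+import torch
+
+# Disable the FlashAttention kernel for scaled dot product attention and fall back
+# to the math / memory-efficient implementations, which some GPUs require.
+torch.backends.cuda.enable_flash_sdp(False)
+```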
+
+More details about each finetuning method and how you can apply it to your own data can be found in our technical how-to guides.
+
+### Finetuning How-To Guides
+
+These technical tutorials illustrate how to run the finetuning code.
+
+- [Finetune with LoRA](howto/finetune_lora.md)
+- [Finetune with Adapters](howto/finetune_adapter.md)
+
+### Understanding Finetuning -- Conceptual Tutorials
+
+Looking for conceptual tutorials and explanations? We have some additional articles below:
+
+- [Understanding Parameter-Efficient Finetuning of Large Language Models: From Prefix Tuning to LLaMA-Adapters](https://lightning.ai/pages/community/article/understanding-llama-adapters/)
+
+## Pre-training
+
+We provide a simple training script based on Fabric if you want to venture into pre-training on RedPajama, a reproduction of the original LLaMA dataset.
+Conversion scripts for our optimized streaming `PackedDataset` are included.
+
+Follow this guide to start pre-training on the RedPajama dataset:
+
+- [Pretrain on RedPajama](howto/train_redpajama.md)
+
+## Get involved!
+
+We are on a quest towards fully open source AI.
+
+
+
+Join us and start contributing, especially in the following areas:
+
+- [ ] [Pre-training](https://github.com/Lightning-AI/lit-llama/labels/pre-training)
+- [ ] [Fine-tuning (full and LoRA)](https://github.com/Lightning-AI/lit-llama/labels/fine-tuning)
+- [ ] [Quantization](https://github.com/Lightning-AI/lit-llama/labels/quantization)
+- [ ] [Sparsification](https://github.com/Lightning-AI/lit-llama/labels/sparsification)
+
+Look at `train.py` for a starting point towards pre-training / fine-tuning using [Lightning Fabric](https://lightning.ai/docs/fabric/stable/).
+
+We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment.
+
+Unsure about contributing? Check out our [Contributing to Lit-LLaMA: A Hitchhiker’s Guide to the Quest for Fully Open-Source AI](https://lightning.ai/pages/community/tutorial/contributing-to-lit-llama-a-hitchhikers-guide-to-the-quest-for-fully-open-source-ai/) guide.
+
+Don't forget to [join our Discord](https://discord.gg/VptPCZkGNa)!
+
+## Acknowledgements
+
+- [@karpathy](https://github.com/karpathy) for [nanoGPT](https://github.com/karpathy/nanoGPT)
+- [@FacebookResearch](https://github.com/facebookresearch) for the original [LLaMA implementation](https://github.com/facebookresearch/llama)
+- [@TimDettmers](https://github.com/TimDettmers) for [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
+- [@Microsoft](https://github.com/microsoft) for [LoRA](https://github.com/microsoft/LoRA)
+- [@IST-DASLab](https://github.com/IST-DASLab) for [GPTQ](https://github.com/IST-DASLab/gptq)
+
+## License
+
+Lit-LLaMA is released under the [Apache 2.0](https://github.com/Lightning-AI/lit-llama/blob/main/LICENSE) license.
diff --git a/checkpoints/lit-llama-bak/7B/lit-llama.pth b/checkpoints/lit-llama-bak/7B/lit-llama.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2e214d1ebbc75845669768f067a130ce56286e18
--- /dev/null
+++ b/checkpoints/lit-llama-bak/7B/lit-llama.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09b6a8994e8bc8ed517e600e355a19cbe41eaf8338532ff4f88d43df6b95e3cd
+size 26953750909
diff --git a/checkpoints/lit-llama-bak/tokenizer.model b/checkpoints/lit-llama-bak/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..88fe55a1de2ddca4e12292cae5273413d8b637a7
--- /dev/null
+++ b/checkpoints/lit-llama-bak/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc820fc43f4173d6362c16658c409ed423929a807e55a984af96cce1277d39a4
+size 772031
diff --git a/checkpoints/lit-llama/7B/lit-llama.pth b/checkpoints/lit-llama/7B/lit-llama.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2e214d1ebbc75845669768f067a130ce56286e18
--- /dev/null
+++ b/checkpoints/lit-llama/7B/lit-llama.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09b6a8994e8bc8ed517e600e355a19cbe41eaf8338532ff4f88d43df6b95e3cd
+size 26953750909
diff --git a/checkpoints/lit-llama/tokenizer.model b/checkpoints/lit-llama/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..88fe55a1de2ddca4e12292cae5273413d8b637a7
--- /dev/null
+++ b/checkpoints/lit-llama/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc820fc43f4173d6362c16658c409ed423929a807e55a984af96cce1277d39a4
+size 772031
diff --git a/checkpoints/open-llama/7B/.gitattributes b/checkpoints/open-llama/7B/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..9c4ed8b96533287461f2149b8a93fbf24cd5ff5a
--- /dev/null
+++ b/checkpoints/open-llama/7B/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+open_llama_7b_preview_300bt_easylm filter=lfs diff=lfs merge=lfs -text
diff --git a/checkpoints/open-llama/7B/LICENSE.txt b/checkpoints/open-llama/7B/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/checkpoints/open-llama/7B/LICENSE.txt
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/checkpoints/open-llama/7B/README.md b/checkpoints/open-llama/7B/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a05b0a666b1b1543b3482e726745dc489f3ba9aa
--- /dev/null
+++ b/checkpoints/open-llama/7B/README.md
@@ -0,0 +1,126 @@
+---
+license: apache-2.0
+datasets:
+ - togethercomputer/RedPajama-Data-1T
+---
+
+
+# OpenLLaMA: An Open Reproduction of LLaMA
+
+In this repo, we release a permissively licensed open source reproduction of Meta AI's [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) large language model. This release is a public preview of the 7B OpenLLaMA model trained on 200 billion tokens. We provide PyTorch and JAX weights of pre-trained OpenLLaMA models, as well as evaluation results and a comparison against the original LLaMA models. Stay tuned for our updates.
+
+**JAX and PyTorch Weights on Huggingface Hub**
+- [200B Checkpoint](https://huggingface.co/openlm-research/open_llama_7b_preview_200bt)
+- [300B Checkpoint](https://huggingface.co/openlm-research/open_llama_7b_preview_300bt)
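+
+As a convenience, here is a minimal, unofficial sketch of pulling the 300B preview repo linked above with the `huggingface_hub` client (the `local_dir` value is only an example; any local path works):
+
+```python
+from huggingface_hub import snapshot_download
+
+# Download the 300B-token preview checkpoint referenced above.
+snapshot_download(
+    repo_id="openlm-research/open_llama_7b_preview_300bt",
+    local_dir="checkpoints/open-llama/7B",  # example target directory
+)
+```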
+
+
+## Update 5/3/2023
+We have released a new checkpoint of OpenLLaMA 7B trained on 300B tokens. In communicating
+with our users, we have realized that many existing implementations of LLaMA do not
+prepend the BOS token (id=1) at generation time. Our 200B checkpoint is sensitive
+to this and may produce degraded results without the BOS token at the beginning. Hence,
+we recommend always prepending the BOS token when using our 200B checkpoint.
+
+In an effort to make our model broadly compatible with existing implementations, we have now
+released a new 300B checkpoint, which is less sensitive to the BOS token and can be used
+either way.
+
+
+## Dataset and Training
+
+We train our models on the [RedPajama](https://www.together.xyz/blog/redpajama) dataset released by [Together](https://www.together.xyz/), which is a reproduction of the LLaMA training dataset containing over 1.2 trillion tokens. We follow exactly the same preprocessing steps and training hyperparameters as the original LLaMA paper, including model architecture, context length, training steps, learning rate schedule, and optimizer. The only difference between our setting and the original one is the dataset used: OpenLLaMA employs the RedPajama dataset rather than the one utilized by the original LLaMA.
+
+We train the models on cloud TPU-v4s using [EasyLM](https://github.com/young-geng/EasyLM), a JAX-based training pipeline we developed for training and fine-tuning language models. We employ a combination of normal data parallelism and [fully sharded data parallelism (also known as ZeRO stage 3)](https://engineering.fb.com/2021/07/15/open-source/fsdp/) to balance the training throughput and memory usage. Overall we reach a throughput of over 1900 tokens / second / TPU-v4 chip in our training run.
+
+
+## Evaluation
+
+We evaluated OpenLLaMA on a wide range of tasks using [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). The LLaMA results are generated by running the original LLaMA model on the same evaluation metrics. We note that our results for the LLaMA model differ slightly from the original LLaMA paper, which we believe is a result of different evaluation protocols. Similar differences have been reported in [this issue of lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/issues/443). Additionally, we present the results of GPT-J, a 6B parameter model trained on the [Pile](https://pile.eleuther.ai/) dataset by [EleutherAI](https://www.eleuther.ai/).
+
+The original LLaMA model was trained for 1 trillion tokens and GPT-J was trained for 500 billion tokens, whereas OpenLLaMA was trained on 200 billion tokens. We present the results in the table below. OpenLLaMA exhibits comparable performance to the original LLaMA and GPT-J across a majority of tasks, and outperforms them in some tasks. We expect that the performance of OpenLLaMA, after completing its training on 1 trillion tokens, will be enhanced even further.
+
+
+| **Task/Metric** | **GPT-J 6B** | **LLaMA 7B** | **Open LLaMA 7B Preview 200B Tokens** |
+| ---------------------- | ------------ | ------------ | ------------------------------------- |
+| anli_r1/acc | 0.32 | 0.35 | 0.34 |
+| anli_r2/acc | 0.34 | 0.34 | 0.35 |
+| anli_r3/acc | 0.35 | 0.37 | 0.34 |
+| arc_challenge/acc | 0.34 | 0.39 | 0.31 |
+| arc_challenge/acc_norm | 0.37 | 0.41 | 0.34 |
+| arc_easy/acc | 0.67 | 0.68 | 0.66 |
+| arc_easy/acc_norm | 0.62 | 0.52 | 0.59 |
+| boolq/acc | 0.66 | 0.75 | 0.67 |
+| cb/acc | 0.36 | 0.36 | 0.38 |
+| cb/f1 | 0.26 | 0.24 | 0.29 |
+| hellaswag/acc | 0.50 | 0.56 | 0.47 |
+| hellaswag/acc_norm | 0.66 | 0.73 | 0.63 |
+| openbookqa/acc | 0.29 | 0.29 | 0.26 |
+| openbookqa/acc_norm | 0.38 | 0.41 | 0.37 |
+| piqa/acc | 0.75 | 0.78 | 0.74 |
+| piqa/acc_norm | 0.76 | 0.78 | 0.74 |
+| record/em | 0.88 | 0.91 | 0.87 |
+| record/f1 | 0.89 | 0.91 | 0.88 |
+| rte/acc | 0.54 | 0.56 | 0.53 |
+| truthfulqa_mc/mc1 | 0.20 | 0.21 | 0.21 |
+| truthfulqa_mc/mc2 | 0.36 | 0.34 | 0.34 |
+| wic/acc | 0.50 | 0.50 | 0.50 |
+| winogrande/acc | 0.64 | 0.68 | 0.62 |
+| wsc/acc | 0.37 | 0.35 | 0.57 |
+| Average | 0.50 | 0.52 | 0.50 |
+
+
+
+
+## Preview Weights Release and Usage
+
+To encourage feedback from the community, we release a preview checkpoint of our weights. The checkpoint can be downloaded from the [HuggingFace Hub](https://huggingface.co/openlm-research/open_llama_7b_preview_200bt). We release the weights in two formats: an EasyLM format to be used with our [EasyLM framework](https://github.com/young-geng/EasyLM), and a PyTorch format to be used with the [Huggingface Transformers](https://huggingface.co/docs/transformers/index) library.
+
+For using the weights in our EasyLM framework, please refer to the [LLaMA documentation of EasyLM](https://github.com/young-geng/EasyLM/blob/main/docs/llama.md). Note that unlike the original LLaMA model, our OpenLLaMA tokenizer and weights are trained completely from scratch, so there is no need to obtain the original LLaMA tokenizer and weights. For using the weights in the transformers library, please follow the [transformers LLaMA documentation](https://huggingface.co/docs/transformers/main/model_doc/llama). Note that we use the BOS (beginning of sentence) token (id=1) during training, so it is important to prepend this token for best performance during few-shot evaluation.
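+
+A minimal sketch of the BOS handling with the transformers-format weights (the local path below is a placeholder for wherever you stored the converted checkpoint; `LlamaTokenizer` already prepends BOS by default, so the explicit check is only a safeguard):
+
+```python
+import torch
+from transformers import LlamaForCausalLM, LlamaTokenizer
+
+path = "open_llama_7b_preview_300bt_transformers_weights"  # placeholder local path
+tokenizer = LlamaTokenizer.from_pretrained(path)
+model = LlamaForCausalLM.from_pretrained(path, torch_dtype=torch.float16)
+
+input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids
+# Make sure the sequence starts with the BOS token (id=1) before generating.
+if input_ids[0, 0].item() != tokenizer.bos_token_id:
+    bos = torch.tensor([[tokenizer.bos_token_id]], dtype=input_ids.dtype)
+    input_ids = torch.cat([bos, input_ids], dim=1)
+
+output = model.generate(input_ids, max_new_tokens=32)
+print(tokenizer.decode(output[0], skip_special_tokens=True))
+```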
+
+Both our training framework EasyLM and the preview checkpoint weights are licensed permissively under the Apache 2.0 license.
+
+
+## Future Plans
+
+The current release is only a preview of what the complete OpenLLaMA release will offer. We are currently focused on completing the training process on the entire RedPajama dataset. This will give us a good apples-to-apples comparison between the original LLaMA and our OpenLLaMA. In addition to the 7B model, we are also training a smaller 3B model in the hope of facilitating language model usage in low-resource use cases. Please stay tuned for our upcoming releases.
+
+
+
+## Contact
+
+We would love to get feedback from the community. If you have any questions, please open an issue or contact us.
+
+OpenLLaMA is developed by:
+[Xinyang Geng](https://young-geng.xyz/)* and [Hao Liu](https://www.haoliu.site/)* from Berkeley AI Research.
+*Equal Contribution
+
+
+## Reference
+
+If you found OpenLLaMA useful in your research or applications, please cite using the following BibTeX:
+```
+@software{openlm2023openllama,
+ author = {Geng, Xinyang and Liu, Hao},
+ title = {OpenLLaMA: An Open Reproduction of LLaMA},
+ month = May,
+ year = 2023,
+ url = {https://github.com/openlm-research/open_llama}
+}
+```
+```
+@software{together2023redpajama,
+ author = {Together Computer},
+ title = {RedPajama-Data: An Open Source Recipe to Reproduce LLaMA training dataset},
+ month = April,
+ year = 2023,
+ url = {https://github.com/togethercomputer/RedPajama-Data}
+}
+```
+```
+@article{touvron2023llama,
+ title={Llama: Open and efficient foundation language models},
+ author={Touvron, Hugo and Lavril, Thibaut and Izacard, Gautier and Martinet, Xavier and Lachaux, Marie-Anne and Lacroix, Timoth{\'e}e and Rozi{\`e}re, Baptiste and Goyal, Naman and Hambro, Eric and Azhar, Faisal and others},
+ journal={arXiv preprint arXiv:2302.13971},
+ year={2023}
+}
+```
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_easylm b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_easylm
new file mode 100644
index 0000000000000000000000000000000000000000..b06d20297811f3056bda7c6ff0bdfd1864de0b8c
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_easylm
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63ab9d652aaf4e0e47f1d9a0321ef565b62c02921ce0b18a781ba0daac2ebb98
+size 13476851687
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/config.json b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..cf5ef7be6f3f9c154b2cdef13ea87b9a67f60abc
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/config.json
@@ -0,0 +1,22 @@
+{
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 2048,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "pad_token_id": 0,
+ "rms_norm_eps": 1e-06,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.28.0.dev0",
+ "use_cache": true,
+ "vocab_size": 32000
+}
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/generation_config.json b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..55d7b5b6db760f8c1963be3d56a3bc363bacdfb1
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 0,
+ "transformers_version": "4.28.0.dev0"
+}
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00001-of-00002.bin b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00001-of-00002.bin
new file mode 100644
index 0000000000000000000000000000000000000000..0db7ffadc89169c8f2c1267c614730585e3c7dbc
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00001-of-00002.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adce75e45dbad20967c0e96a83b318720a767e4b8f77aabcd01cd2b38e8f0b2e
+size 9976634558
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00002-of-00002.bin b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00002-of-00002.bin
new file mode 100644
index 0000000000000000000000000000000000000000..56bd685f09828f41347246829488a2b0e5dcb833
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model-00002-of-00002.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a80d748a6ab528f0db2249013a5d3fea17e039ad9fa1bf3e170e9070ec30f938
+size 3500315539
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model.bin.index.json b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..db7264b24cac7a39947bb5fc02fe5c2d7ac9eaf4
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/pytorch_model.bin.index.json
@@ -0,0 +1,330 @@
+{
+ "metadata": {
+ "total_size": 13476839424
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.norm.weight": "pytorch_model-00002-of-00002.bin"
+ }
+}
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/special_tokens_map.json b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/special_tokens_map.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer.model b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..88fe55a1de2ddca4e12292cae5273413d8b637a7
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc820fc43f4173d6362c16658c409ed423929a807e55a984af96cce1277d39a4
+size 772031
diff --git a/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer_config.json b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a54b01aa3699f19e1aea416fc337f910f60c6839
--- /dev/null
+++ b/checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights/tokenizer_config.json
@@ -0,0 +1 @@
+{"bos_token": "", "eos_token": "", "model_max_length": 1000000000000000019884624838656, "tokenizer_class": "LlamaTokenizer", "unk_token": ""}
\ No newline at end of file
diff --git a/checkpoints/open-llama/7B/tokenizer.model b/checkpoints/open-llama/7B/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..88fe55a1de2ddca4e12292cae5273413d8b637a7
--- /dev/null
+++ b/checkpoints/open-llama/7B/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc820fc43f4173d6362c16658c409ed423929a807e55a984af96cce1277d39a4
+size 772031
diff --git a/checkpoints/open-llama/7B/tokenizer.vocab b/checkpoints/open-llama/7B/tokenizer.vocab
new file mode 100644
index 0000000000000000000000000000000000000000..54d6ff3ea3fa307f4920fd473f18a361be6d48e6
--- /dev/null
+++ b/checkpoints/open-llama/7B/tokenizer.vocab
@@ -0,0 +1,32000 @@
+<unk> 0
+<s> 0
+</s> 0
+<0x00> 0
+<0x01> 0
+<0x02> 0
+<0x03> 0
+<0x04> 0
+<0x05> 0
+<0x06> 0
+<0x07> 0
+<0x08> 0
+<0x09> 0
+<0x0A> 0
+<0x0B> 0
+<0x0C> 0
+<0x0D> 0
+<0x0E> 0
+<0x0F> 0
+<0x10> 0
+<0x11> 0
+<0x12> 0
+<0x13> 0
+<0x14> 0
+<0x15> 0
+<0x16> 0
+<0x17> 0
+<0x18> 0
+<0x19> 0
+<0x1A> 0
+<0x1B> 0
+<0x1C> 0
+<0x1D> 0
+<0x1E> 0
+<0x1F> 0
+<0x20> 0
+<0x21> 0
+<0x22> 0
+<0x23> 0
+<0x24> 0
+<0x25> 0
+<0x26> 0
+<0x27> 0
+<0x28> 0
+<0x29> 0
+<0x2A> 0
+<0x2B> 0
+<0x2C> 0
+<0x2D> 0
+<0x2E> 0
+<0x2F> 0
+<0x30> 0
+<0x31> 0
+<0x32> 0
+<0x33> 0
+<0x34> 0
+<0x35> 0
+<0x36> 0
+<0x37> 0
+<0x38> 0
+<0x39> 0
+<0x3A> 0
+<0x3B> 0
+<0x3C> 0
+<0x3D> 0
+<0x3E> 0
+<0x3F> 0
+<0x40> 0
+<0x41> 0
+<0x42> 0
+<0x43> 0
+<0x44> 0
+<0x45> 0
+<0x46> 0
+<0x47> 0
+<0x48> 0
+<0x49> 0
+<0x4A> 0
+<0x4B> 0
+<0x4C> 0
+<0x4D> 0
+<0x4E> 0
+<0x4F> 0
+<0x50> 0
+<0x51> 0
+<0x52> 0
+<0x53> 0
+<0x54> 0
+<0x55> 0
+<0x56> 0
+<0x57> 0
+<0x58> 0
+<0x59> 0
+<0x5A> 0
+<0x5B> 0
+<0x5C> 0
+<0x5D> 0
+<0x5E> 0
+<0x5F> 0
+<0x60> 0
+<0x61> 0
+<0x62> 0
+<0x63> 0
+<0x64> 0
+<0x65> 0
+<0x66> 0
+<0x67> 0
+<0x68> 0
+<0x69> 0
+<0x6A> 0
+<0x6B> 0
+<0x6C> 0
+<0x6D> 0
+<0x6E> 0
+<0x6F> 0
+<0x70> 0
+<0x71> 0
+<0x72> 0
+<0x73> 0
+<0x74> 0
+<0x75> 0
+<0x76> 0
+<0x77> 0
+<0x78> 0
+<0x79> 0
+<0x7A> 0
+<0x7B> 0
+<0x7C> 0
+<0x7D> 0
+<0x7E> 0
+<0x7F> 0
+<0x80> 0
+<0x81> 0
+<0x82> 0
+<0x83> 0
+<0x84> 0
+<0x85> 0
+<0x86> 0
+<0x87> 0
+<0x88> 0
+<0x89> 0
+<0x8A> 0
+<0x8B> 0
+<0x8C> 0
+<0x8D> 0
+<0x8E> 0
+<0x8F> 0
+<0x90> 0
+<0x91> 0
+<0x92> 0
+<0x93> 0
+<0x94> 0
+<0x95> 0
+<0x96> 0
+<0x97> 0
+<0x98> 0
+<0x99> 0
+<0x9A> 0
+<0x9B> 0
+<0x9C> 0
+<0x9D> 0
+<0x9E> 0
+<0x9F> 0
+<0xA0> 0
+<0xA1> 0
+<0xA2> 0
+<0xA3> 0
+<0xA4> 0
+<0xA5> 0
+<0xA6> 0
+<0xA7> 0
+<0xA8> 0
+<0xA9> 0
+<0xAA> 0
+<0xAB> 0
+<0xAC> 0
+<0xAD> 0
+<0xAE> 0
+<0xAF> 0
+<0xB0> 0
+<0xB1> 0
+<0xB2> 0
+<0xB3> 0
+<0xB4> 0
+<0xB5> 0
+<0xB6> 0
+<0xB7> 0
+<0xB8> 0
+<0xB9> 0
+<0xBA> 0
+<0xBB> 0
+<0xBC> 0
+<0xBD> 0
+<0xBE> 0
+<0xBF> 0
+<0xC0> 0
+<0xC1> 0
+<0xC2> 0
+<0xC3> 0
+<0xC4> 0
+<0xC5> 0
+<0xC6> 0
+<0xC7> 0
+<0xC8> 0
+<0xC9> 0
+<0xCA> 0
+<0xCB> 0
+<0xCC> 0
+<0xCD> 0
+<0xCE> 0
+<0xCF> 0
+<0xD0> 0
+<0xD1> 0
+<0xD2> 0
+<0xD3> 0
+<0xD4> 0
+<0xD5> 0
+<0xD6> 0
+<0xD7> 0
+<0xD8> 0
+<0xD9> 0
+<0xDA> 0
+<0xDB> 0
+<0xDC> 0
+<0xDD> 0
+<0xDE> 0
+<0xDF> 0
+<0xE0> 0
+<0xE1> 0
+<0xE2> 0
+<0xE3> 0
+<0xE4> 0
+<0xE5> 0
+<0xE6> 0
+<0xE7> 0
+<0xE8> 0
+<0xE9> 0
+<0xEA> 0
+<0xEB> 0
+<0xEC> 0
+<0xED> 0
+<0xEE> 0
+<0xEF> 0
+<0xF0> 0
+<0xF1> 0
+<0xF2> 0
+<0xF3> 0
+<0xF4> 0
+<0xF5> 0
+<0xF6> 0
+<0xF7> 0
+<0xF8> 0
+<0xF9> 0
+<0xFA> 0
+<0xFB> 0
+<0xFC> 0
+<0xFD> 0
+<0xFE> 0
+<0xFF> 0
+▁t -0
+▁a -1
+in -2
+he -3
+re -4
+on -5
+er -6
+▁the -7
+▁s -8
+at -9
+▁o -10
+en -11
+▁w -12
+or -13
+it -14
+an -15
+▁c -16
+is -17
+es -18
+▁f -19
+al -20
+nd -21
+ing -22
+▁p -23
+ed -24
+▁b -25
+ar -26
+ou -27
+▁of -28
+▁in -29
+▁to -30
+▁m -31
+▁and -32
+ic -33
+ion -34
+▁d -35
+as -36
+le -37
+▁h -38
+om -39
+▁th -40
+ent -41
+▁T -42
+il -43
+st -44
+▁S -45
+ro -46
+▁re -47
+el -48
+▁A -49
+▁l -50
+ct -51
+▁n -52
+▁I -53
+et -54
+▁C -55
+▁e -56
+ve -57
+id -58
+▁g -59
+ut -60
+ol -61
+ot -62
+▁is -63
+am -64
+▁M -65
+ur -66
+ly -67
+im -68
+▁on -69
+▁for -70
+ad -71
+ce -72
+ation -73
+▁be -74
+ig -75
+ay -76
+ow -77
+us -78
+ch -79
+▁P -80
+ver -81
+▁B -82
+▁that -83
+▁y -84
+▁st -85
+ir -86
+ith -87
+▁The -88
+▁W -89
+▁H -90
+▁he -91
+▁with -92
+▁( -93
+▁D -94
+ul -95
+ra -96
+un -97
+▁it -98
+se -99
+ter -100
+▁R -101
+▁F -102
+▁as -103
+▁an -104
+▁wh -105
+▁you -106
+▁al -107
+if -108
+em -109
+ers -110
+▁con -111
+▁L -112
+ill -113
+▁N -114
+ag -115
+▁pro -116
+ri -117
+▁E -118
+od -119
+ist -120
+ac -121
+her -122
+▁we -123
+▁G -124
+est -125
+and -126
+op -127
+ate -128
+▁at -129
+um -130
+th -131
+ab -132
+ess -133
+▁was -134
+▁com -135
+ld -136
+pp -137
+▁are -138
+ew -139
+▁v -140
+ore -141
+ke -142
+ity -143
+rom -144
+▁de -145
+▁or -146
+res -147
+igh -148
+▁ex -149
+ment -150
+▁O -151
+os -152
+qu -153
+oc -154
+▁J -155
+▁ha -156
+ect -157
+▁by -158
+ort -159
+ant -160
+ies -161
+ain -162
+art -163
+▁se -164
+iv -165
+▁su -166
+▁r -167
+▁from -168
+ive -169
+nt -170
+ust -171
+end -172
+▁not -173
+▁this -174
+ud -175
+▁have -176
+▁sh -177
+ight -178
+all -179
+▁ch -180
+our -181
+▁U -182
+ard -183
+red -184
+▁u -185
+ial -186
+ear -187
+▁“ -188
+▁le -189
+out -190
+ould -191
+pt -192
+ell -193
+ge -194
+ost -195
+▁In -196
+▁wor -197
+rou -198
+og -199
+▁us -200
+▁ab -201
+gh -202
+ast -203
+ure -204
+ome -205
+pe -206
+ak -207
+ine -208
+▁pl -209
+ok -210
+▁has -211
+ich -212
+per -213
+▁can -214
+pl -215
+▁j -216
+▁K -217
+▁St -218
+ions -219
+ff -220
+▁Th -221
+ber -222
+▁will -223
+ide -224
+▁all -225
+ia -226
+▁k -227
+ack -228
+ans -229
+ical -230
+▁int -231
+▁" -232
+▁whe -233
+▁his -234
+ated -235
+ice -236
+ther -237
+ong -238
+▁go -239
+▁do -240
+ame -241
+age -242
+▁ne -243
+ie -244
+▁but -245
+ry -246
+ary -247
+ip -248
+▁ad -249
+ub -250
+iz -251
+ous -252
+▁cl -253
+ue -254
+are -255
+▁me -256
+▁en -257
+act -258
+▁their -259
+cc -260
+▁comp -261
+ind -262
+▁V -263
+▁im -264
+orm -265
+ass -266
+▁they -267
+ap -268
+able -269
+.. -270
+cl -271
+▁cont -272
+ult -273
+▁out -274
+▁un -275
+▁Ch -276
+one -277
+ime -278
+ook -279
+▁who -280
+ep -281
+▁which -282
+▁more -283
+▁Y -284
+ance -285
+ations -286
+▁sa -287
+▁res -288
+ally -289
+ib -290
+ere -291
+▁up -292
+▁one -293
+ire -294
+▁your -295
+ign -296
+very -297
+so -298
+ru -299
+ence -300
+ord -301
+▁so -302
+ake -303
+▁about -304
+ite -305
+av -306
+ction -307
+du -308
+▁man -309
+▁$ -310
+ace -311
+ks -312
+▁It -313
+ase -314
+▁app -315
+ents -316
+port -317
+▁li -318
+▁were -319
+▁ar -320
+▁per -321
+ount -322
+vel -323
+ach -324
+ail -325
+▁year -326
+▁- -327
+▁tr -328
+▁dis -329
+ond -330
+form -331
+▁other -332
+ile -333
+ays -334
+▁te -335
+▁ag -336
+ress -337
+▁part -338
+▁ev -339
+ory -340
+ition -341
+▁her -342
+ose -343
+▁had -344
+▁new -345
+▁been -346
+▁off -347
+reat -348
+erv -349
+ll -350
+▁qu -351
+ely -352
+▁sp -353
+▁our -354
+ens -355
+mer -356
+▁also -357
+own -358
+int -359
+lic -360
+▁its -361
+ian -362
+ov -363
+vers -364
+▁work -365
+ob -366
+ors -367
+ang -368
+ree -369
+▁my -370
+ild -371
+ark -372
+te -373
+▁pe -374
+ings -375
+▁if -376
+▁\ -377
+▁there -378
+▁He -379
+▁would -380
+ish -381
+ound -382
+▁sc -383
+ove -384
+▁rec -385
+▁Re -386
+▁any -387
+▁time -388
+▁over -389
+▁kn -390
+▁them -391
+ck -392
+ne -393
+irst -394
+ric -395
+▁some -396
+). -397
+ph -398
+ople -399
+ough -400
+use -401
+▁than -402
+▁when -403
+▁We -404
+▁bec -405
+▁said -406
+▁pre -407
+▁ro -408
+nder -409
+ces -410
+rit -411
+▁This -412
+▁like -413
+fter -414
+▁what -415
+▁= -416
+ath -417
+oy -418
+oll -419
+wo -420
+ons -421
+ates -422
+wn -423
+old -424
+▁into -425
+▁get -426
+urn -427
+... -428
+▁acc -429
+xt -430
+▁– -431
+ink -432
+▁Un -433
+ade -434
+▁people -435
+▁comm -436
+hed -437
+▁no -438
+clud -439
+▁< -440
+), -441
+ual -442
+▁she -443
+▁just -444
+tern -445
+ick -446
+▁Wh -447
+▁am -448
+rough -449
+ject -450
+aw -451
+ious -452
+ater -453
+ational -454
+▁first -455
+com -456
+iew -457
+▁des -458
+▁As -459
+ont -460
+we -461
+▁& -462
+iss -463
+ng -464
+lect -465
+▁how -466
+ft -467
+ool -468
+ood -469
+▁ind -470
+▁act -471
+▁Se -472
+amp -473
+iff -474
+▁fe -475
+▁under -476
+▁Com -477
+ram -478
+tt -479
+pec -480
+ities -481
+▁De -482
+▁bet -483
+ause -484
+ning -485
+▁only -486
+olog -487
+▁prov -488
+▁tra -489
+li -490
+▁includ -491
+▁cons -492
+ced -493
+eth -494
+▁two -495
+▁may -496
+); -497
+▁bl -498
+▁em -499
+▁most -500
+erm -501
+▁New -502
+round -503
+ert -504
+// -505
+▁need -506
+ident -507
+pect -508
+ise -509
+▁after -510
+get -511
+.” -512
+les -513
+ful -514
+▁inv -515
+▁reg -516
+ific -517
+ublic -518
+rib -519
+▁years -520
+▁add -521
+▁through -522
+ps -523
+▁Pro -524
+elf -525
+ating -526
+row -527
+▁att -528
+▁{ -529
+▁And -530
+yst -531
+oint -532
+▁know -533
+ock -534
+ics -535
+itt -536
+▁him -537
+▁could -538
+▁imp -539
+cre -540
+ife -541
+▁back -542
+▁supp -543
+▁stud -544
+ward -545
+▁where -546
+▁these -547
+▁ass -548
+ict -549
+▁co -550
+▁Ar -551
+▁rel -552
+als -553
+ments -554
+hen -555
+ased -556
+oth -557
+▁An -558
+fore -559
+▁pol -560
+ited -561
+▁see -562
+▁diff -563
+ener -564
+day -565
+▁bu -566
+ork -567
+▁play -568
+ys -569
+▁well -570
+▁ra -571
+▁Al -572
+▁pr -573
+arch -574
+▁fl -575
+▁[ -576
+hip -577
+ily -578
+any -579
+▁ph -580
+▁ret -581
+▁rem -582
+und -583
+ility -584
+uring -585
+▁every -586
+cess -587
+▁} -588
+▁sub -589
+ata -590
+anc -591
+oss -592
+▁produ -593
+▁serv -594
+ative -595
+ange -596
+=" -597
+erson -598
+ren -599
+io -600
+uch -601
+ts -602
+▁such -603
+ivers -604
+ular -605
+ring -606
+▁use -607
+▁You -608
+▁make -609
+ied -610
+ystem -611
+ract -612
+▁many -613
+velop -614
+▁* -615
+▁look -616
+▁did -617
+ible -618
+▁col -619
+▁ac -620
+▁want -621
+▁hel -622
+ower -623
+▁ent -624
+land -625
+▁now -626
+ures -627
+▁pres -628
+ty -629
+▁then -630
+alth -631
+eg -632
+▁set -633
+▁even -634
+▁fin -635
+ss -636
+▁right -637
+riv -638
+▁way -639
+ience -640
+▁again -641
+ull -642
+▁very -643
+aking -644
+chool -645
+▁For -646
+▁But -647
+▁ob -648
+▁because -649
+gan -650
+▁' -651
+ues -652
+ices -653
+ement -654
+▁Sh -655
+,” -656
+au -657
+▁def -658
+other -659
+its -660
+▁should -661
+▁trans -662
+iness -663
+▁show -664
+ah -665
+▁world -666
+▁end -667
+meric -668
+▁eff -669
+con -670
+▁sec -671
+▁own -672
+ax -673
+▁exper -674
+ars -675
+▁being -676
+air -677
+ting -678
+ution -679
+ash -680
+up -681
+ient -682
+ner -683
+oun -684
+▁inst -685
+▁ke -686
+irect -687
+cept -688
+iel -689
+▁count -690
+hes -691
+▁mod -692
+cy -693
+ank -694
+▁help -695
+▁inter -696
+ins -697
+az -698
+ism -699
+▁If -700
+ug -701
+vern -702
+▁art -703
+▁dec -704
+ix -705
+▁spec -706
+formation -707
+▁those -708
+▁think -709
+ble -710
+▁gu -711
+▁long -712
+ex -713
+pr -714
+▁sur -715
+▁good -716
+man -717
+ason -718
+▁sm -719
+ollow -720
+▁Americ -721
+view -722
+ven -723
+uth -724
+ween -725
+▁made -726
+▁call -727
+ale -728
+▁much -729
+ince -730
+ives -731
+▁loc -732
+▁differe -733
+▁start -734
+▁don -735
+▁high -736
+▁mem -737
+usiness -738
+▁fam -739
+ef -740
+▁bel -741
+▁mon -742
+ines -743
+▁does -744
+▁requ -745
+▁develop -746
+led -747
+ek -748
+▁num -749
+▁class -750
+▁before -751
+▁. -752
+▁public -753
+▁Be -754
+▁char -755
+oh -756
+▁great -757
+▁between -758
+oci -759
+▁down -760
+▁lead -761
+ten -762
+▁ins -763
+▁Le -764
+gram -765
+▁exp -766
+way -767
+▁‘ -768
+▁Cl -769
+▁op -770
+▁Ad -771
+ather -772
+▁med -773
+chn -774
+ists -775
+▁How -776
+▁read -777
+▁ref -778
+▁system -779
+▁gr -780
+▁Sc -781
+imes -782
+▁person -783
+▁| -784
+▁Tr -785
+ouse -786
+▁form -787
+▁ear -788
+▁here -789
+▁det -790
+der -791
+atch -792
+., -793
+▁Is -794
+ton -795
+uss -796
+alk -797
+The -798
+ology -799
+▁Q -800
+▁last -801
+ave -802
+iversity -803
+▁min -804
+uc -805
+▁used -806
+▁— -807
+▁find -808
+ode -809
+▁ed -810
+▁fil -811
+▁commun -812
+▁sign -813
+▁cur -814
+▁ser -815
+▁som -816
+ern -817
+ually -818
+▁On -819
+▁disc -820
+read -821
+▁ext -822
+ital -823
+▁val -824
+▁Ind -825
+ters -826
+▁follow -827
+▁car -828
+▁support -829
+▁life -830
+▁fun -831
+▁take -832
+▁There -833
+▁sim -834
+▁rep -835
+ets -836
+let -837
+▁while -838
+-- -839
+ages -840
+arm -841
+▁She -842
+▁conf -843
+ision -844
+." -845
+ize -846
+ield -847
+ames -848
+▁var -849
+vent -850
+ered -851
+▁found -852
+▁import -853
+▁cour -854
+orn -855
+els -856
+rent -857
+▁Ph -858
+roup -859
+▁point -860
+ved -861
+ -862
+ute -863
+▁str -864
+▁cre -865
+▁day -866
+ness -867
+▁both -868
+▁same -869
+:// -870
+▁Bl -871
+▁inf -872
+▁return -873
+▁information -874
+▁child -875
+▁still -876
+ized -877
+▁though -878
+the -879
+ever -880
+line -881
+▁Ex -882
+ection -883
+▁cent -884
+ands -885
+ined -886
+▁gener -887
+ross -888
+▁real -889
+▁Z -890
+ically -891
+ral -892
+▁say -893
+▁av -894
+▁best -895
+▁each -896
+akes -897
+▁Fr -898
+cent -899
+ants -900
+▁data -901
+▁mark -902
+▁They -903
+▁dist -904
+▁around -905
+▁different -906
+▁report -907
+▁Con -908
+▁three -909
+▁team -910
+ety -911
+▁book -912
+ished -913
+▁cor -914
+▁business -915
+▁br -916
+aj -917
+▁provid -918
+▁during -919
+ann -920
+▁care -921
+▁el -922
+illion -923
+ional -924
+▁/ -925
+ier -926
+ature -927
+▁result -928
+erest -929
+oot -930
+ruct -931
+▁So -932
+ural -933
+▁gl -934
+gy -935
+vernment -936
+ER -937
+▁fact -938
+▁contin -939
+▁really -940
+bs -941
+▁Sp -942
+▁appro -943
+▁Comm -944
+▁che -945
+▁going -946
+▁lar -947
+▁Pl -948
+bers -949
+ox -950
+▁partic -951
+ired -952
+inal -953
+ared -954
+ense -955
+▁resp -956
+▁All -957
+▁What -958
+ik -959
+ving -960
+raph -961
+▁program -962
+▁Mar -963
+ger -964
+▁including -965
+▁ty -966
+▁vis -967
+▁dep -968
+▁home -969
+ien -970
+▁law -971
+▁iss -972
+ody -973
+▁school -974
+att -975
+▁del -976
+ina -977
+ple -978
+ec -979
+▁sl -980
+▁Col -981
+▁writ -982
+mber -983
+ms -984
+▁against -985
+▁place -986
+▁Res -987
+▁University -988
+▁US -989
+▁process -990
+▁direct -991
+▁design -992
+▁pat -993
+▁# -994
+▁post -995
+omen -996
+self -997
+oad -998
+ering -999
+ration -1000
+rist -1001
+▁run -1002
+▁To -1003
+▁expl -1004
+usic -1005
+▁stat -1006
+▁Te -1007
+▁sol -1008
+ged -1009
+ially -1010
+▁state -1011
+▁est -1012
+▁techn -1013
+uro -1014
+▁health -1015
+ploy -1016
+ym -1017
+▁number -1018
+▁adv -1019
+▁const -1020
+▁open -1021
+▁interest -1022
+▁htt -1023
+▁cle -1024
+▁pur -1025
+▁hist -1026
+▁pass -1027
+ways -1028
+▁happ -1029
+ator -1030
+ris -1031
+ron -1032
+sc -1033
+▁prof -1034
+▁prot -1035
+▁second -1036
+riend -1037
+ves -1038
+▁build -1039
+▁organ -1040
+▁leg -1041
+be -1042
+ission -1043
+nce -1044
+arn -1045
+ases -1046
+▁invest -1047
+ember -1048
+▁mov -1049
+▁That -1050
+▁list -1051
+ocial -1052
+▁poss -1053
+▁perform -1054
+▁since -1055
+▁At -1056
+iving -1057
+uman -1058
+▁too -1059
+uthor -1060
+▁effect -1061
+▁must -1062
+ption -1063
+less -1064
+app -1065
+to -1066
+▁main -1067
+son -1068
+outh -1069
+▁says -1070
+▁month -1071
+▁Or -1072
+ohn -1073
+▁oper -1074
+ining -1075
+▁project -1076
+▁using -1077
+▁inc -1078
+▁opp -1079
+▁i -1080
+▁incre -1081
+orth -1082
+▁la -1083
+ittle -1084
+▁game -1085
+ccess -1086
+▁few -1087
+_{ -1088
+▁another -1089
+▁lot -1090
+ething -1091
+gg -1092
+▁rest -1093
+"> -1094
+eng -1095
+conom -1096
+▁hand -1097
+ality -1098
+rol -1099
+co -1100
+▁case -1101
+▁power -1102
+ote -1103
+lease -1104
+▁May -1105
+▁rese -1106
+osed -1107
+ury -1108
+▁top -1109
+lement -1110
+eb -1111
+oard -1112
+▁head -1113
+ertain -1114
+ON -1115
+▁dem -1116
+▁name -1117
+min -1118
+stand -1119
+thing -1120
+▁Jan -1121
+▁quest -1122
+▁rece -1123
+▁company -1124
+▁pop -1125
+ley -1126
+▁family -1127
+▁tri -1128
+IN -1129
+▁test -1130
+▁No -1131
+▁Go -1132
+▁product -1133
+ump -1134
+▁ann -1135
+▁allow -1136
+St -1137
+ource -1138
+med -1139
+▁sk -1140
+▁without -1141
+▁Cent -1142
+ately -1143
+yn -1144
+ird -1145
+▁When -1146
+▁things -1147
+par -1148
+▁par -1149
+▁pos -1150
+▁week -1151
+pro -1152
+▁come -1153
+▁plan -1154
+▁John -1155
+que -1156
+ling -1157
+ior -1158
+ization -1159
+▁something -1160
+▁With -1161
+▁big -1162
+▁next -1163
+▁never -1164
+," -1165
+por -1166
+lection -1167
+ternational -1168
+ery -1169
+▁put -1170
+▁bro -1171
+err -1172
+▁By -1173
+▁di -1174
+▁current -1175
+▁small -1176
+ised -1177
+une -1178
+ott -1179
+▁Am -1180
+▁belie -1181
+ailable -1182
+▁// -1183
+ified -1184
+▁Co -1185
+▁government -1186
+▁record -1187
+▁Eng -1188
+ope -1189
+▁ide -1190
+ains -1191
+▁ca -1192
+enn -1193
+▁Man -1194
+▁spe -1195
+▁American -1196
+▁level -1197
+▁Can -1198
+ivid -1199
+▁stand -1200
+▁old -1201
+ubl -1202
+▁Rep -1203
+ober -1204
+of -1205
+ived -1206
+▁turn -1207
+▁equ -1208
+▁might -1209
+▁feel -1210
+reen -1211
+▁stre -1212
+▁seem -1213
+icle -1214
+iron -1215
+▁met -1216
+vious -1217
+▁manag -1218
+▁cap -1219
+rem -1220
+itions -1221
+▁important -1222
+▁better -1223
+iven -1224
+▁always -1225
+▁try -1226
+▁fr -1227
+▁Car -1228
+ories -1229
+aim -1230
+ries -1231
+▁four -1232
+▁offic -1233
+here -1234
+▁United -1235
+ium -1236
+arent -1237
+▁little -1238
+▁country -1239
+ustom -1240
+▁Christ -1241
+▁within -1242
+▁proble -1243
+▁present -1244
+▁mat -1245
+▁term -1246
+sp -1247
+▁got -1248
+▁beh -1249
+ney -1250
+ES -1251
+▁eas -1252
+ured -1253
+ency -1254
+AT -1255
+AR -1256
+ample -1257
+▁Wor -1258
+▁fac -1259
+▁mar -1260
+▁group -1261
+▁grow -1262
+ability -1263
+ument -1264
+▁aff -1265
+gin -1266
+ider -1267
+ention -1268
+sh -1269
+▁story -1270
+▁keep -1271
+▁elect -1272
+▁eng -1273
+▁market -1274
+▁prom -1275
+▁pay -1276
+▁less -1277
+ively -1278
+▁view -1279
+ify -1280
+▁let -1281
+▁Pol -1282
+ajor -1283
+▁ask -1284
+▁hard -1285
+▁cr -1286
+▁love -1287
+ination -1288
+▁change -1289
+inc -1290
+▁sing -1291
+ably -1292
+▁available -1293
+ained -1294
+▁Ab -1295
+ently -1296
+▁win -1297
+▁polit -1298
+▁bre -1299
+▁local -1300
+▁Med -1301
+▁Su -1302
+▁experience -1303
+▁order -1304
+lex -1305
+atic -1306
+ush -1307
+▁chang -1308
+▁children -1309
+ording -1310
+.... -1311
+ortun -1312
+() -1313
+br -1314
+▁et -1315
+▁friend -1316
+ats -1317
+ients -1318
+▁million -1319
+▁Euro -1320
+oney -1321
+▁water -1322
+ians -1323
+▁es -1324
+work -1325
+til -1326
+▁cost -1327
+▁music -1328
+▁free -1329
+▁full -1330
+▁Pres -1331
+▁One -1332
+▁sit -1333
+over -1334
+by -1335
+ights -1336
+▁non -1337
+OR -1338
+▁working -1339
+put -1340
+ential -1341
+▁access -1342
+augh -1343
+▁Pr -1344
+ref -1345
+▁charact -1346
+ourn -1347
+▁service -1348
+▁days -1349
+▁community -1350
+▁proper -1351
+ene -1352
+In -1353
+ware -1354
+ma -1355
+▁expect -1356
+▁bas -1357
+icy -1358
+org -1359
+ane -1360
+▁En -1361
+▁social -1362
+▁line -1363
+acy -1364
+iver -1365
+ead -1366
+▁meet -1367
+▁Fl -1368
+AN -1369
+▁able -1370
+▁left -1371
+▁@ -1372
+▁Br -1373
+▁employ -1374
+uture -1375
+▁success -1376
+▁Ass -1377
+ister -1378
+▁activ -1379
+ee -1380
+▁indust -1381
+▁research -1382
+ulation -1383
+▁fund -1384
+▁+ -1385
+▁pract -1386
+ising -1387
+▁App -1388
+oor -1389
+▁past -1390
+rict -1391
+viron -1392
+osp -1393
+ocus -1394
+▁move -1395
+▁women -1396
+ours -1397
+oid -1398
+aster -1399
+▁near -1400
+▁Ne -1401
+irc -1402
+ai -1403
+▁orig -1404
+ograph -1405
+▁students -1406
+▁human -1407
+urs -1408
+▁priv -1409
+▁addition -1410
+urity -1411
+** -1412
+duct -1413
+ended -1414
+▁provide -1415
+▁Im -1416
+▁: -1417
+▁author -1418
+▁called -1419
+▁mil -1420
+ethod -1421
+iting -1422
+uck -1423
+▁cond -1424
+ards -1425
+▁York -1426
+{\ -1427
+▁Ed -1428
+▁econom -1429
+▁State -1430
+▁$\ -1431
+▁fore -1432
+▁why -1433
+▁X -1434
+▁Gu -1435
+ateg -1436
+▁word -1437
+pos -1438
+▁area -1439
+side -1440
+▁event -1441
+▁occ -1442
+eter -1443
+▁function -1444
+▁film -1445
+▁Gr -1446
+▁today -1447
+▁services -1448
+idd -1449
+ending -1450
+▁major -1451
+cer -1452
+ID -1453
+▁Europe -1454
+veral -1455
+▁World -1456
+▁making -1457
+▁National -1458
+▁members -1459
+▁z -1460
+for -1461
+▁dev -1462
+ances -1463
+resent -1464
+▁away -1465
+▁My -1466
+aff -1467
+▁Inst -1468
+▁anal -1469
+▁desc -1470
+▁give -1471
+erence -1472
+alf -1473
+▁until -1474
+EN -1475
+▁mean -1476
+ready -1477
+▁understand -1478
+▁across -1479
+af -1480
+▁Min -1481
+", -1482
+▁based -1483
+val -1484
+aut -1485
+▁war -1486
+uary -1487
+obal -1488
+▁value -1489
+▁dra -1490
+rop -1491
+lish -1492
+ales -1493
+▁along -1494
+▁ident -1495
+artment -1496
+▁kind -1497
+itor -1498
+▁lik -1499
+▁city -1500
+wards -1501
+▁far -1502
+▁After -1503
+ots -1504
+▁custom -1505
+▁applic -1506
+▁young -1507
+ividual -1508
+▁didn -1509
+eral -1510
+▁States -1511
+▁camp -1512
+▁conn -1513
+math -1514
+▁following -1515
+▁Ju -1516
+▁become -1517
+▁study -1518
+▁ben -1519
+▁Oct -1520
+▁history -1521
+▁course -1522
+urther -1523
+▁cho -1524
+▁job -1525
+itive -1526
+▁educ -1527
+vironment -1528
+▁special -1529
+▁lim -1530
+▁development -1531
+go -1532
+AS -1533
+▁Cont -1534
+▁_ -1535
+ike -1536
+▁Me -1537
+▁Af -1538
+▁sure -1539
+idence -1540
+▁season -1541
+▁cult -1542
+▁Act -1543
+▁thing -1544
+▁ -1545
+▁known -1546
+▁Ge -1547
+iod -1548
+▁impro -1549
+▁Apr -1550
+▁mult -1551
+yl -1552
+gether -1553
+oman -1554
+▁together -1555
+▁often -1556
+▁However -1557
+▁already -1558
+ffic -1559
+▁Mc -1560
+ream -1561
+imate -1562
+sel -1563
+▁trad -1564
+▁pa -1565
+▁times -1566
+ole -1567
+ray -1568
+▁clear -1569
+▁saf -1570
+▁came -1571
+omm -1572
+▁control -1573
+▁Em -1574
+▁tell -1575
+▁having -1576
+▁aut -1577
+▁least -1578
+▁Will -1579
+▁creat -1580
+▁Bro -1581
+▁men -1582
+leg -1583
+ither -1584
+▁invol -1585
+▁exc -1586
+iol -1587
+▁short -1588
+ored -1589
+▁Count -1590
+▁future -1591
+▁phot -1592
+▁focus -1593
+▁particular -1594
+▁several -1595
+▁later -1596
+rap -1597
+▁early -1598
+lev -1599
+ows -1600
+▁Cal -1601
+ued -1602
+▁given -1603
+▁City -1604
+▁Ag -1605
+▁Mr -1606
+▁Part -1607
+▁Do -1608
+right -1609
+ung -1610
+▁profess -1611
+▁individual -1612
+▁These -1613
+aterial -1614
+▁ever -1615
+▁Aug -1616
+▁March -1617
+▁certain -1618
+▁School -1619
+▁opt -1620
+elt -1621
+▁include -1622
+▁talk -1623
+aps -1624
+▁example -1625
+ideo -1626
+ched -1627
+▁net -1628
+▁large -1629
+▁Aust -1630
+▁claim -1631
+▁pot -1632
+aken -1633
+▁ant -1634
+▁others -1635
+tr -1636
+▁conc -1637
+▁South -1638
+ony -1639
+▁means -1640
+▁air -1641
+▁self -1642
+ww -1643
+che -1644
+▁ve -1645
+▁possible -1646
+▁treat -1647
+de -1648
+▁hold -1649
+▁Har -1650
+not -1651
+▁sw -1652
+▁January -1653
+ources -1654
+▁field -1655
+ected -1656
+▁wr -1657
+▁money -1658
+▁Art -1659
+▁among -1660
+ones -1661
+AL -1662
+IS -1663
+ides -1664
+urch -1665
+www -1666
+ball -1667
+roll -1668
+▁vers -1669
+ribut -1670
+iam -1671
+abor -1672
+lege -1673
+ills -1674
+▁account -1675
+▁series -1676
+▁-- -1677
+ask -1678
+cing -1679
+▁Dep -1680
+aring -1681
+▁feat -1682
+most -1683
+▁enough -1684
+▁ris -1685
+▁Sept -1686
+yle -1687
+ique -1688
+▁took -1689
+▁Not -1690
+▁Comp -1691
+▁June -1692
+rew -1693
+ipp -1694
+▁light -1695
+ted -1696
+▁tem -1697
+▁type -1698
+▁April -1699
+▁War -1700
+▁done -1701
+irl -1702
+▁review -1703
+▁Gl -1704
+ilar -1705
+▁President -1706
+▁side -1707
+me -1708
+ogn -1709
+ript -1710
+▁... -1711
+▁July -1712
+▁fre -1713
+▁happen -1714
+▁North -1715
+▁His -1716
+ada -1717
+▁learn -1718
+▁house -1719
+set -1720
+ape -1721
+▁discuss -1722
+▁told -1723
+ids -1724
+▁live -1725
+▁reason -1726
+▁Par -1727
+▁Che -1728
+▁crit -1729
+▁makes -1730
+▁hig -1731
+▁News -1732
+▁thought -1733
+▁tax -1734
+▁seen -1735
+▁Tra -1736
+▁webs -1737
+▁strong -1738
+▁vari -1739
+IC -1740
+▁Our -1741
+▁works -1742
+▁opportun -1743
+ails -1744
+ership -1745
+back -1746
+▁vol -1747
+▁Feb -1748
+▁CO -1749
+my -1750
+▁https -1751
+▁publ -1752
+▁benef -1753
+ster -1754
+▁food -1755
+▁Dr -1756
+▁deb -1757
+▁Dav -1758
+▁deal -1759
+▁mom -1760
+▁media -1761
+▁five -1762
+IT -1763
+▁Dis -1764
+▁doc -1765
+▁appear -1766
+▁looking -1767
+verage -1768
+▁news -1769
+▁night -1770
+▁Reg -1771
+▁period -1772
+▁Cor -1773
+ournal -1774
+ental -1775
+▁doing -1776
+▁whether -1777
+▁sum -1778
+▁port -1779
+joy -1780
+cember -1781
+▁site -1782
+uff -1783
+▁doesn -1784
+▁Des -1785
+▁Bar -1786
+▁tot -1787
+ler -1788
+anies -1789
+▁key -1790
+▁Qu -1791
+ators -1792
+▁meas -1793
+▁represent -1794
+▁once -1795
+Th -1796
+▁God -1797
+ief -1798
+We -1799
+▁redu -1800
+book -1801
+▁question -1802
+sw -1803
+▁El -1804
+ovember -1805
+▁land -1806
+angu -1807
+▁percent -1808
+▁relations -1809
+▁chall -1810
+▁death -1811
+▁went -1812
+▁More -1813
+▁won -1814
+▁Serv -1815
+▁signific -1816
+reg -1817
+▁compet -1818
+cil -1819
+▁stop -1820
+▁low -1821
+▁true -1822
+▁Per -1823
+ison -1824
+▁jo -1825
+▁personal -1826
+▁tre -1827
+play -1828
+essage -1829
+hers -1830
+▁dri -1831
+▁Brit -1832
+▁actually -1833
+▁comb -1834
+ED -1835
+▁Pe -1836
+lu -1837
+▁method -1838
+▁material -1839
+▁else -1840
+olution -1841
+▁cou -1842
+gr -1843
+▁months -1844
+▁industry -1845
+▁room -1846
+illed -1847
+▁online -1848
+ges -1849
+ilities -1850
+▁body -1851
+▁init -1852
+▁October -1853
+ival -1854
+▁December -1855
+▁due -1856
+minist -1857
+▁require -1858
+rodu -1859
+aces -1860
+▁bit -1861
+though -1862
+▁Stud -1863
+▁pain -1864
+▁September -1865
+ches -1866
+▁ener -1867
+▁create -1868
+ele -1869
+agn -1870
+ml -1871
+ibility -1872
+gest -1873
+▁further -1874
+▁pri -1875
+▁started -1876
+▁address -1877
+bl -1878
+▁space -1879
+▁America -1880
+▁November -1881
+used -1882
+itted -1883
+ancial -1884
+▁Her -1885
+▁sound -1886
+▁enc -1887
+arget -1888
+▁yet -1889
+▁August -1890
+): -1891
+oph -1892
+pre -1893
+fer -1894
+ule -1895
+now -1896
+It -1897
+▁character -1898
+gress -1899
+▁respons -1900
+▁Pa -1901
+ports -1902
+▁super -1903
+▁Russ -1904
+uation -1905
+▁comes -1906
+▁problem -1907
+▁bring -1908
+▁needs -1909
+ification -1910
+itle -1911
+▁final -1912
+▁Afric -1913
+▁Just -1914
+▁role -1915
+▁press -1916
+iter -1917
+▁national -1918
+wh -1919
+▁http -1920
+▁respect -1921
+irm -1922
+▁Sm -1923
+▁West -1924
+▁announ -1925
+▁song -1926
+equ -1927
+▁single -1928
+▁mind -1929
+▁Acc -1930
+▁cell -1931
+▁prob -1932
+▁Health -1933
+▁While -1934
+ham -1935
+aining -1936
+ociety -1937
+▁Ke -1938
+ried -1939
+▁environment -1940
+▁Cour -1941
+▁visit -1942
+▁typ -1943
+▁building -1944
+▁exist -1945
+▁object -1946
+amb -1947
+▁bo -1948
+ging -1949
+ault -1950
+ples -1951
+▁House -1952
+hing -1953
+▁enjoy -1954
+earch -1955
+▁fav -1956
+▁lo -1957
+▁results -1958
+ilt -1959
+▁begin -1960
+▁complet -1961
+▁Post -1962
+▁aud -1963
+▁From -1964
+▁issues -1965
+text -1966
+cret -1967
+▁model -1968
+AC -1969
+ests -1970
+utes -1971
+▁political -1972
+▁above -1973
+oon -1974
+selves -1975
+▁, -1976
+umb -1977
+▁companies -1978
+▁ago -1979
+---- -1980
+ift -1981
+ino -1982
+▁getting -1983
+str -1984
+▁Char -1985
+▁La -1986
+▁action -1987
+na -1988
+unch -1989
+▁third -1990
+▁Mark -1991
+▁dr -1992
+▁private -1993
+vert -1994
+ibr -1995
+▁age -1996
+▁sent -1997
+EC -1998
+head -1999
+▁significant -2000
+rain -2001
+year -2002
+ze -2003
+▁Now -2004
+▁TH -2005
+ana -2006
+fact -2007
+clus -2008
+▁ey -2009
+empt -2010
+▁believe -2011
+▁previous -2012
+ama -2013
+pecially -2014
+▁arg -2015
+opy -2016
+▁website -2017
+▁Int -2018
+utions -2019
+lation -2020
+▁technology -2021
+', -2022
+ruction -2023
+▁estab -2024
+oe -2025
+▁common -2026
+asing -2027
+▁mot -2028
+▁rights -2029
+onse -2030
+▁Don -2031
+▁taken -2032
+▁shall -2033
+▁Law -2034
+▁face -2035
+▁determ -2036
+▁energy -2037
+▁viol -2038
+▁impact -2039
+▁Pre -2040
+error -2041
+▁share -2042
+▁Austral -2043
+▁sugg -2044
+ington -2045
+anks -2046
+ume -2047
+▁effort -2048
+▁former -2049
+▁offer -2050
+istic -2051
+dd -2052
+ST -2053
+▁step -2054
+▁half -2055
+▁County -2056
+▁Sch -2057
+▁whole -2058
+rac -2059
+hib -2060
+epend -2061
+▁describ -2062
+▁fail -2063
+ules -2064
+atural -2065
+▁Mich -2066
+▁phys -2067
+▁parent -2068
+▁subject -2069
+rug -2070
+lete -2071
+▁six -2072
+▁products -2073
+▁Fin -2074
+▁according -2075
+abel -2076
+ological -2077
+▁compl -2078
+time -2079
+ruary -2080
+▁struct -2081
+▁particip -2082
+▁associ -2083
+}} -2084
+▁court -2085
+▁general -2086
+ht -2087
+▁video -2088
+▁content -2089
+▁consider -2090
+ouncil -2091
+chie -2092
+▁official -2093
+▁tw -2094
+ysis -2095
+ades -2096
+▁International -2097
+▁ens -2098
+▁Germ -2099
+▁almost -2100
+▁UK -2101
+▁mor -2102
+ext -2103
+▁gen -2104
+▁fall -2105
+itte -2106
+fect -2107
+▁attack -2108
+▁heart -2109
+▁lat -2110
+▁> -2111
+▁Trump -2112
+ression -2113
+▁hy -2114
+▁Sec -2115
+▁position -2116
+hel -2117
+▁games -2118
+▁quality -2119
+irt -2120
+atory -2121
+itary -2122
+▁ep -2123
+iction -2124
+▁rad -2125
+face -2126
+▁similar -2127
+▁either -2128
+ondon -2129
+▁break -2130
+▁bar -2131
+^{ -2132
+part -2133
+▁prop -2134
+▁performance -2135
+iday -2136
+adem -2137
+▁check -2138
+ederal -2139
+▁issue -2140
+▁wind -2141
+▁Some -2142
+▁amount -2143
+elling -2144
+▁wa -2145
+ends -2146
+ald -2147
+▁held -2148
+▁coll -2149
+▁dom -2150
+▁entire -2151
+▁member -2152
+load -2153
+▁Of -2154
+▁February -2155
+▁agre -2156
+▁Center -2157
+cial -2158
+fully -2159
+icult -2160
+▁taking -2161
+cal -2162
+▁rather -2163
+iet -2164
+sy -2165
+▁international -2166
+▁policy -2167
+ledge -2168
+▁China -2169
+▁Park -2170
+uk -2171
+▁Direct -2172
+▁miss -2173
+apt -2174
+RE -2175
+▁dou -2176
+cast -2177
+▁recogn -2178
+▁original -2179
+ecut -2180
+hern -2181
+▁hours -2182
+▁continue -2183
+▁friends -2184
+▁hom -2185
+▁quick -2186
+▁cut -2187
+▁collect -2188
+▁cal -2189
+▁necess -2190
+ication -2191
+▁Sw -2192
+-> -2193
+▁likely -2194
+ille -2195
+ament -2196
+▁text -2197
+▁shows -2198
+era -2199
+ided -2200
+▁page -2201
+raft -2202
+▁range -2203
+▁countries -2204
+▁events -2205
+▁Pat -2206
+key -2207
+▁close -2208
+▁idea -2209
+▁provided -2210
+ception -2211
+▁various -2212
+icro -2213
+▁buy -2214
+▁contact -2215
+▁relationship -2216
+(" -2217
+▁global -2218
+▁cases -2219
+▁however -2220
+hold -2221
+▁foot -2222
+▁recent -2223
+▁risk -2224
+▁host -2225
+▁sex -2226
+leased -2227
+ites -2228
+▁black -2229
+▁education -2230
+ender -2231
+▁High -2232
+▁anything -2233
+add -2234
+▁front -2235
+ari -2236
+▁hope -2237
+new -2238
+▁Mon -2239
+▁everything -2240
+off -2241
+ivil -2242
+ires -2243
+▁watch -2244
+sec -2245
+itch -2246
+▁King -2247
+▁especially -2248
+ees -2249
+aign -2250
+alt -2251
+▁Ste -2252
+▁Off -2253
+▁became -2254
+▁abs -2255
+igital -2256
+▁London -2257
+ype -2258
+▁areas -2259
+▁someone -2260
+▁below -2261
+:: -2262
+▁beaut -2263
+atur -2264
+▁repl -2265
+▁Court -2266
+ci -2267
+▁intern -2268
+anguage -2269
+▁total -2270
+▁concern -2271
+room -2272
+amed -2273
+eal -2274
+▁behind -2275
+▁received -2276
+▁contract -2277
+▁investig -2278
+bum -2279
+▁imm -2280
+▁security -2281
+▁party -2282
+__ -2283
+(); -2284
+▁article -2285
+igure -2286
+▁trying -2287
+come -2288
+}$ -2289
+▁Dem -2290
+▁patients -2291
+▁Department -2292
+.) -2293
+mit -2294
+▁walk -2295
+iddle -2296
+▁travel -2297
+nov -2298
+AM -2299
+▁First -2300
+▁Ang -2301
+▁clos -2302
+ceed -2303
+roups -2304
+iences -2305
+▁x -2306
+▁circ -2307
+▁famil -2308
+ogle -2309
+cle -2310
+▁required -2311
+su -2312
+▁dest -2313
+aching -2314
+Ch -2315
+ensive -2316
+icles -2317
+▁pie -2318
+▁points -2319
+▁Sen -2320
+▁accept -2321
+▁asked -2322
+▁David -2323
+▁coming -2324
+▁obs -2325
+▁exam -2326
+▁ground -2327
+ops -2328
+▁Ir -2329
+▁cred -2330
+▁management -2331
+ude -2332
+▁Gener -2333
+▁mass -2334
+▁hon -2335
+../ -2336
+iple -2337
+▁Cons -2338
+▁regard -2339
+▁achie -2340
+▁wanted -2341
+▁India -2342
+uel -2343
+itution -2344
+▁difficult -2345
+▁bad -2346
+▁pers -2347
+▁Associ -2348
+itional -2349
+▁office -2350
+▁Black -2351
+▁Here -2352
+ips -2353
+▁target -2354
+▁quite -2355
+ael -2356
+▁living -2357
+ores -2358
+ague -2359
+ground -2360
+▁upon -2361
+▁Ser -2362
+▁changes -2363
+▁Net -2364
+uge -2365
+idge -2366
+▁ill -2367
+▁Your -2368
+▁staff -2369
+▁Hist -2370
+▁matter -2371
+rest -2372
+ocr -2373
+▁financial -2374
+▁neg -2375
+▁forward -2376
+}\ -2377
+▁improve -2378
+▁bi -2379
+▁president -2380
+reet -2381
+▁po -2382
+▁nothing -2383
+ply -2384
+▁prim -2385
+▁hot -2386
+itute -2387
+▁added -2388
+▁soft -2389
+oved -2390
+ises -2391
+▁stay -2392
+based -2393
+▁production -2394
+▁San -2395
+nes -2396
+medi -2397
+▁terms -2398
+bor -2399
+▁Hol -2400
+oud -2401
+▁emb -2402
+urg -2403
+▁reported -2404
+▁road -2405
+▁words -2406
+▁seems -2407
+▁pack -2408
+▁potential -2409
+▁decision -2410
+▁Phil -2411
+▁log -2412
+cont -2413
+apan -2414
+▁Ob -2415
+▁specific -2416
+ET -2417
+icated -2418
+inary -2419
+ett -2420
+▁police -2421
+▁easy -2422
+ittee -2423
+▁ess -2424
+▁sense -2425
+aker -2426
+▁Calif -2427
+▁wond -2428
+▁systems -2429
+▁Commun -2430
+Con -2431
+tain -2432
+▁training -2433
+▁Ac -2434
+ica -2435
+▁began -2436
+▁probably -2437
+▁created -2438
+▁select -2439
+▁anim -2440
+▁jud -2441
+▁upd -2442
+▁sat -2443
+▁fire -2444
+lin -2445
+▁everyone -2446
+▁comput -2447
+▁writing -2448
+▁version -2449
+urb -2450
+▁dam -2451
+atform -2452
+▁date -2453
+▁property -2454
+▁II -2455
+overed -2456
+▁needed -2457
+▁career -2458
+▁increase -2459
+▁Manag -2460
+▁legal -2461
+▁sym -2462
+▁lives -2463
+▁town -2464
+▁Trans -2465
+▁questions -2466
+▁popular -2467
+▁Day -2468
+▁red -2469
+egin -2470
+itter -2471
+td -2472
+▁late -2473
+acc -2474
+▁features -2475
+▁cannot -2476
+aur -2477
+atives -2478
+▁worked -2479
+▁ways -2480
+▁Us -2481
+▁Val -2482
+▁und -2483
+▁moment -2484
+oo -2485
+uments -2486
+▁Book -2487
+ford -2488
+▁Up -2489
+AP -2490
+ospital -2491
+▁rate -2492
+▁TV -2493
+rael -2494
+ached -2495
+▁themselves -2496
+▁outside -2497
+▁higher -2498
+▁isn -2499
+▁rev -2500
+▁consum -2501
+▁itself -2502
+aced -2503
+▁William -2504
+▁tool -2505
+▁board -2506
+▁includes -2507
+▁• -2508
+▁influ -2509
+▁English -2510
+▁Bu -2511
+▁deg -2512
+ension -2513
+iful -2514
+▁Tw -2515
+oice -2516
+▁answ -2517
+ara -2518
+hood -2519
+rect -2520
+ese -2521
+▁groups -2522
+ornia -2523
+ems -2524
+RO -2525
+▁suggest -2526
+▁pred -2527
+". -2528
+▁provides -2529
+▁girl -2530
+▁takes -2531
+itting -2532
+rab -2533
+izing -2534
+▁published -2535
+action -2536
+▁white -2537
+▁introdu -2538
+▁cele -2539
+▁network -2540
+▁minutes -2541
+attle -2542
+▁assist -2543
+ij -2544
+ishing -2545
+▁Ear -2546
+▁engine -2547
+▁section -2548
+▁vot -2549
+▁soon -2550
+▁campaign -2551
+ening -2552
+▁sus -2553
+fl -2554
+▁track -2555
+▁saw -2556
+div -2557
+▁inj -2558
+▁Air -2559
+▁College -2560
+ength -2561
+▁European -2562
+html -2563
+anced -2564
+▁himself -2565
+▁fight -2566
+▁vict -2567
+▁Home -2568
+▁Red -2569
+asc -2570
+▁document -2571
+EL -2572
+▁Paul -2573
+field -2574
+▁perfect -2575
+ams -2576
+▁Japan -2577
+▁link -2578
+▁evidence -2579
+ners -2580
+▁players -2581
+▁growth -2582
+▁protect -2583
+aily -2584
+▁written -2585
+▁size -2586
+▁price -2587
+▁Sim -2588
+▁Read -2589
+order -2590
+col -2591
+▁becom -2592
+lier -2593
+adu -2594
+tty -2595
+▁defin -2596
+▁recently -2597
+alse -2598
+uses -2599
+▁Research -2600
+▁histor -2601
+▁integ -2602
+▁recomm -2603
+▁administ -2604
+▁sect -2605
+▁mother -2606
+ession -2607
+ky -2608
+▁Prof -2609
+▁Are -2610
+▁tour -2611
+▁California -2612
+▁Wash -2613
+▁ste -2614
+▁path -2615
+orge -2616
+▁emer -2617
+arter -2618
+▁woman -2619
+ables -2620
+IL -2621
+ny -2622
+▁currently -2623
+▁album -2624
+▁approach -2625
+oke -2626
+▁decl -2627
+▁prior -2628
+▁Republic -2629
+▁treatment -2630
+aving -2631
+▁Mor -2632
+ivity -2633
+eks -2634
+▁included -2635
+▁Let -2636
+oura -2637
+▁norm -2638
+ida -2639
+▁loss -2640
+▁pick -2641
+▁attempt -2642
+eed -2643
+▁via -2644
+▁dise -2645
+umn -2646
+pped -2647
+▁Add -2648
+ux -2649
+aries -2650
+▁involved -2651
+▁pict -2652
+▁deep -2653
+▁economic -2654
+useum -2655
+lim -2656
+▁cover -2657
+▁natural -2658
+ha -2659
+▁response -2660
+hat -2661
+▁James -2662
+▁Council -2663
+pite -2664
+▁arch -2665
+▁table -2666
+▁veh -2667
+▁books -2668
+▁Israel -2669
+isions -2670
+▁led -2671
+ufact -2672
+▁complete -2673
+▁release -2674
+ags -2675
+oring -2676
+rel -2677
+inks -2678
+▁Ret -2679
+ION -2680
+aint -2681
+▁Pal -2682
+▁den -2683
+etimes -2684
+▁states -2685
+▁British -2686
+sych -2687
+erc -2688
+Ex -2689
+ING -2690
+▁purch -2691
+▁Then -2692
+▁problems -2693
+▁opportunity -2694
+▁East -2695
+▁lost -2696
+BC -2697
+▁simply -2698
+▁ful -2699
+▁medical -2700
+▁conditions -2701
+▁Inc -2702
+mon -2703
+▁played -2704
+▁Sy -2705
+▁search -2706
+▁source -2707
+▁sens -2708
+▁related -2709
+(\ -2710
+rought -2711
+▁conduct -2712
+▁Op -2713
+▁throughout -2714
+Pro -2715
+rie -2716
+▁request -2717
+▁title -2718
+wood -2719
+▁modern -2720
+▁father -2721
+arning -2722
+ctor -2723
+mb -2724
+OT -2725
+▁knowledge -2726
+▁der -2727
+airs -2728
+▁sett -2729
+ocal -2730
+light -2731
+▁Dist -2732
+▁built -2733
+sequ -2734
+▁Fil -2735
+▁Group -2736
+▁Review -2737
+rown -2738
+oper -2739
+▁favor -2740
+▁THE -2741
+▁longer -2742
+pir -2743
+”. -2744
+▁Washington -2745
+▁Why -2746
+== -2747
+▁Canada -2748
+verse -2749
+▁movie -2750
+▁cy -2751
+anger -2752
+▁paper -2753
+estern -2754
+ription -2755
+▁*/ -2756
+dom -2757
+▁expected -2758
+vey -2759
+idered -2760
+duc -2761
+▁Ant -2762
+▁tou -2763
+▁Inter -2764
+ror -2765
+▁practice -2766
+▁military -2767
+(' -2768
+begin -2769
+enc -2770
+▁meeting -2771
+▁leading -2772
+▁relig -2773
+raw -2774
+▁region -2775
+ulations -2776
+▁offers -2777
+US -2778
+erved -2779
+▁drug -2780
+haps -2781
+ications -2782
+arc -2783
+▁п -2784
+▁reve -2785
+ст -2786
+lying -2787
+iber -2788
+▁Cong -2789
+▁broad -2790
+ming -2791
+ements -2792
+▁released -2793
+▁threat -2794
+ribution -2795
+▁Street -2796
+▁sold -2797
+▁son -2798
+▁Techn -2799
+▁comment -2800
+▁stri -2801
+oul -2802
+▁Rich -2803
+▁interview -2804
+▁unique -2805
+▁language -2806
+▁tal -2807
+▁playing -2808
+▁hit -2809
+▁analysis -2810
+astic -2811
+▁implement -2812
+irth -2813
+ressed -2814
+▁prevent -2815
+asons -2816
+▁ut -2817
+▁base -2818
+yond -2819
+AD -2820
+unt -2821
+▁pretty -2822
+▁code -2823
+lam -2824
+ayer -2825
+▁Public -2826
+▁hor -2827
+▁cand -2828
+▁file -2829
+ography -2830
+▁Michael -2831
+▁wasn -2832
+▁Ben -2833
+▁separ -2834
+▁anyone -2835
+▁application -2836
+UR -2837
+omb -2838
+▁running -2839
+arr -2840
+▁platform -2841
+▁simple -2842
+rote -2843
+▁install -2844
+▁user -2845
+▁IN -2846
+▁tru -2847
+▁weeks -2848
+irit -2849
+▁Out -2850
+▁Rober -2851
+▁Dan -2852
+▁ord -2853
+▁div -2854
+itation -2855
+imum -2856
+▁band -2857
+▁General -2858
+▁Mod -2859
+▁serious -2860
+▁average -2861
+▁leave -2862
+▁considered -2863
+ville -2864
+▁heav -2865
+▁image -2866
+idents -2867
+▁Other -2868
+▁Business -2869
+▁Found -2870
+▁Je -2871
+ares -2872
+oto -2873
+▁worth -2874
+iation -2875
+> -2876
+istry -2877
+nces -2878
+▁stru -2879
+isc -2880
+fort -2881
+urance -2882
+▁complex -2883
+alls -2884
+▁culture -2885
+▁Jack -2886
+▁coun -2887
+ico -2888
+iles -2889
+urd -2890
+▁mention -2891
+▁Sub -2892
+▁connect -2893
+aches -2894
+▁Ann -2895
+▁announced -2896
+▁cross -2897
+vis -2898
+▁professional -2899
+▁Indian -2900
+rast -2901
+▁ensure -2902
+pose -2903
+▁cause -2904
+rench -2905
+▁instead -2906
+▁style -2907
+)) -2908
+illing -2909
+▁dead -2910
+▁Great -2911
+на -2912
+▁capital -2913
+▁immedi -2914
+ibrary -2915
+▁additional -2916
+mission -2917
+ecutive -2918
+▁student -2919
+rick -2920
+▁saying -2921
+▁Arch -2922
+erve -2923
+▁learning -2924
+▁Jew -2925
+▁whose -2926
+aging -2927
+raz -2928
+▁Over -2929
+▁Well -2930
+▁activities -2931
+▁mag -2932
+▁stories -2933
+▁Most -2934
+▁exact -2935
+▁reading -2936
+▁independ -2937
+▁wrong -2938
+aged -2939
+▁White -2940
+▁Get -2941
+obile -2942
+aves -2943
+iqu -2944
+▁latest -2945
+lements -2946
+htt -2947
+▁levels -2948
+▁Sund -2949
+▁projects -2950
+▁designed -2951
+ences -2952
+▁nature -2953
+vest -2954
+itiz -2955
+▁Face -2956
+▁parents -2957
+▁beautiful -2958
+▁director -2959
+down -2960
+▁speak -2961
+▁regular -2962
+▁surpr -2963
+anch -2964
+▁nation -2965
+▁standard -2966
+▁necessary -2967
+force -2968
+▁sil -2969
+inated -2970
+het -2971
+▁System -2972
+▁gave -2973
+▁Ev -2974
+▁print -2975
+see -2976
+▁Educ -2977
+iment -2978
+▁Vol -2979
+▁driv -2980
+▁lower -2981
+▁challeng -2982
+name -2983
+rag -2984
+VID -2985
+▁star -2986
+▁Port -2987
+▁round -2988
+▁clean -2989
+▁According -2990
+hem -2991
+▁Even -2992
+▁draw -2993
+▁extrem -2994
+▁Every -2995
+▁See -2996
+▁award -2997
+]. -2998
+▁population -2999
+imately -3000
+chan -3001
+▁scient -3002
+▁inside -3003
+▁billion -3004
+▁Science -3005
+▁details -3006
+▁Green -3007
+▁capt -3008
+▁Program -3009
+▁ability -3010
+ought -3011
+▁boy -3012
+▁felt -3013
+▁AN -3014
+▁== -3015
+▁screen -3016
+▁Australia -3017
+▁в -3018
+▁please -3019
+▁brought -3020
+▁progress -3021
+body -3022
+▁Institute -3023
+▁programs -3024
+▁Thom -3025
+rs -3026
+▁ten -3027
+unk -3028
+where -3029
+▁hands -3030
+opp -3031
+}, -3032
+▁oil -3033
+▁plans -3034
+▁subst -3035
+bit -3036
+▁prep -3037
+cks -3038
+▁couple -3039
+▁Board -3040
+▁resources -3041
+}{ -3042
+atter -3043
+▁goal -3044
+▁princ -3045
+)$ -3046
+la -3047
+aly -3048
+▁manufact -3049
+▁customers -3050
+oom -3051
+▁block -3052
+▁Mart -3053
+▁Develop -3054
+▁science -3055
+rim -3056
+▁extra -3057
+▁bank -3058
+▁force -3059
+▁Press -3060
+▁Rel -3061
+▁developed -3062
+wor -3063
+mitted -3064
+▁sort -3065
+like -3066
+rm -3067
+▁users -3068
+rows -3069
+▁wrote -3070
+mercial -3071
+ties -3072
+▁Conf -3073
+▁Since -3074
+▁pan -3075
+▁Church -3076
+▁ter -3077
+▁Friday -3078
+▁Tex -3079
+Name -3080
+▁Rec -3081
+ords -3082
+▁goes -3083
+▁interesting -3084
+▁receive -3085
+▁behav -3086
+resh -3087
+arily -3088
+this -3089
+▁stage -3090
+▁safety -3091
+opt -3092
+gen -3093
+▁etc -3094
+▁Year -3095
+▁demon -3096
+▁statement -3097
+▁schools -3098
+▁mis -3099
+▁cook -3100
+▁Best -3101
+▁Director -3102
+▁knew -3103
+ellow -3104
+▁mid -3105
+▁hol -3106
+EO -3107
+▁lack -3108
+▁suff -3109
+etic -3110
+▁ap -3111
+▁options -3112
+▁mach -3113
+▁century -3114
+ado -3115
+▁liter -3116
+▁email -3117
+▁hum -3118
+▁Journal -3119
+▁match -3120
+▁heard -3121
+osing -3122
+▁civil -3123
+▁fair -3124
+roid -3125
+▁ver -3126
+▁blog -3127
+vin -3128
+▁born -3129
+pm -3130
+▁Work -3131
+▁Wed -3132
+▁Bre -3133
+▁digital -3134
+▁Google -3135
+▁towards -3136
+▁Company -3137
+rated -3138
+ened -3139
+LE -3140
+_{\ -3141
+▁lic -3142
+resp -3143
+▁fast -3144
+▁attention -3145
+ago -3146
+▁wonder -3147
+▁trade -3148
+▁multiple -3149
+ying -3150
+▁Commission -3151
+▁hear -3152
+▁autom -3153
+OS -3154
+▁studies -3155
+▁situation -3156
+IG -3157
+?” -3158
+ра -3159
+▁fem -3160
+▁respond -3161
+▁Africa -3162
+▁bed -3163
+ae -3164
+▁Ber -3165
+>< -3166
+▁display -3167
+▁center -3168
+▁mus -3169
+ston -3170
+lands -3171
+▁ball -3172
+▁About -3173
+▁Sur -3174
+▁organization -3175
+▁surv -3176
+▁society -3177
+ctions -3178
+▁Bel -3179
+urt -3180
+▁card -3181
+▁costs -3182
+▁celebr -3183
+▁encoura -3184
+ни -3185
+olic -3186
+▁Mem -3187
+▁Association -3188
+▁collection -3189
+▁federal -3190
+▁aim -3191
+mar -3192
+▁Leg -3193
+▁Under -3194
+▁color -3195
+▁acqu -3196
+rial -3197
+▁write -3198
+itect -3199
+▁novel -3200
+▁IS -3201
+▁web -3202
+itz -3203
+▁sales -3204
+▁Sam -3205
+zz -3206
+▁Super -3207
+wide -3208
+lied -3209
+▁Music -3210
+ribute -3211
+aughter -3212
+▁govern -3213
+▁box -3214
+▁hous -3215
+▁Def -3216
+▁quickly -3217
+▁organiz -3218
+▁trust -3219
+▁effects -3220
+ixed -3221
+board -3222
+▁consist -3223
+▁parts -3224
+inter -3225
+▁seek -3226
+▁George -3227
+▁Spec -3228
+▁England -3229
+▁player -3230
+enge -3231
+▁decided -3232
+▁Sunday -3233
+▁teac -3234
+mark -3235
+▁rout -3236
+▁kids -3237
+▁occur -3238
+▁Facebook -3239
+rian -3240
+▁Tod -3241
+atal -3242
+▁usually -3243
+ava -3244
+ech -3245
+▁Government -3246
+frac -3247
+▁turned -3248
+▁Del -3249
+CO -3250
+▁Hall -3251
+▁Mond -3252
+▁brand -3253
+▁History -3254
+ulated -3255
+▁gra -3256
+▁People -3257
+▁concept -3258
+▁located -3259
+▁Angel -3260
+▁Christian -3261
+▁Service -3262
+▁contribut -3263
+unction -3264
+lished -3265
+▁innov -3266
+▁looks -3267
+ube -3268
+▁Mus -3269
+▁) -3270
+▁blood -3271
+▁streng -3272
+▁squ -3273
+▁workers -3274
+▁Top -3275
+nown -3276
+istics -3277
+▁club -3278
+eta -3279
+▁Satur -3280
+omp -3281
+▁Also -3282
+▁remember -3283
+Cl -3284
+▁families -3285
+▁summer -3286
+▁! -3287
+aith -3288
+irus -3289
+band -3290
+enty -3291
+rang -3292
+▁Texas -3293
+lear -3294
+inese -3295
+▁farm -3296
+ocation -3297
+▁named -3298
+▁candid -3299
+▁Mary -3300
+ki -3301
+▁gets -3302
+▁bal -3303
+apter -3304
+men -3305
+.[ -3306
+▁shown -3307
+pper -3308
+▁neigh -3309
+▁nearly -3310
+▁French -3311
+▁positive -3312
+▁Mat -3313
+▁particularly -3314
+icip -3315
+▁disease -3316
+▁race -3317
+▁assess -3318
+do -3319
+▁avoid -3320
+▁limited -3321
+▁giving -3322
+da -3323
+itar -3324
+iding -3325
+ellig -3326
+▁Democr -3327
+▁chance -3328
+▁qual -3329
+Ar -3330
+▁Life -3331
+▁earlier -3332
+▁effic -3333
+odes -3334
+osition -3335
+pri -3336
+istan -3337
+icate -3338
+idth -3339
+▁imag -3340
+▁demand -3341
+▁died -3342
+▁directly -3343
+▁effective -3344
+just -3345
+trans -3346
+change -3347
+▁rock -3348
+aught -3349
+▁associated -3350
+▁Office -3351
+▁increased -3352
+▁software -3353
+▁increasing -3354
+▁wife -3355
+class -3356
+▁answer -3357
+▁beyond -3358
+ategy -3359
+▁phone -3360
+▁convers -3361
+more -3362
+aled -3363
+▁values -3364
+▁refer -3365
+▁arriv -3366
+▁href -3367
+▁drive -3368
+▁sen -3369
+▁inform -3370
+▁detail -3371
+▁individuals -3372
+▁COVID -3373
+rief -3374
+▁dark -3375
+iance -3376
+ils -3377
+undred -3378
+▁thous -3379
+▁Good -3380
+UT -3381
+▁false -3382
+venue -3383
+▁morning -3384
+▁Many -3385
+▁reflect -3386
+▁beginning -3387
+▁earn -3388
+▁meaning -3389
+▁park -3390
+Ph -3391
+▁Sing -3392
+▁Mal -3393
+▁providing -3394
+ey -3395
+▁eyes -3396
+hest -3397
+CC -3398
+▁Att -3399
+ously -3400
+rack -3401
+bon -3402
+▁firm -3403
+itness -3404
+▁Fam -3405
+▁sn -3406
+span -3407
+wise -3408
+▁ri -3409
+▁successful -3410
+▁plant -3411
+iant -3412
+▁correct -3413
+▁ideas -3414
+ancy -3415
+▁Rob -3416
+”, -3417
+▁physical -3418
+▁largest -3419
+▁Saturday -3420
+▁copy -3421
+▁rules -3422
+▁ST -3423
+▁Congress -3424
+udget -3425
+▁ones -3426
+▁Gold -3427
+ret -3428
+berg -3429
+▁choice -3430
+ibly -3431
+▁prec -3432
+▁Mac -3433
+▁prem -3434
+▁device -3435
+ev -3436
+owers -3437
+▁sle -3438
+azing -3439
+▁stra -3440
+def -3441
+ios -3442
+emic -3443
+▁Bill -3444
+mas -3445
+▁characters -3446
+▁ready -3447
+▁gives -3448
+▁huge -3449
+ко -3450
+▁achieve -3451
+bb -3452
+▁bott -3453
+▁Alex -3454
+▁fans -3455
+▁reports -3456
+▁frequ -3457
+ius -3458
+▁highly -3459
+ander -3460
+▁Class -3461
+▁promot -3462
+olf -3463
+▁degree -3464
+▁Education -3465
+IV -3466
+▁wide -3467
+▁served -3468
+▁pric -3469
+igation -3470
+▁Lou -3471
+▁Sol -3472
+▁growing -3473
+▁enter -3474
+▁Mil -3475
+▁lif -3476
+▁Mag -3477
+▁Although -3478
+mp -3479
+▁gas -3480
+▁Flor -3481
+▁anti -3482
+▁allowed -3483
+▁Jer -3484
+▁spent -3485
+ester -3486
+▁corpor -3487
+▁aware -3488
+▁USA -3489
+▁remain -3490
+▁thinking -3491
+▁movement -3492
+▁ded -3493
+▁Child -3494
+▁Franc -3495
+▁Chinese -3496
+▁ge -3497
+▁slow -3498
+▁epis -3499
+▁understanding -3500
+▁cam -3501
+estival -3502
+place -3503
+▁Met -3504
+▁described -3505
+▁Monday -3506
+▁efforts -3507
+▁location -3508
+▁activity -3509
+▁purpose -3510
+▁sal -3511
+▁foreign -3512
+▁sust -3513
+▁speed -3514
+eria -3515
+ped -3516
+▁climate -3517
+bert -3518
+▁message -3519
+▁cast -3520
+▁AM -3521
+fig -3522
+▁sometimes -3523
+igned -3524
+но -3525
+▁hop -3526
+Id -3527
+▁Rem -3528
+▁Bank -3529
+▁Dec -3530
+........ -3531
+▁Fore -3532
+▁happy -3533
+▁piece -3534
+▁investment -3535
+▁skills -3536
+▁voice -3537
+▁feature -3538
+String -3539
+edom -3540
+▁leaders -3541
+▁PM -3542
+▁moved -3543
+▁Cam -3544
+▁ult -3545
+▁Cap -3546
+▁Services -3547
+▁clin -3548
+▁UN -3549
+ros -3550
+▁aw -3551
+ya -3552
+▁journal -3553
+▁During -3554
+CH -3555
+▁Robert -3556
+▁pull -3557
+▁Please -3558
+▁strugg -3559
+▁solution -3560
+▁door -3561
+▁id -3562
+▁employees -3563
+▁wel -3564
+▁store -3565
+▁except -3566
+▁looked -3567
+], -3568
+▁credit -3569
+▁regist -3570
+▁Supp -3571
+▁Str -3572
+▁Tom -3573
+▁reach -3574
+▁basis -3575
+▁context -3576
+ро -3577
+▁structure -3578
+EM -3579
+▁Road -3580
+▁с -3581
+source -3582
+weet -3583
+ronic -3584
+oyal -3585
+▁max -3586
+▁Committee -3587
+▁benefits -3588
+▁names -3589
+▁Star -3590
+▁changed -3591
+▁communities -3592
+coming -3593
+▁places -3594
+▁collabor -3595
+▁established -3596
+▁sche -3597
+▁eight -3598
+ocks -3599
+▁note -3600
+▁traditional -3601
+ero -3602
+▁finally -3603
+▁Smith -3604
+▁strateg -3605
+▁Plan -3606
+▁fully -3607
+▁Jud -3608
+onent -3609
+▁happened -3610
+icine -3611
+▁Profess -3612
+▁carry -3613
+▁teams -3614
+▁types -3615
+IM -3616
+▁safe -3617
+You -3618
+▁paid -3619
+ka -3620
+▁Last -3621
+▁ang -3622
+▁completely -3623
+sey -3624
+▁Any -3625
+IP -3626
+▁altern -3627
+▁Social -3628
+!! -3629
+▁allows -3630
+ults -3631
+OL -3632
+▁Two -3633
+aming -3634
+▁continued -3635
+▁que -3636
+iers -3637
+▁dream -3638
+ona -3639
+▁doub -3640
+▁condition -3641
+section -3642
+▁bus -3643
+▁Intern -3644
+▁OF -3645
+▁France -3646
+▁numbers -3647
+encies -3648
+▁although -3649
+porary -3650
+▁measure -3651
+▁Cur -3652
+▁emot -3653
+ste -3654
+▁difference -3655
+▁Report -3656
+▁£ -3657
+itable -3658
+label -3659
+acity -3660
+▁Const -3661
+▁opin -3662
+▁dog -3663
+▁=> -3664
+▁bill -3665
+ampions -3666
+▁transport -3667
+sub -3668
+▁feed -3669
+▁seven -3670
+ipment -3671
+▁Big -3672
+bar -3673
+▁compared -3674
+ria -3675
+▁mer -3676
+▁sell -3677
+▁categ -3678
+left -3679
+▁citiz -3680
+▁economy -3681
+▁vill -3682
+▁Miss -3683
+▁appropri -3684
+▁Sl -3685
+▁followed -3686
+bre -3687
+vention -3688
+▁tried -3689
+ancing -3690
+▁church -3691
+▁exhib -3692
+▁pen -3693
+edy -3694
+▁variety -3695
+▁dro -3696
+▁touch -3697
+akers -3698
+ENT -3699
+▁Oh -3700
+▁Thurs -3701
+UL -3702
+rupt -3703
+DA -3704
+▁Sal -3705
+▁Tues -3706
+▁uses -3707
+bu -3708
+▁starting -3709
+▁birth -3710
+▁hospital -3711
+▁computer -3712
+▁polic -3713
+house -3714
+▁Av -3715
+▁shoot -3716
+▁expand -3717
+iller -3718
+comp -3719
+▁status -3720
+no -3721
+▁cells -3722
+▁businesses -3723
+IA -3724
+ynam -3725
+▁commercial -3726
+▁yourself -3727
+NA -3728
+▁origin -3729
+▁lay -3730
+▁mount -3731
+▁grand -3732
+▁construction -3733
+▁quarter -3734
+▁moving -3735
+itude -3736
+▁Minister -3737
+▁Fe -3738
+▁du -3739
+▁continu -3740
+▁hundred -3741
+▁temper -3742
+ache -3743
+▁election -3744
+▁choose -3745
+-------- -3746
+▁legis -3747
+▁spirit -3748
+▁` -3749
+▁Jo -3750
+inn -3751
+▁active -3752
+umber -3753
+ifying -3754
+isation -3755
+▁roll -3756
+▁myself -3757
+▁appoint -3758
+▁fear -3759
+▁green -3760
+▁Development -3761
+▁shot -3762
+▁Project -3763
+▁Sun -3764
+eq -3765
+bol -3766
+▁Management -3767
+▁peace -3768
+▁wants -3769
+▁demonstr -3770
+▁Who -3771
+ota -3772
+hy -3773
+▁Society -3774
+▁leader -3775
+eters -3776
+ken -3777
+▁remains -3778
+vere -3779
+▁background -3780
+ervation -3781
+▁transfer -3782
+▁mechan -3783
+aby -3784
+▁alone -3785
+▁wild -3786
+Type -3787
+▁apply -3788
+▁void -3789
+▁Camp -3790
+▁vote -3791
+▁brother -3792
+▁sun -3793
+▁cancer -3794
+rey -3795
+▁Econom -3796
+▁Vir -3797
+▁establish -3798
+▁lines -3799
+▁gun -3800
+▁ple -3801
+▁affect -3802
+▁Form -3803
+▁recommend -3804
+uty -3805
+▁vacc -3806
+▁Germany -3807
+▁comfort -3808
+roy -3809
+lor -3810
+▁greater -3811
+UN -3812
+This -3813
+▁Times -3814
+ologies -3815
+▁Sa -3816
+ounds -3817
+▁Thursday -3818
+▁materials -3819
+▁pm -3820
+term -3821
+▁er -3822
+izes -3823
+▁easily -3824
+▁African -3825
+▁psych -3826
+azine -3827
+▁Ham -3828
+▁excell -3829
+imal -3830
+▁certainly -3831
+ulture -3832
+▁opportunities -3833
+▁Step -3834
+▁Tuesday -3835
+▁Women -3836
+▁option -3837
+▁passed -3838
+▁images -3839
+▁Que -3840
+▁college -3841
+▁impl -3842
+▁Wednes -3843
+hens -3844
+▁Florida -3845
+edd -3846
+▁models -3847
+▁Russia -3848
+▁operations -3849
+▁cool -3850
+▁helped -3851
+▁initi -3852
+▁River -3853
+▁aspect -3854
+outhern -3855
+eared -3856
+IR -3857
+▁critical -3858
+▁gold -3859
+▁middle -3860
+mod -3861
+▁planning -3862
+cause -3863
+▁Human -3864
+▁Wind -3865
+?" -3866
+▁Time -3867
+udd -3868
+orage -3869
+▁daily -3870
+▁virt -3871
+za -3872
+▁expert -3873
+esc -3874
+▁normal -3875
+▁produced -3876
+▁save -3877
+▁Today -3878
+▁Union -3879
+▁Play -3880
+▁eval -3881
+▁fine -3882
+▁grant -3883
+▁estim -3884
+irection -3885
+▁parties -3886
+▁central -3887
+box -3888
+▁die -3889
+▁Mo -3890
+atively -3891
+▁Lat -3892
+ano -3893
+▁exactly -3894
+▁Hill -3895
+▁perhaps -3896
+lick -3897
+▁District -3898
+▁Scott -3899
+▁Mex -3900
+▁Hel -3901
+▁talking -3902
+▁Museum -3903
+▁deliver -3904
+ulate -3905
+▁interested -3906
+iring -3907
+iar -3908
+▁eth -3909
+annel -3910
+miss -3911
+▁creating -3912
+well -3913
+▁officials -3914
+▁poor -3915
+▁responsible -3916
+icks -3917
+PS -3918
+▁Rock -3919
+▁weight -3920
+▁spread -3921
+▁annual -3922
+▁benefit -3923
+▁Foundation -3924
+▁Free -3925
+▁consult -3926
+▁Frank -3927
+list -3928
+▁Det -3929
+state -3930
+icon -3931
+▁Twitter -3932
+▁Data -3933
+▁Sum -3934
+▁feet -3935
+▁primary -3936
+ady -3937
+▁Vis -3938
+▁stock -3939
+▁Wednesday -3940
+▁attract -3941
+risis -3942
+▁requirements -3943
+bf -3944
+List -3945
+ato -3946
+▁Cr -3947
+▁Bay -3948
+CA -3949
+▁Bi -3950
+▁ww -3951
+▁unf -3952
+▁? -3953
+▁Award -3954
+▁sy -3955
+▁Peter -3956
+ait -3957
+▁Care -3958
+▁Carol -3959
+▁Thomas -3960
+▁agreement -3961
+▁serve -3962
+▁Richard -3963
+pective -3964
+uration -3965
+▁tim -3966
+type -3967
+▁agree -3968
+▁surface -3969
+igr -3970
+▁sem -3971
+▁Author -3972
+▁calc -3973
+▁www -3974
+▁truth -3975
+▁wood -3976
+▁killed -3977
+levant -3978
+▁income -3979
+osh -3980
+▁artist -3981
+▁reasons -3982
+$. -3983
+itc -3984
+▁Wil -3985
+▁Sk -3986
+▁cru -3987
+▁scene -3988
+▁claims -3989
+▁Pri -3990
+anned -3991
+Res -3992
+yp -3993
+▁immediately -3994
+▁Jes -3995
+message -3996
+▁rates -3997
+▁secret -3998
+▁AP -3999
+▁gradu -4000
+Val -4001
+net -4002
+itely -4003
+▁Information -4004
+▁stuff -4005
+▁chem -4006
+point -4007
+▁experien -4008
+cel -4009
+▁Because -4010
+ructure -4011
+▁obvious -4012
+▁Jul -4013
+▁Mad -4014
+▁Chic -4015
+▁victim -4016
+▁Iran -4017
+▁maybe -4018
+▁overall -4019
+▁restaur -4020
+▁sch -4021
+▁letter -4022
+care -4023
+tained -4024
+▁mission -4025
+▁vehicle -4026
+") -4027
+gl -4028
+▁proced -4029
+▁presented -4030
+▁Academ -4031
+▁existing -4032
+▁German -4033
+▁academ -4034
+▁Stat -4035
+atre -4036
+▁items -4037
+ula -4038
+code -4039
+▁Secret -4040
+▁beg -4041
+$, -4042
+▁Tur -4043
+▁comments -4044
+▁Party -4045
+actions -4046
+▁ur -4047
+▁© -4048
+▁independent -4049
+▁Jose -4050
+ourney -4051
+▁Mer -4052
+▁charge -4053
+log -4054
+He -4055
+▁solutions -4056
+▁therefore -4057
+antly -4058
+Sh -4059
+▁shared -4060
+▁cert -4061
+▁conv -4062
+▁TO -4063
+iny -4064
+▁Aut -4065
+pha -4066
+▁electric -4067
+▁clients -4068
+▁repe -4069
+▁Spe -4070
+▁send -4071
+▁doll -4072
+▁Offic -4073
+▁contain -4074
+RA -4075
+reci -4076
+respond -4077
+▁powerful -4078
+▁Di -4079
+▁trial -4080
+nership -4081
+▁organizations -4082
+▁pressure -4083
+▁Club -4084
+▁pattern -4085
+anda -4086
+▁scen -4087
+▁Prot -4088
+▁commit -4089
+rad -4090
+▁Global -4091
+▁Next -4092
+▁dram -4093
+atever -4094
+ented -4095
+▁couldn -4096
+▁load -4097
+rapy -4098
+▁connection -4099
+▁absol -4100
+▁Week -4101
+mathcal -4102
+▁challenge -4103
+but -4104
+▁Francis -4105
+orthern -4106
+▁download -4107
+BA -4108
+▁mess -4109
+▁propos -4110
+▁surround -4111
+▁Like -4112
+▁^ -4113
+▁sector -4114
+▁Media -4115
+▁Kore -4116
+▁Tre -4117
+▁challenges -4118
+▁limit -4119
+▁opening -4120
+▁string -4121
+▁majority -4122
+▁Publ -4123
+▁reality -4124
+cape -4125
+ashion -4126
+semb -4127
+▁Elect -4128
+▁species -4129
+▁Policy -4130
+ager -4131
+▁Islam -4132
+Cont -4133
+▁amazing -4134
+▁cry -4135
+arden -4136
+▁List -4137
+▁initial -4138
+▁harm -4139
+▁elements -4140
+▁mill -4141
+▁Tor -4142
+▁budget -4143
+▁ahead -4144
+▁violence -4145
+▁micro -4146
+▁il -4147
+▁sports -4148
+▁League -4149
+▁records -4150
+": -4151
+Un -4152
+top -4153
+arant -4154
+▁favorite -4155
+▁dat -4156
+▁strategy -4157
+▁tick -4158
+▁double -4159
+oma -4160
+▁Central -4161
+standing -4162
+▁length -4163
+▁audience -4164
+▁forces -4165
+▁Russian -4166
+▁error -4167
+▁Lab -4168
+▁toward -4169
+▁Nov -4170
+▁tools -4171
+▁funding -4172
+▁rule -4173
+▁floor -4174
+▁notice -4175
+▁fresh -4176
+ocol -4177
+▁Div -4178
+win -4179
+▁Rad -4180
+met -4181
+TS -4182
+▁Er -4183
+antic -4184
+▁customer -4185
+inate -4186
+▁station -4187
+aper -4188
+▁Obama -4189
+eline -4190
+▁figure -4191
+▁laws -4192
+▁Los -4193
+▁advant -4194
+▁highest -4195
+▁Ins -4196
+▁sexual -4197
+▁hour -4198
+TM -4199
+▁sites -4200
+anta -4201
+▁Rev -4202
+▁miles -4203
+▁Americans -4204
+han -4205
+▁spect -4206
+▁ir -4207
+▁% -4208
+race -4209
+aling -4210
+▁Christmas -4211
+▁gone -4212
+▁protection -4213
+iling -4214
+▁despite -4215
+▁Market -4216
+rive -4217
+▁EU -4218
+▁lab -4219
+▁faith -4220
+▁Island -4221
+▁offered -4222
+using -4223
+▁/** -4224
+orial -4225
+▁numer -4226
+▁thus -4227
+ette -4228
+onents -4229
+▁patient -4230
+▁Community -4231
+▁Where -4232
+oms -4233
+▁methods -4234
+▁marketing -4235
+▁reduce -4236
+▁Lead -4237
+▁Gen -4238
+▁regarding -4239
+▁exerc -4240
+riage -4241
+▁satis -4242
+▁equipment -4243
+▁Back -4244
+▁applications -4245
+▁Som -4246
+▁nice -4247
+levision -4248
+▁click -4249
+▁warm -4250
+▁fig -4251
+▁null -4252
+▁artists -4253
+▁continues -4254
+▁Lu -4255
+▁wall -4256
+▁supply -4257
+▁crim -4258
+gar -4259
+iled -4260
+▁Open -4261
+imb -4262
+▁picture -4263
+▁danger -4264
+▁football -4265
+▁Hot -4266
+▁bound -4267
+▁controll -4268
+▁conflic -4269
+▁songs -4270
+date -4271
+fo -4272
+▁straight -4273
+▁administration -4274
+▁Brown -4275
+▁» -4276
+▁jobs -4277
+▁previously -4278
+▁launch -4279
+▁har -4280
+▁Fund -4281
+▁lands -4282
+mary -4283
+▁sequ -4284
+▁Micro -4285
+ternal -4286
+▁larger -4287
+▁element -4288
+▁Another -4289
+▁Chicago -4290
+▁Western -4291
+uclear -4292
+▁join -4293
+yer -4294
+usband -4295
+▁feeling -4296
+▁Earth -4297
+known -4298
+ga -4299
+PA -4300
+▁multi -4301
+▁predict -4302
+clusion -4303
+▁Thanks -4304
+▁anc -4305
+▁setting -4306
+▁cultural -4307
+▁analy -4308
+▁produce -4309
+▁wat -4310
+▁disapp -4311
+▁CD -4312
+works -4313
+▁pet -4314
+▁leadership -4315
+▁showed -4316
+▁ign -4317
+estic -4318
+/> -4319
+▁Sil -4320
+▁Yes -4321
+orney -4322
+▁Fre -4323
+▁biggest -4324
+▁Web -4325
+▁Their -4326
+erences -4327
+ре -4328
+▁laun -4329
+▁architect -4330
+eds -4331
+▁Ep -4332
+▁Mass -4333
+ta -4334
+▁basic -4335
+▁Wood -4336
+▁** -4337
+▁poll -4338
+igration -4339
+▁Security -4340
+▁partner -4341
+▁enh -4342
+SS -4343
+▁insurance -4344
+▁Invest -4345
+edia -4346
+▁Vict -4347
+▁spot -4348
+▁arrest -4349
+▁Once -4350
+▁Virgin -4351
+osure -4352
+го -4353
+▁Mont -4354
+apers -4355
+▁opened -4356
+▁vs -4357
+▁Canad -4358
+▁Engine -4359
+with -4360
+rowd -4361
+▁goals -4362
+▁Ma -4363
+▁Nor -4364
+▁sustain -4365
+▁proposed -4366
+▁sources -4367
+▁wish -4368
+▁Tim -4369
+▁unit -4370
+▁kept -4371
+▁Family -4372
+▁generally -4373
+▁train -4374
+orter -4375
+▁View -4376
+entially -4377
+▁actual -4378
+▁Love -4379
+▁End -4380
+▁direction -4381
+▁actions -4382
+gency -4383
+▁pages -4384
+▁becomes -4385
+▁accur -4386
+▁Bur -4387
+ania -4388
+osis -4389
+asion -4390
+▁fit -4391
+gu -4392
+▁daughter -4393
+▁map -4394
+cite -4395
+▁mobile -4396
+▁Jim -4397
+▁cat -4398
+▁Down -4399
+▁express -4400
+▁factors -4401
+elligence -4402
+▁calls -4403
+▁appropriate -4404
+MS -4405
+▁wait -4406
+▁Special -4407
+▁brain -4408
+inct -4409
+▁environmental -4410
+▁Show -4411
+▁|| -4412
+▁Lord -4413
+▁meant -4414
+▁attend -4415
+azon -4416
+▁Mel -4417
+▁films -4418
+inger -4419
+fficient -4420
+▁guid -4421
+fr -4422
+▁sale -4423
+▁completed -4424
+▁presence -4425
+▁Hen -4426
+▁north -4427
+▁trip -4428
+ca -4429
+lo -4430
+▁policies -4431
+▁advert -4432
+ili -4433
+▁trend -4434
+▁Bet -4435
+▁Priv -4436
+Sc -4437
+▁raised -4438
+eless -4439
+▁Each -4440
+▁Joe -4441
+itors -4442
+▁religious -4443
+path -4444
+ini -4445
+▁susp -4446
+▁eye -4447
+▁forms -4448
+includ -4449
+▁influence -4450
+▁fram -4451
+▁whom -4452
+▁Pet -4453
+▁Louis -4454
+▁magn -4455
+rd -4456
+▁standards -4457
+ura -4458
+▁Atl -4459
+▁Arts -4460
+▁push -4461
+▁Design -4462
+▁clearly -4463
+▁appears -4464
+▁nom -4465
+BS -4466
+▁approx -4467
+▁rat -4468
+▁Technology -4469
+▁param -4470
+hips -4471
+▁Cath -4472
+▁terror -4473
+▁caused -4474
+▁Lear -4475
+▁views -4476
+▁Apple -4477
+andom -4478
+▁restrict -4479
+▁van -4480
+▁seeing -4481
+ii -4482
+usion -4483
+▁Bo -4484
+▁counter -4485
+▁Series -4486
+▁capacity -4487
+▁otherwise -4488
+ena -4489
+orrow -4490
+uled -4491
+▁compar -4492
+,\ -4493
+▁Water -4494
+▁prison -4495
+▁Posted -4496
+OM -4497
+▁relevant -4498
+▁joined -4499
+▁Arab -4500
+▁famous -4501
+▁Long -4502
+▁smart -4503
+▁appreci -4504
+▁learned -4505
+▁rob -4506
+▁RE -4507
+▁senior -4508
+inner -4509
+▁rare -4510
+▁client -4511
+▁profession -4512
+agram -4513
+oses -4514
+▁earth -4515
+▁operation -4516
+itled -4517
+▁measures -4518
+▁devices -4519
+▁master -4520
+▁Town -4521
+▁Conn -4522
+▁pros -4523
+▁rap -4524
+▁Chief -4525
+unte -4526
+**** -4527
+▁truly -4528
+As -4529
+▁Oper -4530
+▁assum -4531
+prof -4532
+ruit -4533
+▁theory -4534
+▁citizens -4535
+ivery -4536
+▁developing -4537
+▁Power -4538
+▁Internet -4539
+param -4540
+reek -4541
+▁wouldn -4542
+gment -4543
+rig -4544
+To -4545
+▁instr -4546
+▁entertain -4547
+▁residents -4548
+gn -4549
+arrow -4550
+▁valid -4551
+▁Ref -4552
+▁reached -4553
+going -4554
+▁conference -4555
+▁Whe -4556
+▁aren -4557
+asy -4558
+▁flow -4559
+overy -4560
+▁insp -4561
+uit -4562
+▁Martin -4563
+▁Land -4564
+▁CEO -4565
+cell -4566
+▁Follow -4567
+▁cities -4568
+ework -4569
+▁solid -4570
+▁thousands -4571
+active -4572
+roud -4573
+▁Writ -4574
+▁loved -4575
+▁instance -4576
+▁trou -4577
+▁blue -4578
+▁funds -4579
+▁entry -4580
+CE -4581
+▁Grand -4582
+▁mentioned -4583
+abilities -4584
+▁seemed -4585
+▁unc -4586
+▁weap -4587
+▁writer -4588
+▁husband -4589
+▁south -4590
+▁appeared -4591
+▁spend -4592
+▁defend -4593
+▁radio -4594
+▁Nav -4595
+och -4596
+andemic -4597
+▁damage -4598
+▁helps -4599
+uries -4600
+Col -4601
+dis -4602
+reme -4603
+"/> -4604
+▁proceed -4605
+▁performed -4606
+AG -4607
+▁Head -4608
+yan -4609
+data -4610
+ario -4611
+FL -4612
+▁Second -4613
+}. -4614
+)( -4615
+▁phil -4616
+▁rank -4617
+▁closed -4618
+road -4619
+▁generation -4620
+OC -4621
+▁strength -4622
+▁occup -4623
+pping -4624
+▁requires -4625
+▁Rights -4626
+▁Police -4627
+▁skin -4628
+▁Polit -4629
+▁female -4630
+▁Angeles -4631
+ita -4632
+▁memory -4633
+ala -4634
+▁fan -4635
+▁и -4636
+px -4637
+acing -4638
+life -4639
+sm -4640
+▁decades -4641
+iat -4642
+▁competition -4643
+▁reference -4644
+▁Sar -4645
+▁crisis -4646
+▁excellent -4647
+▁posted -4648
+▁Den -4649
+▁brief -4650
+▁Look -4651
+writ -4652
+▁minor -4653
+▁knows -4654
+▁district -4655
+▁purchase -4656
+lig -4657
+▁Aff -4658
+▁mist -4659
+▁returned -4660
+▁journey -4661
+▁rise -4662
+comm -4663
+clusive -4664
+▁correspond -4665
+osoph -4666
+▁Mount -4667
+SP -4668
+asure -4669
+▁prices -4670
+▁photo -4671
+sum -4672
+▁agency -4673
+enses -4674
+▁defined -4675
+▁о -4676
+▁scale -4677
+ho -4678
+▁Organ -4679
+▁offering -4680
+▁officer -4681
+▁AL -4682
+▁experiences -4683
+▁Enter -4684
+▁Johnson -4685
+▁becoming -4686
+▁signed -4687
+Is -4688
+elta -4689
+▁researc -4690
+▁resist -4691
+▁transform -4692
+▁Chris -4693
+▁behavior -4694
+bour -4695
+▁advice -4696
+▁Find -4697
+▁fant -4698
+▁definitely -4699
+▁chief -4700
+▁Medical -4701
+▁adapt -4702
+▁indic -4703
+look -4704
+▁NY -4705
+▁manager -4706
+▁Main -4707
+New -4708
+▁instit -4709
+▁essential -4710
+▁opinion -4711
+▁depend -4712
+icit -4713
+▁thank -4714
+▁cop -4715
+▁slight -4716
+olars -4717
+▁mental -4718
+▁Old -4719
+▁sleep -4720
+▁failed -4721
+▁steps -4722
+abled -4723
+equation -4724
+▁distribution -4725
+▁older -4726
+▁episode -4727
+▁wid -4728
+ifically -4729
+ogue -4730
+anging -4731
+▁hair -4732
+▁machine -4733
+▁Federal -4734
+▁cash -4735
+▁battle -4736
+▁testing -4737
+▁incred -4738
+▁helping -4739
+▁Cle -4740
+▁fol -4741
+▁Professor -4742
+iques -4743
+▁situ -4744
+cher -4745
+▁guy -4746
+▁Mike -4747
+▁input -4748
+▁fra -4749
+▁ID -4750
+▁practices -4751
+alled -4752
+▁determine -4753
+IF -4754
+▁somew -4755
+}) -4756
+▁Ill -4757
+▁freedom -4758
+▁Hy -4759
+▁amb -4760
+▁score -4761
+emporary -4762
+▁graph -4763
+▁ended -4764
+rastructure -4765
+▁Person -4766
+dro -4767
+odies -4768
+▁circum -4769
+▁quant -4770
+▁interpre -4771
+azz -4772
+▁labor -4773
+▁vision -4774
+▁noted -4775
+▁Prov -4776
+▁Ire -4777
+SA -4778
+SE -4779
+▁Lake -4780
+▁selected -4781
+▁bul -4782
+▁Jeff -4783
+▁catch -4784
+▁mix -4785
+▁heavy -4786
+▁Build -4787
+▁spl -4788
+▁tend -4789
+▁homes -4790
+▁Asia -4791
+▁err -4792
+}( -4793
+▁decisions -4794
+▁rich -4795
+▁Team -4796
+▁Both -4797
+▁Photo -4798
+▁ship -4799
+word -4800
+aste -4801
+▁Cy -4802
+▁drink -4803
+▁thanks -4804
+▁IT -4805
+▁replace -4806
+hab -4807
+▁fell -4808
+▁weekend -4809
+▁Lee -4810
+▁owner -4811
+▁focused -4812
+▁executive -4813
+▁Test -4814
+▁Justice -4815
+▁Paris -4816
+▁finding -4817
+etry -4818
+▁discussion -4819
+▁eventually -4820
+nergy -4821
+▁flu -4822
+▁prefer -4823
+▁neighbor -4824
+▁throw -4825
+▁guide -4826
+▁placed -4827
+▁illust -4828
+▁AND -4829
+▁Charles -4830
+▁appeal -4831
+▁street -4832
+▁Georg -4833
+▁liber -4834
+iden -4835
+▁Sand -4836
+soft -4837
+▁Jesus -4838
+oving -4839
+▁stated -4840
+▁Section -4841
+atically -4842
+▁bur -4843
+▁dynam -4844
+▁crime -4845
+▁driving -4846
+ри -4847
+▁operating -4848
+▁covered -4849
+▁crowd -4850
+ella -4851
+eration -4852
+▁Bob -4853
+lies -4854
+▁communication -4855
+▁runs -4856
+And -4857
+poses -4858
+▁politics -4859
+hi -4860
+gs -4861
+mm -4862
+▁lived -4863
+▁concent -4864
+▁Its -4865
+burg -4866
+▁department -4867
+▁Australian -4868
+pected -4869
+isher -4870
+▁Inf -4871
+icians -4872
+▁experienced -4873
+PR -4874
+▁sea -4875
+▁contains -4876
+▁Centre -4877
+An -4878
+▁Use -4879
+▁technical -4880
+▁task -4881
+Re -4882
+▁core -4883
+igan -4884
+▁Jewish -4885
+ва -4886
+▁passion -4887
+iency -4888
+▁Exp -4889
+▁Administ -4890
+OW -4891
+othing -4892
+▁/* -4893
+▁concerns -4894
+ani -4895
+under -4896
+▁authority -4897
+sk -4898
+II -4899
+▁window -4900
+▁editor -4901
+night -4902
+▁investigation -4903
+http -4904
+▁cas -4905
+▁extremely -4906
+▁separate -4907
+▁incor -4908
+▁McC -4909
+▁cars -4910
+nel -4911
+▁justice -4912
+OD -4913
+▁afford -4914
+▁creative -4915
+▁supported -4916
+▁rent -4917
+▁lock -4918
+▁facilities -4919
+▁teaching -4920
+CT -4921
+igma -4922
+▁prote -4923
+▁mut -4924
+▁Mot -4925
+▁dim -4926
+▁monitor -4927
+▁Adv -4928
+▁showing -4929
+▁Jon -4930
+▁whatever -4931
+▁nor -4932
+▁leaving -4933
+oes -4934
+▁diagn -4935
+idden -4936
+▁Corpor -4937
+▁Canadian -4938
+▁properties -4939
+▁expression -4940
+▁disp -4941
+▁onto -4942
+ias -4943
+uthors -4944
+▁Game -4945
+▁documents -4946
+liament -4947
+▁plus -4948
+char -4949
+▁committed -4950
+▁border -4951
+ipe -4952
+cean -4953
+▁partnership -4954
+osen -4955
+▁Royal -4956
+▁arm -4957
+▁animals -4958
+▁beat -4959
+▁remind -4960
+▁cold -4961
+print -4962
+rate -4963
+▁internal -4964
+▁historical -4965
+▁heat -4966
+▁Food -4967
+oration -4968
+▁Roman -4969
+▁youth -4970
+../../ -4971
+▁CH -4972
+▁distance -4973
+▁interests -4974
+▁issued -4975
+gers -4976
+iced -4977
+▁Dou -4978
+▁articles -4979
+▁Mur -4980
+▁Middle -4981
+▁glass -4982
+▁follows -4983
+▁Young -4984
+▁construct -4985
+}^ -4986
+▁adjust -4987
+CL -4988
+▁nav -4989
+arsh -4990
+▁Muslim -4991
+▁useful -4992
+▁consequ -4993
+bed -4994
+hand -4995
+▁criminal -4996
+inem -4997
+ortunately -4998
+dule -4999
+ban -5000
+▁arrang -5001
+OP -5002
+▁married -5003
+▁thr -5004
+▁Stand -5005
+▁Ver -5006
+▁mur -5007
+▁determined -5008
+▁Saf -5009
+tered -5010
+aren -5011
+▁identify -5012
+▁Indust -5013
+cap -5014
+▁Thank -5015
+▁command -5016
+ли -5017
+▁Cup -5018
+▁recorded -5019
+▁introduced -5020
+▁visual -5021
+▁markets -5022
+No -5023
+▁managed -5024
+▁hero -5025
+urban -5026
+If -5027
+ultural -5028
+▁happens -5029
+▁storage -5030
+▁Tour -5031
+▁defense -5032
+▁Amazon -5033
+cious -5034
+.’ -5035
+zen -5036
+lt -5037
+▁Mexico -5038
+▁Fire -5039
+ingly -5040
+▁Ira -5041
+oken -5042
+▁hearing -5043
+▁critic -5044
+▁identified -5045
+▁guess -5046
+▁mostly -5047
+irmed -5048
+▁bottom -5049
+xy -5050
+]; -5051
+▁forced -5052
+▁Ireland -5053
+▁Andrew -5054
+otic -5055
+value -5056
+▁tro -5057
+View -5058
+▁pandemic -5059
+▁univers -5060
+▁Japanese -5061
+ishes -5062
+▁advantage -5063
+▁Exper -5064
+related -5065
+то -5066
+▁haven -5067
+▁numerous -5068
+~\ -5069
+▁plays -5070
+ffect -5071
+▁internet -5072
+▁fuel -5073
+▁Wall -5074
+Tr -5075
+▁Pan -5076
+friend -5077
+▁ban -5078
+▁Library -5079
+▁coach -5080
+lock -5081
+phone -5082
+▁doubt -5083
+▁stress -5084
+▁Before -5085
+▁girls -5086
+"); -5087
+▁update -5088
+TH -5089
+▁Sup -5090
+▁photos -5091
+▁importance -5092
+▁launched -5093
+MA -5094
+▁purposes -5095
+▁Senate -5096
+▁Way -5097
+ship -5098
+}_ -5099
+▁applied -5100
+▁Virginia -5101
+▁clinical -5102
+▁Id -5103
+phas -5104
+▁Jones -5105
+▁sam -5106
+▁chair -5107
+▁instruct -5108
+▁traffic -5109
+▁Du -5110
+▁Victor -5111
+empl -5112
+▁television -5113
+▁retail -5114
+▁finished -5115
+opping -5116
+') -5117
+▁guys -5118
+inations -5119
+▁Aud -5120
+▁breat -5121
+sembly -5122
+▁speech -5123
+▁Ter -5124
+▁delivery -5125
+rieved -5126
+▁watching -5127
+▁apparent -5128
+▁advoc -5129
+allery -5130
+▁witness -5131
+▁burn -5132
+▁identity -5133
+▁hab -5134
+translation -5135
+▁Festival -5136
+▁weather -5137
+oted -5138
+▁survey -5139
+▁cloud -5140
+racy -5141
+▁sets -5142
+▁Daniel -5143
+▁Call -5144
+▁consid -5145
+utely -5146
+ideos -5147
+▁dedicated -5148
+okes -5149
+itutional -5150
+▁Men -5151
+▁Joseph -5152
+kins -5153
+item -5154
+ка -5155
+▁officers -5156
+▁explore -5157
+▁hotel -5158
+eah -5159
+▁Film -5160
+▁changing -5161
+▁column -5162
+ading -5163
+iner -5164
+▁Through -5165
+▁static -5166
+illa -5167
+▁bra -5168
+▁Cast -5169
+ser -5170
+▁Watch -5171
+▁alle -5172
+▁PC -5173
+rison -5174
+▁ec -5175
+▁Energy -5176
+nam -5177
+AB -5178
+ribe -5179
+Get -5180
+▁advance -5181
+va -5182
+while -5183
+EW -5184
+ucky -5185
+olar -5186
+ua -5187
+▁listen -5188
+string -5189
+inating -5190
+rics -5191
+▁Secretary -5192
+▁fif -5193
+▁Academy -5194
+▁institutions -5195
+▁proud -5196
+▁partners -5197
+▁gar -5198
+▁prepar -5199
+▁healthy -5200
+rier -5201
+▁prepared -5202
+▁session -5203
+▁Iraq -5204
+▁pun -5205
+▁Real -5206
+▁Valley -5207
+▁Network -5208
+stein -5209
+▁reli -5210
+▁Boy -5211
+▁driver -5212
+▁notes -5213
+alian -5214
+▁pp -5215
+▁row -5216
+ses -5217
+▁reb -5218
+MP -5219
+▁maintain -5220
+What -5221
+▁alternative -5222
+▁movies -5223
+Data -5224
+▁carried -5225
+ixt -5226
+Ind -5227
+▁connected -5228
+▁university -5229
+▁Haw -5230
+▁manufacture -5231
+mail -5232
+▁processes -5233
+perty -5234
+▁Bas -5235
+Ob -5236
+▁smaller -5237
+▁links -5238
+▁camera -5239
+oves -5240
+▁Gal -5241
+▁Gra -5242
+▁Sports -5243
+rehens -5244
+▁responsibility -5245
+▁balance -5246
+▁Pac -5247
+umer -5248
+host -5249
+alpha -5250
+▁Penn -5251
+rid -5252
+atures -5253
+▁Army -5254
+▁Note -5255
+▁wonderful -5256
+!” -5257
+how -5258
+▁guarant -5259
+ен -5260
+▁files -5261
+▁Ok -5262
+LC -5263
+ns -5264
+▁Dev -5265
+▁nut -5266
+▁на -5267
+▁narr -5268
+▁dig -5269
+▁easier -5270
+▁Blue -5271
+▁Coll -5272
+▁Further -5273
+▁agreed -5274
+https -5275
+ulty -5276
+▁coast -5277
+▁output -5278
+▁unless -5279
+ears -5280
+▁corporate -5281
+▁Environment -5282
+rum -5283
+▁fashion -5284
+▁tree -5285
+oti -5286
+▁nuclear -5287
+▁temperature -5288
+▁injury -5289
+▁selection -5290
+▁config -5291
+">< -5292
+▁housing -5293
+▁drop -5294
+▁stars -5295
+abase -5296
+itchen -5297
+▁shut -5298
+ups -5299
+ipped -5300
+ensions -5301
+▁failure -5302
+arks -5303
+▁pieces -5304
+▁Charl -5305
+▁brings -5306
+igen -5307
+▁classes -5308
+▁negative -5309
+▁winning -5310
+▁evening -5311
+▁Children -5312
+▁Ken -5313
+oston -5314
+asks -5315
+▁array -5316
+▁decre -5317
+▁circumst -5318
+iler -5319
+▁announce -5320
+▁volunte -5321
+▁experts -5322
+For -5323
+ooth -5324
+▁kid -5325
+▁Studies -5326
+owl -5327
+▁joint -5328
+▁Phot -5329
+▁Max -5330
+▁Pak -5331
+▁util -5332
+▁rapid -5333
+▁adding -5334
+▁sounds -5335
+▁relationships -5336
+▁Check -5337
+▁academic -5338
+▁nine -5339
+▁enforce -5340
+ola -5341
+ora -5342
+▁fourth -5343
+▁willing -5344
+▁units -5345
+ffee -5346
+▁sport -5347
+▁exchange -5348
+bi -5349
+▁Uk -5350
+ervice -5351
+achel -5352
+secut -5353
+Se -5354
+▁distribut -5355
+rence -5356
+stream -5357
+▁waiting -5358
+ambda -5359
+cript -5360
+alle -5361
+▁revealed -5362
+▁д -5363
+▁centre -5364
+DF -5365
+ograp -5366
+▁classic -5367
+▁Ro -5368
+bur -5369
+▁Only -5370
+ployment -5371
+▁index -5372
+▁herself -5373
+▁Lim -5374
+▁Those -5375
+▁calling -5376
+▁estate -5377
+ingu -5378
+▁raise -5379
+▁authorities -5380
+▁kill -5381
+agged -5382
+▁functions -5383
+▁explain -5384
+▁Hor -5385
+▁eat -5386
+▁belief -5387
+lymp -5388
+amma -5389
+comes -5390
+▁Ell -5391
+▁&& -5392
+▁payment -5393
+▁Color -5394
+▁Dom -5395
+unning -5396
+amm -5397
+▁buildings -5398
+▁label -5399
+та -5400
+▁String -5401
+▁Wild -5402
+▁shape -5403
+ansion -5404
+▁evalu -5405
+▁transl -5406
+ón -5407
+▁marriage -5408
+den -5409
+▁Nig -5410
+▁advent -5411
+▁spending -5412
+▁intended -5413
+▁CA -5414
+ensity -5415
+Al -5416
+▁dial -5417
+idential -5418
+▁Wal -5419
+▁reform -5420
+bo -5421
+▁shares -5422
+which -5423
+▁item -5424
+▁advanced -5425
+▁debt -5426
+▁Master -5427
+iples -5428
+▁equal -5429
+▁factor -5430
+▁supposed -5431
+Value -5432
+▁Ext -5433
+obe -5434
+▁leads -5435
+rical -5436
+▁secure -5437
+▁Equ -5438
+astern -5439
+▁· -5440
+▁metal -5441
+▁ice -5442
+fol -5443
+gery -5444
+▁Card -5445
+▁infrastructure -5446
+agement -5447
+ancer -5448
+▁Williams -5449
+Le -5450
+▁gain -5451
+▁musical -5452
+.' -5453
+▁manner -5454
+ui -5455
+▁missing -5456
+▁tells -5457
+aks -5458
+▁coord -5459
+▁Despite -5460
+▁IP -5461
+pace -5462
+▁Catholic -5463
+▁assert -5464
+▁Tri -5465
+▁male -5466
+▁inspir -5467
+▁Chair -5468
+But -5469
+▁Italy -5470
+▁Ltd -5471
+rc -5472
+▁volume -5473
+▁supporting -5474
+▁facility -5475
+▁destroy -5476
+abin -5477
+▁Champions -5478
+▁grad -5479
+Pl -5480
+▁affected -5481
+rant -5482
+▁tele -5483
+elines -5484
+▁starts -5485
+▁enem -5486
+▁Hospital -5487
+▁mort -5488
+▁debut -5489
+▁sched -5490
+▁approximately -5491
+▁fields -5492
+▁san -5493
+▁Lo -5494
+▁Trust -5495
+agon -5496
+▁Conference -5497
+▁ment -5498
+▁Online -5499
+▁regul -5500
+▁explained -5501
+▁Gar -5502
+▁discl -5503
+xi -5504
+▁Die -5505
+▁shop -5506
+▁ing -5507
+▁scientific -5508
+cknow -5509
+▁holds -5510
+ushed -5511
+▁lit -5512
+▁Walk -5513
+GB -5514
+▁protected -5515
+▁keeping -5516
+▁Set -5517
+’. -5518
+▁obtained -5519
+▁conclud -5520
+▁belong -5521
+▁depart -5522
+itage -5523
+▁Van -5524
+.; -5525
+west -5526
+ossible -5527
+▁combination -5528
+Im -5529
+▁Have -5530
+▁concerned -5531
+▁manage -5532
+ECT -5533
+▁Cre -5534
+▁Republican -5535
+▁\\ -5536
+▁sharing -5537
+▁approved -5538
+▁conflict -5539
+▁baby -5540
+▁Sign -5541
+▁Jun -5542
+▁Henry -5543
+▁researchers -5544
+orters -5545
+▁holding -5546
+▁teacher -5547
+isters -5548
+There -5549
+lywood -5550
+▁conducted -5551
+▁coverage -5552
+▁matters -5553
+▁Hon -5554
+▁lose -5555
+anish -5556
+▁attacks -5557
+▁plants -5558
+ih -5559
+▁org -5560
+▁Britain -5561
+▁lots -5562
+▁feels -5563
+▁Executive -5564
+▁vehicles -5565
+▁ton -5566
+▁listed -5567
+▁underst -5568
+▁spring -5569
+▁remove -5570
+▁Four -5571
+▁Fort -5572
+▁resol -5573
+link -5574
+▁apart -5575
+▁massive -5576
+▁Mic -5577
+▁Boston -5578
+amin -5579
+▁none -5580
+Man -5581
+gypt -5582
+▁Tay -5583
+ль -5584
+▁Steve -5585
+▁Carolina -5586
+▁Three -5587
+▁significantly -5588
+▁wealth -5589
+▁processing -5590
+HS -5591
+▁commission -5592
+▁teachers -5593
+ographic -5594
+▁pair -5595
+▁Kar -5596
+▁Iss -5597
+ocracy -5598
+▁Space -5599
+oster -5600
+▁regional -5601
+▁Liber -5602
+▁jump -5603
+ла -5604
+ilit -5605
+▁pod -5606
+▁Awards -5607
+can -5608
+▁satisf -5609
+▁minimum -5610
+▁Pop -5611
+▁honest -5612
+▁reject -5613
+==== -5614
+▁emergency -5615
+▁exercise -5616
+▁($ -5617
+▁argument -5618
+▁phase -5619
+eller -5620
+icial -5621
+df -5622
+▁forest -5623
+▁Again -5624
+olit -5625
+TER -5626
+▁Scot -5627
+▁Columb -5628
+▁Olymp -5629
+▁Supreme -5630
+legal -5631
+▁familiar -5632
+▁Retrieved -5633
+▁perspective -5634
+▁vac -5635
+elled -5636
+▁motor -5637
+▁allowing -5638
+▁handle -5639
+▁greatest -5640
+▁Support -5641
+▁island -5642
+omy -5643
+▁Civil -5644
+▁AR -5645
+size -5646
+SC -5647
+▁discovered -5648
+▁Live -5649
+▁registered -5650
+▁Govern -5651
+._ -5652
+pat -5653
+▁bond -5654
+▁dance -5655
+aid -5656
+▁panel -5657
+▁emphas -5658
+fe -5659
+▁dry -5660
+▁welcome -5661
+▁Ari -5662
+▁package -5663
+iana -5664
+law -5665
+)\ -5666
+▁technologies -5667
+TR -5668
+▁bringing -5669
+ressive -5670
+▁/// -5671
+▁pictures -5672
+▁Donald -5673
+▁conversation -5674
+iveness -5675
+icious -5676
+▁default -5677
+▁Squ -5678
+▁draft -5679
+▁sac -5680
+▁Pakistan -5681
+ipping -5682
+▁discover -5683
+rix -5684
+▁specifically -5685
+▁subsc -5686
+▁vent -5687
+dden -5688
+▁substant -5689
+All -5690
+▁bird -5691
+cles -5692
+▁efficient -5693
+▁pool -5694
+▁Product -5695
+▁doctor -5696
+▁pil -5697
+free -5698
+▁batter -5699
+▁gender -5700
+▁hyp -5701
+otes -5702
+iverse -5703
+▁domestic -5704
+▁Log -5705
+▁Ap -5706
+▁Kingdom -5707
+▁carbon -5708
+ker -5709
+▁abuse -5710
+’, -5711
+const -5712
+▁aspects -5713
+gor -5714
+▁Copy -5715
+▁committee -5716
+enced -5717
+requ -5718
+▁owners -5719
+andon -5720
+▁Start -5721
+▁tests -5722
+ione -5723
+▁edge -5724
+▁Far -5725
+ATION -5726
+uster -5727
+▁resource -5728
+▁agent -5729
+▁administr -5730
+▁optim -5731
+▁era -5732
+▁flight -5733
+ompl -5734
+ivered -5735
+▁inn -5736
+▁charges -5737
+▁Francisco -5738
+▁Code -5739
+▁asking -5740
+▁fish -5741
+▁doct -5742
+▁Jac -5743
+▁Kim -5744
+rite -5745
+▁fle -5746
+▁divid -5747
+ogen -5748
+▁reduced -5749
+▁industrial -5750
+width -5751
+▁Lar -5752
+hent -5753
+prene -5754
+inton -5755
+coh -5756
+▁Thus -5757
+▁debate -5758
+▁putting -5759
+▁combined -5760
+AA -5761
+kes -5762
+▁Po -5763
+▁cere -5764
+▁Spring -5765
+($ -5766
+▁murder -5767
+sen -5768
+atus -5769
+▁Force -5770
+▁tradition -5771
+▁decade -5772
+▁Eth -5773
+▁Phys -5774
+▁subs -5775
+lines -5776
+ba -5777
+▁circumstances -5778
+▁AD -5779
+arp -5780
+▁maximum -5781
+iser -5782
+▁Jackson -5783
+▁height -5784
+▁alleged -5785
+aked -5786
+reprene -5787
+arian -5788
+▁excited -5789
+▁commitment -5790
+▁Egypt -5791
+nic -5792
+mond -5793
+rees -5794
+▁village -5795
+▁sudden -5796
+▁ancient -5797
+table -5798
+▁random -5799
+ки -5800
+build -5801
+▁soul -5802
+BI -5803
+irty -5804
+▁creation -5805
+elly -5806
+mosp -5807
+▁finish -5808
+▁removed -5809
+▁animal -5810
+▁Southern -5811
+▁winter -5812
+▁severe -5813
+▁chemical -5814
+те -5815
+By -5816
+greg -5817
+▁Ha -5818
+▁Beach -5819
+On -5820
+pload -5821
+▁paint -5822
+▁format -5823
+▁hundreds -5824
+hetic -5825
+▁acknow -5826
+mes -5827
+▁thoughts -5828
+▁Microsoft -5829
+▁renew -5830
+ran -5831
+▁Make -5832
+▁treated -5833
+▁rain -5834
+Class -5835
+▁mechanism -5836
+▁candidate -5837
+▁rom -5838
+▁mere -5839
+▁ran -5840
+▁agencies -5841
+▁serving -5842
+▁grew -5843
+▁fat -5844
+▁fix -5845
+▁Digital -5846
+▁{\ -5847
+orders -5848
+▁rail -5849
+▁formed -5850
+▁fort -5851
+▁craft -5852
+▁da -5853
+▁believed -5854
+iments -5855
+▁rid -5856
+▁seeking -5857
+▁pal -5858
+▁authors -5859
+▁au -5860
+▁по -5861
+=\ -5862
+test -5863
+irms -5864
+▁king -5865
+index -5866
+▁readers -5867
+▁Division -5868
+▁corner -5869
+etime -5870
+▁filed -5871
+▁Light -5872
+▁fighting -5873
+▁Expl -5874
+▁weak -5875
+▁bath -5876
+▁highlight -5877
+▁kick -5878
+ocument -5879
+ician -5880
+▁ast -5881
+▁Medicine -5882
+▁accident -5883
+▁Yet -5884
+▁DC -5885
+▁cooper -5886
+▁Instead -5887
+▁}, -5888
+▁leaves -5889
+▁waste -5890
+▁Access -5891
+▁theme -5892
+▁Non -5893
+▁revenue -5894
+olec -5895
+ipal -5896
+▁observed -5897
+▁Georgia -5898
+js -5899
+▁reviews -5900
+▁participants -5901
+▁Take -5902
+▁Sus -5903
+▁ath -5904
+Exception -5905
+▁script -5906
+▁entirely -5907
+▁caught -5908
+parent -5909
+▁standing -5910
+EP -5911
+▁involve -5912
+athan -5913
+____ -5914
+▁promote -5915
+▁edition -5916
+▁confirmed -5917
+▁arrived -5918
+anche -5919
+▁Night -5920
+▁adop -5921
+▁sympt -5922
+▁detect -5923
+razil -5924
+és -5925
+▁Full -5926
+▁slightly -5927
+▁decide -5928
+real -5929
+▁Ol -5930
+▁adults -5931
+▁library -5932
+▁les -5933
+▁Games -5934
+power -5935
+▁incorpor -5936
+aser -5937
+oles -5938
+▁crew -5939
+prise -5940
+▁Right -5941
+▁worse -5942
+▁symbol -5943
+ipl -5944
+▁begins -5945
+▁defe -5946
+▁flex -5947
+)); -5948
+▁employment -5949
+rich -5950
+▁intelligence -5951
+▁drugs -5952
+▁territ -5953
+▁assistance -5954
+▁ON -5955
+▁flat -5956
+cd -5957
+uing -5958
+onav -5959
+Be -5960
+▁transition -5961
+ador -5962
+osophy -5963
+▁accompl -5964
+pack -5965
+▁Win -5966
+▁inspired -5967
+hop -5968
+▁AT -5969
+▁relatively -5970
+▁syn -5971
+▁Pay -5972
+ircraft -5973
+▁differences -5974
+ghan -5975
+▁indeed -5976
+king -5977
+▁Est -5978
+▁assets -5979
+eler -5980
+▁newsp -5981
+fra -5982
+▁concert -5983
+▁plenty -5984
+▁square -5985
+▁spons -5986
+▁veter -5987
+▁subsequ -5988
+▁filled -5989
+▁Italian -5990
+▁dollars -5991
+▁ideal -5992
+apping -5993
+▁consumer -5994
+▁Democratic -5995
+▁forget -5996
+sl -5997
+▁Article -5998
+▁returns -5999
+osc -6000
+▁Little -6001
+▁diss -6002
+▁examples -6003
+pty -6004
+content -6005
+▁absolutely -6006
+CR -6007
+▁Korea -6008
+kin -6009
+▁Windows -6010
+▁category -6011
+▁Fair -6012
+▁Term -6013
+▁aid -6014
+▁Palest -6015
+▁Hom -6016
+iano -6017
+ati -6018
+▁server -6019
+▁Does -6020
+tes -6021
+po -6022
+▁km -6023
+ellect -6024
+▁obtain -6025
+▁Pass -6026
+othes -6027
+▁accounts -6028
+▁Matt -6029
+▁vul -6030
+▁Story -6031
+▁PR -6032
+oral -6033
+▁Previous -6034
+arts -6035
+eps -6036
+person -6037
+▁Did -6038
+▁Irish -6039
+▁Mid -6040
+▁positions -6041
+▁seat -6042
+mathbb -6043
+otal -6044
+▁Kenn -6045
+▁sand -6046
+▁« -6047
+▁regulations -6048
+▁solar -6049
+▁uns -6050
+ulf -6051
+▁motion -6052
+Int -6053
+▁Spanish -6054
+▁gather -6055
+▁figures -6056
+abeth -6057
+war -6058
+▁statements -6059
+stra -6060
+▁techniques -6061
+pire -6062
+▁exciting -6063
+▁Video -6064
+EF -6065
+▁tom -6066
+EST -6067
+essions -6068
+▁Creat -6069
+upp -6070
+▁Fed -6071
+Mod -6072
+▁remaining -6073
+▁Histor -6074
+▁restaurant -6075
+▁visitors -6076
+▁legislation -6077
+▁pursu -6078
+▁perman -6079
+▁Cross -6080
+rehensive -6081
+alysis -6082
+▁ce -6083
+▁motiv -6084
+▁forg -6085
+▁Prime -6086
+▁Theatre -6087
+▁Dar -6088
+▁bright -6089
+▁trees -6090
+▁delivered -6091
+▁Click -6092
+asters -6093
+ansas -6094
+outs -6095
+▁stick -6096
+rome -6097
+▁goods -6098
+▁Mov -6099
+▁license -6100
+▁Taylor -6101
+▁components -6102
+▁swe -6103
+▁Michigan -6104
+▁causes -6105
+▁hun -6106
+▁Mean -6107
+▁flo -6108
+water -6109
+Qu -6110
+▁judge -6111
+▁detailed -6112
+▁elim -6113
+mu -6114
+▁Son -6115
+yers -6116
+▁locations -6117
+▁Engineering -6118
+▁mir -6119
+ja -6120
+▁Brook -6121
+▁videos -6122
+En -6123
+▁employee -6124
+jo -6125
+▁victory -6126
+▁religion -6127
+tenance -6128
+ERS -6129
+De -6130
+▁educational -6131
+▁Circ -6132
+▁museum -6133
+▁dangerous -6134
+▁Wat -6135
+▁referred -6136
+▁candidates -6137
+long -6138
+▁discussed -6139
+▁enjoyed -6140
+lem -6141
+▁Ent -6142
+pet -6143
+draw -6144
+▁Ram -6145
+▁Sciences -6146
+ifications -6147
+▁route -6148
+▁shock -6149
+▁native -6150
+▁Contact -6151
+▁recognized -6152
+▁urban -6153
+function -6154
+▁negoti -6155
+uable -6156
+▁iron -6157
+ye -6158
+▁tow -6159
+▁WH -6160
+aded -6161
+"; -6162
+AY -6163
+▁prove -6164
+iture -6165
+▁turns -6166
+▁wear -6167
+▁Key -6168
+▁speaking -6169
+ket -6170
+▁brows -6171
+▁persons -6172
+▁Pacific -6173
+▁chain -6174
+DS -6175
+aven -6176
+▁channel -6177
+bridge -6178
+▁accepted -6179
+issions -6180
+▁reporting -6181
+▁odd -6182
+▁Kh -6183
+ylv -6184
+▁fill -6185
+▁charged -6186
+▁stopped -6187
+othe -6188
+▁studio -6189
+▁sides -6190
+inding -6191
+▁entered -6192
+▁SP -6193
+▁conver -6194
+▁Jr -6195
+ruption -6196
+▁investors -6197
+▁objects -6198
+▁yes -6199
+▁switch -6200
+▁Eliz -6201
+▁medium -6202
+erves -6203
+eper -6204
+▁Know -6205
+▁spoke -6206
+▁sister -6207
+onavirus -6208
+▁MP -6209
+FC -6210
+▁snow -6211
+▁walking -6212
+▁mand -6213
+▁millions -6214
+▁possibly -6215
+▁virtual -6216
+App -6217
+▁Brazil -6218
+resents -6219
+▁Custom -6220
+Americ -6221
+▁Orig -6222
+▁conven -6223
+▁mode -6224
+▁Prem -6225
+▁Though -6226
+▁tun -6227
+▁sample -6228
+taining -6229
+▁ven -6230
+▁beauty -6231
+▁fellow -6232
+▁pref -6233
+▁planet -6234
+▁fab -6235
+Er -6236
+Object -6237
+▁honor -6238
+▁resolution -6239
+ommod -6240
+▁facts -6241
+▁originally -6242
+▁telling -6243
+phi -6244
+▁Nick -6245
+▁updated -6246
+▁gift -6247
+▁Rest -6248
+▁topic -6249
+▁planned -6250
+merce -6251
+urrent -6252
+align -6253
+▁contrast -6254
+▁favour -6255
+ownt -6256
+page -6257
+raine -6258
+▁desire -6259
+▁tit -6260
+▁ongoing -6261
+▁household -6262
+▁worldwide -6263
+script -6264
+▁receiving -6265
+▁Northern -6266
+▁Maybe -6267
+▁professionals -6268
+▁schedule -6269
+▁Still -6270
+yles -6271
+level -6272
+▁shel -6273
+▁signs -6274
+▁Queen -6275
+▁distinct -6276
+illy -6277
+▁engineering -6278
+TC -6279
+▁extended -6280
+anges -6281
+edding -6282
+▁Local -6283
+▁Ron -6284
+▁directed -6285
+▁typically -6286
+inity -6287
+▁Sea -6288
+▁minute -6289
+▁Foot -6290
+gent -6291
+noon -6292
+▁adm -6293
+▁Meet -6294
+▁extent -6295
+▁icon -6296
+▁risks -6297
+▁Rom -6298
+▁guitar -6299
+isk -6300
+▁Guide -6301
+▁Mu -6302
+_\ -6303
+▁Resp -6304
+▁respectively -6305
+%) -6306
+▁exec -6307
+▁Books -6308
+▁Girl -6309
+▁programme -6310
+▁implementation -6311
+▁incident -6312
+▁Therefore -6313
+ensus -6314
+▁scholars -6315
+▁neighborhood -6316
+▁Kn -6317
+▁estimated -6318
+▁Turn -6319
+▁Finally -6320
+esterday -6321
+▁sweet -6322
+array -6323
+▁confidence -6324
+▁Current -6325
+▁plot -6326
+▁owned -6327
+▁Fred -6328
+▁posts -6329
+▁Zeal -6330
+▁recon -6331
+cription -6332
+iti -6333
+mathbf -6334
+Don -6335
+▁Ren -6336
+▁sad -6337
+ocket -6338
+▁emotional -6339
+▁Hard -6340
+▁Table -6341
+▁; -6342
+▁council -6343
+▁selling -6344
+▁Tem -6345
+▁appearance -6346
+aph -6347
+▁Mult -6348
+▁III -6349
+▁phen -6350
+!" -6351
+▁assessment -6352
+▁quiet -6353
+▁expressed -6354
+▁warn -6355
+▁atmosp -6356
+▁claimed -6357
+▁stream -6358
+▁Financial -6359
+▁fixed -6360
+▁privacy -6361
+inst -6362
+▁west -6363
+▁accord -6364
+▁execut -6365
+▁interpret -6366
+▁Coast -6367
+▁featured -6368
+(). -6369
+▁chapter -6370
+▁consumers -6371
+▁rac -6372
+▁victims -6373
+▁behavi -6374
+gorith -6375
+▁dress -6376
+▁Season -6377
+▁м -6378
+▁improved -6379
+▁__ -6380
+▁Psych -6381
+▁coal -6382
+var -6383
+▁Tal -6384
+endment -6385
+gely -6386
+pi -6387
+▁Carl -6388
+file -6389
+▁county -6390
+▁contemporary -6391
+▁Lind -6392
+▁Fox -6393
+▁furn -6394
+▁instrument -6395
+▁diverse -6396
+stract -6397
+▁Bra -6398
+▁weapons -6399
+adow -6400
+▁ends -6401
+▁Spain -6402
+▁closer -6403
+▁literature -6404
+osite -6405
+▁frame -6406
+▁Page -6407
+▁Tax -6408
+▁Stan -6409
+▁Eric -6410
+▁profile -6411
+▁Sat -6412
+▁sitting -6413
+▁therapy -6414
+▁Bern -6415
+rec -6416
+▁Daily -6417
+▁з -6418
+▁Cost -6419
+▁forth -6420
+▁happening -6421
+▁Sir -6422
+▁bought -6423
+crete -6424
+▁ourselves -6425
+▁component -6426
+▁sufficient -6427
+▁represents -6428
+▁ride -6429
+▁suggested -6430
+▁talent -6431
+uls -6432
+▁MS -6433
+▁definition -6434
+TA -6435
+iary -6436
+▁}); -6437
+▁NOT -6438
+▁Ray -6439
+▁delay -6440
+▁strategies -6441
+▁fasc -6442
+▁constant -6443
+▁Rod -6444
+wan -6445
+▁Sher -6446
+▁Applic -6447
+enger -6448
+▁bodies -6449
+▁Ohio -6450
+osit -6451
+▁remained -6452
+▁photograph -6453
+ureau -6454
+iki -6455
+▁AS -6456
+▁Perform -6457
+enth -6458
+▁Hu -6459
+news -6460
+style -6461
+▁vast -6462
+▁reasonable -6463
+door -6464
+▁Ash -6465
+ounter -6466
+▁Using -6467
+zy -6468
+That -6469
+▁virus -6470
+▁talks -6471
+▁description -6472
+▁comfortable -6473
+cohol -6474
+ло -6475
+▁Zealand -6476
+uling -6477
+▁Hun -6478
+▁shooting -6479
+▁!= -6480
+ле -6481
+▁button -6482
+▁roles -6483
+▁Hand -6484
+CS -6485
+▁Labor -6486
+▁liqu -6487
+▁storm -6488
+utch -6489
+▁coffee -6490
+▁regions -6491
+▁teen -6492
+run -6493
+inson -6494
+▁Administration -6495
+▁Control -6496
+▁arts -6497
+▁cart -6498
+▁Copyright -6499
+ervative -6500
+▁teach -6501
+onto -6502
+uality -6503
+ums -6504
+ether -6505
+▁twice -6506
+▁ped -6507
+▁Li -6508
+▁considering -6509
+▁god -6510
+▁scientists -6511
+▁Search -6512
+redit -6513
+▁Field -6514
+ibilities -6515
+ifts -6516
+▁elected -6517
+from -6518
+▁serves -6519
+▁moments -6520
+▁occasion -6521
+holders -6522
+▁fees -6523
+position -6524
+▁pure -6525
+cover -6526
+ATE -6527
+foot -6528
+▁guard -6529
+▁rough -6530
+▁Ryan -6531
+▁suggests -6532
+▁worst -6533
+ighter -6534
+▁arms -6535
+▁signal -6536
+ads -6537
+▁accomp -6538
+▁isol -6539
+▁enable -6540
+▁Bal -6541
+▁Lew -6542
+With -6543
+inent -6544
+fil -6545
+▁vulner -6546
+▁expensive -6547
+▁portion -6548
+▁Which -6549
+▁Es -6550
+▁recip -6551
+▁Disney -6552
+▁Ly -6553
+▁founded -6554
+▁chosen -6555
+▁Jenn -6556
+▁electronic -6557
+iro -6558
+rovers -6559
+▁acting -6560
+▁proof -6561
+▁river -6562
+▁cards -6563
+▁broken -6564
+▁largely -6565
+▁Clin -6566
+ez -6567
+▁legisl -6568
+▁scheme -6569
+▁Stephen -6570
+^{- -6571
+lyn -6572
+▁networks -6573
+adium -6574
+▁facing -6575
+DP -6576
+▁earned -6577
+▁adult -6578
+born -6579
+▁banks -6580
+'); -6581
+user -6582
+▁remark -6583
+▁kitchen -6584
+▁Anth -6585
+group -6586
+▁innovation -6587
+▁Point -6588
+▁Mrs -6589
+▁Cov -6590
+▁holiday -6591
+▁agric -6592
+tra -6593
+▁stands -6594
+osa -6595
+▁CN -6596
+onym -6597
+▁evolution -6598
+▁Adam -6599
+▁rot -6600
+rog -6601
+][ -6602
+▁Action -6603
+▁impossible -6604
+▁taught -6605
+▁campus -6606
+▁houses -6607
+▁Assist -6608
+option -6609
+ao -6610
+▁sav -6611
+pan -6612
+cho -6613
+▁collaboration -6614
+▁observ -6615
+▁league -6616
+▁Fact -6617
+iversary -6618
+agues -6619
+▁imagine -6620
+orough -6621
+Time -6622
+▁Source -6623
+▁boys -6624
+▁professor -6625
+bal -6626
+adel -6627
+▁Tags -6628
+conn -6629
+▁Bush -6630
+▁accommod -6631
+▁encourage -6632
+▁increasingly -6633
+▁Elizabeth -6634
+▁Broad -6635
+▁describe -6636
+amental -6637
+▁suit -6638
+UM -6639
+▁Radio -6640
+▁ultimately -6641
+orry -6642
+▁innovative -6643
+Par -6644
+▁surprise -6645
+▁tough -6646
+▁mel -6647
+▁actor -6648
+▁aircraft -6649
+▁acts -6650
+▁info -6651
+obs -6652
+ptions -6653
+▁Put -6654
+▁healthcare -6655
+ican -6656
+▁Cook -6657
+▁humans -6658
+▁collected -6659
+▁stores -6660
+ifies -6661
+lambda -6662
+▁relative -6663
+не -6664
+▁Fac -6665
+▁Ms -6666
+▁enforcement -6667
+▁mouth -6668
+▁finds -6669
+▁historic -6670
+▁à -6671
+▁OR -6672
+▁orders -6673
+▁Mah -6674
+▁revolution -6675
+olds -6676
+▁worry -6677
+fast -6678
+ESS -6679
+▁Categ -6680
+во -6681
+▁Blog -6682
+▁stret -6683
+▁>> -6684
+▁}; -6685
+My -6686
+▁colle -6687
+▁intent -6688
+ras -6689
+▁seconds -6690
+creen -6691
+udi -6692
+inder -6693
+▁Asian -6694
+State -6695
+icted -6696
+▁recording -6697
+cons -6698
+▁possibility -6699
+location -6700
+anna -6701
+▁resulting -6702
+▁extensive -6703
+▁compens -6704
+▁Luc -6705
+length -6706
+▁fee -6707
+▁recovery -6708
+▁existence -6709
+ogy -6710
+▁FOR -6711
+▁effectively -6712
+': -6713
+rage -6714
+aine -6715
+▁soc -6716
+▁practical -6717
+▁wine -6718
+▁afternoon -6719
+▁Tenn -6720
+iral -6721
+edu -6722
+mat -6723
+color -6724
+▁Ford -6725
+EX -6726
+▁Jersey -6727
+▁Such -6728
+▁believes -6729
+▁cro -6730
+▁Ur -6731
+▁evol -6732
+▁Officer -6733
+Text -6734
+ned -6735
+ordan -6736
+▁Imp -6737
+avor -6738
+uma -6739
+▁Ox -6740
+▁CL -6741
+▁simpl -6742
+▁strategic -6743
+iest -6744
+▁surrounding -6745
+angle -6746
+▁awarded -6747
+Bl -6748
+apore -6749
+Ed -6750
+▁occurred -6751
+▁Order -6752
+▁confirm -6753
+usal -6754
+izer -6755
+▁external -6756
+▁Hollywood -6757
+▁Disc -6758
+▁winner -6759
+bsp -6760
+▁powers -6761
+▁tight -6762
+▁streets -6763
+aduate -6764
+bury -6765
+unicip -6766
+profit -6767
+▁manufacturing -6768
+rency -6769
+▁consistent -6770
+▁advertising -6771
+▁symptoms -6772
+ellectual -6773
+▁Staff -6774
+^{\ -6775
+▁east -6776
+▁wire -6777
+▁elig -6778
+▁sustainable -6779
+▁Viet -6780
+▁suc -6781
+▁minister -6782
+▁participate -6783
+▁depth -6784
+lights -6785
+acter -6786
+▁Brian -6787
+▁shift -6788
+▁widely -6789
+▁attorney -6790
+fin -6791
+erman -6792
+▁Chapter -6793
+▁Bul -6794
+car -6795
+icide -6796
+▁closely -6797
+▁Common -6798
+▁MA -6799
+ifer -6800
+▁pregn -6801
+▁Mir -6802
+enda -6803
+▁magazine -6804
+▁mine -6805
+▁alt -6806
+eries -6807
+▁database -6808
+Set -6809
+▁mit -6810
+▁boost -6811
+oir -6812
+▁struggle -6813
+▁ring -6814
+▁Cas -6815
+▁Cat -6816
+▁Economic -6817
+ер -6818
+▁properly -6819
+▁Connect -6820
+▁grown -6821
+▁comprehensive -6822
+▁bigger -6823
+▁realize -6824
+ello -6825
+▁rooms -6826
+▁domain -6827
+▁expansion -6828
+▁latter -6829
+▁Mill -6830
+▁Imm -6831
+▁Dam -6832
+iform -6833
+▁Latin -6834
+▁enhance -6835
+lets -6836
+▁wave -6837
+▁repeated -6838
+▁reaction -6839
+▁vaccine -6840
+▁bes -6841
+▁totally -6842
+▁Date -6843
+▁mail -6844
+▁Chem -6845
+▁usual -6846
+▁cycle -6847
+▁reput -6848
+▁Ca -6849
+▁colon -6850
+▁agents -6851
+erry -6852
+▁protein -6853
+▁awards -6854
+▁equival -6855
+▁Hotel -6856
+▁principles -6857
+ти -6858
+▁flood -6859
+▁Published -6860
+oking -6861
+▁Case -6862
+%, -6863
+▁hydro -6864
+▁structures -6865
+omet -6866
+▁upper -6867
+▁framework -6868
+▁medicine -6869
+grade -6870
+▁Agency -6871
+aron -6872
+▁Bru -6873
+▁Sym -6874
+▁fly -6875
+▁successfully -6876
+▁Wilson -6877
+▁entertainment -6878
+that -6879
+pass -6880
+▁trig -6881
+▁garden -6882
+▁Biden -6883
+▁mixed -6884
+File -6885
+▁tech -6886
+▁Ministry -6887
+▁spaces -6888
+esome -6889
+▁nob -6890
+▁elections -6891
+Tube -6892
+ormal -6893
+aug -6894
+agraph -6895
+▁Modern -6896
+Comp -6897
+stone -6898
+They -6899
+▁Instagram -6900
+▁Vice -6901
+▁retire -6902
+thew -6903
+etary -6904
+▁duty -6905
+een -6906
+isa -6907
+▁steel -6908
+▁Harry -6909
+▁explains -6910
+▁Pract -6911
+▁Sarah -6912
+▁Contin -6913
+teen -6914
+url -6915
+▁Swed -6916
+▁supports -6917
+▁Capital -6918
+phy -6919
+................ -6920
+essed -6921
+▁passing -6922
+▁findings -6923
+▁Country -6924
+▁Students -6925
+▁acid -6926
+▁audio -6927
+▁Quest -6928
+▁suspect -6929
+▁Nic -6930
+owered -6931
+rible -6932
+child -6933
+▁represented -6934
+overty -6935
+ampion -6936
+▁Guard -6937
+▁courses -6938
+▁maintenance -6939
+▁shap -6940
+▁writers -6941
+▁Dun -6942
+▁meetings -6943
+▁protest -6944
+}_{ -6945
+▁unable -6946
+▁letters -6947
+ortion -6948
+inois -6949
+▁Clinton -6950
+▁Song -6951
+▁integr -6952
+▁union -6953
+▁achieved -6954
+▁Jews -6955
+▁paying -6956
+▁illegal -6957
+▁trading -6958
+▁joy -6959
+▁Rog -6960
+▁Kong -6961
+ride -6962
+nbsp -6963
+duction -6964
+ulous -6965
+Test -6966
+▁recommended -6967
+▁linked -6968
+aints -6969
+▁Travel -6970
+▁younger -6971
+▁jur -6972
+▁/> -6973
+▁alleg -6974
+Key -6975
+amber -6976
+▁hall -6977
+Element -6978
+▁Manager -6979
+▁Kevin -6980
+▁Colorado -6981
+▁turning -6982
+Ad -6983
+▁experiment -6984
+spec -6985
+▁Privacy -6986
+uzz -6987
+▁arrested -6988
+zer -6989
+she -6990
+▁Was -6991
+▁succeed -6992
+PM -6993
+▁Greek -6994
+ilton -6995
+▁lie -6996
+▁Whether -6997
+▁Member -6998
+hire -6999
+▁Never -7000
+EA -7001
+ko -7002
+()); -7003
+▁procedures -7004
+ping -7005
+▁## -7006
+▁capable -7007
+sem -7008
+▁division -7009
+ela -7010
+▁approval -7011
+▁recognition -7012
+▁Senior -7013
+▁twenty -7014
+▁army -7015
+▁Democrats -7016
+itals -7017
+▁vo -7018
+▁Capt -7019
+▁Philipp -7020
+▁guests -7021
+▁governments -7022
+ima -7023
+▁relax -7024
+▁topics -7025
+ells -7026
+▁SH -7027
+▁cookies -7028
+BO -7029
+▁covers -7030
+---------------- -7031
+▁stone -7032
+ila -7033
+▁relation -7034
+▁advis -7035
+▁Age -7036
+EG -7037
+▁Afghan -7038
+▁publication -7039
+ashed -7040
+▁awareness -7041
+lik -7042
+▁Building -7043
+▁Bon -7044
+▁Corporation -7045
+CD -7046
+▁primarily -7047
+▁Pot -7048
+▁loan -7049
+▁refuge -7050
+▁Wel -7051
+tw -7052
+▁increases -7053
+ournament -7054
+▁Eastern -7055
+▁behalf -7056
+▁zero -7057
+=' -7058
+▁sole -7059
+▁appointed -7060
+}^{ -7061
+да -7062
+▁Emer -7063
+▁mo -7064
+▁Comput -7065
+▁stood -7066
+▁Brad -7067
+▁provider -7068
+TV -7069
+▁Dead -7070
+▁prepare -7071
+▁bag -7072
+▁Rose -7073
+arters -7074
+▁react -7075
+▁providers -7076
+▁poly -7077
+▁Death -7078
+▁trouble -7079
+▁deaths -7080
+then -7081
+awa -7082
+▁dropped -7083
+▁facilit -7084
+▁Hou -7085
+urities -7086
+▁incredible -7087
+▁NAS -7088
+▁informed -7089
+dated -7090
+▁fold -7091
+▁remote -7092
+▁exceed -7093
+▁exposure -7094
+▁lies -7095
+▁Low -7096
+icity -7097
+▁§ -7098
+quir -7099
+\\ -7100
+▁Pen -7101
+▁hate -7102
+▁Figure -7103
+▁enth -7104
+▁frequently -7105
+▁diversity -7106
+▁Da -7107
+▁Vill -7108
+▁Singapore -7109
+▁exhibition -7110
+▁opposition -7111
+▁Univers -7112
+▁Bol -7113
+▁athlet -7114
+▁basket -7115
+razy -7116
+▁generated -7117
+▁missed -7118
+▁tracks -7119
+▁attended -7120
+▁rele -7121
+▁Prince -7122
+▁Hong -7123
+▁Parliament -7124
+igrants -7125
+▁Kat -7126
+▁mal -7127
+▁formal -7128
+▁Editor -7129
+▁upcoming -7130
+▁Scotland -7131
+▁Toronto -7132
+apped -7133
+▁stim -7134
+▁Systems -7135
+return -7136
+bel -7137
+▁NFL -7138
+▁smooth -7139
+▁Site -7140
+▁entreprene -7141
+▁Mach -7142
+▁Among -7143
+▁scheduled -7144
+▁Wars -7145
+RC -7146
+ylvania -7147
+▁assign -7148
+▁Area -7149
+Error -7150
+▁faster -7151
+▁Unf -7152
+anguages -7153
+itzer -7154
+▁AI -7155
+▁exists -7156
+▁busy -7157
+▁settings -7158
+▁losing -7159
+▁); -7160
+vy -7161
+▁por -7162
+▁decor -7163
+▁Pers -7164
+▁engaged -7165
+▁gro -7166
+▁tack -7167
+▁immediate -7168
+sigma -7169
+▁exception -7170
+▁register -7171
+▁degrees -7172
+asts -7173
+▁Bang -7174
+▁Cru -7175
+▁graduate -7176
+▁updates -7177
+ega -7178
+▁wedding -7179
+▁Return -7180
+▁expertise -7181
+▁mainly -7182
+ologist -7183
+▁reduction -7184
+▁Link -7185
+▁YouTube -7186
+ependent -7187
+▁controvers -7188
+▁anywhere -7189
+▁discipl -7190
+bro -7191
+▁pharm -7192
+▁surgery -7193
+ifest -7194
+RI -7195
+▁yield -7196
+▁relief -7197
+▁thick -7198
+▁champions -7199
+▁granted -7200
+▁fiction -7201
+uts -7202
+▁Santa -7203
+aka -7204
+▁stake -7205
+▁uncertain -7206
+ulating -7207
+onic -7208
+▁inspect -7209
+▁Change -7210
+▁Illinois -7211
+▁Kent -7212
+▁Cub -7213
+eping -7214
+▁alongside -7215
+▁broadcast -7216
+▁Major -7217
+▁engage -7218
+AF -7219
+▁Islamic -7220
+map -7221
+Com -7222
+umes -7223
+▁procedure -7224
+opher -7225
+▁Prom -7226
+▁Friend -7227
+post -7228
+ersion -7229
+▁DO -7230
+worth -7231
+Event -7232
+▁knowing -7233
+▁Davis -7234
+▁ordered -7235
+▁(\ -7236
+▁counsel -7237
+des -7238
+▁impressive -7239
+▁challenging -7240
+ceived -7241
+imental -7242
+▁Select -7243
+▁thread -7244
+anes -7245
+▁Perhaps -7246
+▁rural -7247
+abet -7248
+▁profit -7249
+▁buying -7250
+▁sight -7251
+▁Method -7252
+erg -7253
+▁presentation -7254
+▁Jordan -7255
+▁tested -7256
+burgh -7257
+▁clar -7258
+▁architecture -7259
+ali -7260
+▁obviously -7261
+▁neither -7262
+▁valuable -7263
+▁laugh -7264
+▁competitive -7265
+▁depending -7266
+▁choices -7267
+▁constit -7268
+Add -7269
+umin -7270
+▁Ariz -7271
+▁Study -7272
+VD -7273
+▁ok -7274
+▁egg -7275
+endar -7276
+▁fundamental -7277
+▁provision -7278
+▁scr -7279
+▁Sn -7280
+eren -7281
+▁landscape -7282
+▁bomb -7283
+▁Ah -7284
+▁messages -7285
+[' -7286
+▁shopping -7287
+▁vary -7288
+▁Place -7289
+Go -7290
+▁na -7291
+▁nurs -7292
+▁kinds -7293
+▁undert -7294
+▁guidance -7295
+ira -7296
+▁synt -7297
+▁sick -7298
+ishop -7299
+▁resistance -7300
+▁Ukraine -7301
+orne -7302
+▁DNA -7303
+▁comparison -7304
+▁plastic -7305
+rip -7306
+bra -7307
+▁Image -7308
+▁typical -7309
+▁adopted -7310
+▁lin -7311
+▁Nations -7312
+▁Has -7313
+▁Syria -7314
+▁Edward -7315
+▁assault -7316
+▁seriously -7317
+▁lux -7318
+▁mob -7319
+osm -7320
+▁Rh -7321
+▁featuring -7322
+▁Foreign -7323
+▁studied -7324
+▁disappoint -7325
+▁performing -7326
+▁killing -7327
+▁potentially -7328
+%. -7329
+rt -7330
+▁Saint -7331
+▁(“ -7332
+▁recognize -7333
+▁prompt -7334
+IO -7335
+▁Lic -7336
+▁layer -7337
+▁licens -7338
+▁algorith -7339
+▁Run -7340
+▁Ra -7341
+▁vital -7342
+▁Bad -7343
+++ -7344
+▁Soft -7345
+star -7346
+acks -7347
+atics -7348
+▁Personal -7349
+▁dinner -7350
+▁efficiency -7351
+▁Agre -7352
+▁С -7353
+▁assume -7354
+ogether -7355
+▁MD -7356
+▁Event -7357
+ilty -7358
+▁unknown -7359
+▁regularly -7360
+▁listening -7361
+▁continuing -7362
+▁ED -7363
+▁scenes -7364
+earing -7365
+▁Cro -7366
+▁taste -7367
+▁compris -7368
+▁accused -7369
+▁courts -7370
+▁proposal -7371
+▁root -7372
+▁accordance -7373
+▁rub -7374
+▁explan -7375
+iable -7376
+-\ -7377
+▁Happ -7378
+▁operate -7379
+▁faces -7380
+RS -7381
+▁bon -7382
+▁taxes -7383
+▁accurate -7384
+ifier -7385
+▁necessarily -7386
+▁BBC -7387
+▁Stock -7388
+gov -7389
+▁involving -7390
+▁ending -7391
+▁patterns -7392
+▁friendly -7393
+▁sky -7394
+▁CON -7395
+▁Miller -7396
+found -7397
+▁Turkey -7398
+▁Ball -7399
+▁cheap -7400
+▁spiritual -7401
+▁Android -7402
+rav -7403
+▁anticip -7404
+claim -7405
+NS -7406
+never -7407
+▁Short -7408
+▁horse -7409
+▁resc -7410
+▁Conc -7411
+▁brands -7412
+▁Word -7413
+▁answers -7414
+▁Marc -7415
+▁soci -7416
+▁Columbia -7417
+php -7418
+▁Matthew -7419
+Not -7420
+ALL -7421
+mann -7422
+UP -7423
+making -7424
+▁parameters -7425
+▁Future -7426
+▁Ba -7427
+▁guest -7428
+(), -7429
+Requ -7430
+Or -7431
+▁Works -7432
+▁yesterday -7433
+▁Princ -7434
+▁Ven -7435
+▁walls -7436
+rooms -7437
+▁singer -7438
+anged -7439
+▁veget -7440
+▁consequences -7441
+▁controlled -7442
+▁beach -7443
+AND -7444
+▁Tony -7445
+ificate -7446
+▁Download -7447
+▁hell -7448
+...] -7449
+cerning -7450
+▁Members -7451
+▁legend -7452
+adelph -7453
+Phone -7454
+▁fraud -7455
+▁bridge -7456
+sylvania -7457
+▁papers -7458
+▁Israeli -7459
+▁picked -7460
+▁soldiers -7461
+▁oblig -7462
+▁contribute -7463
+▁SC -7464
+▁wearing -7465
+▁moves -7466
+▁applicable -7467
+▁sought -7468
+▁association -7469
+core -7470
+iac -7471
+▁specified -7472
+'. -7473
+▁Trade -7474
+▁Ali -7475
+▁Lin -7476
+▁discrim -7477
+Up -7478
+▁ju -7479
+▁authent -7480
+▁у -7481
+oding -7482
+▁boat -7483
+▁realized -7484
+rell -7485
+ront -7486
+▁drivers -7487
+aire -7488
+▁riv -7489
+▁Les -7490
+▁Account -7491
+▁interface -7492
+owntown -7493
+▁Palestin -7494
+▁communications -7495
+case -7496
+▁nations -7497
+▁Affairs -7498
+EE -7499
+▁somewhat -7500
+aret -7501
+}{\ -7502
+ls -7503
+ilon -7504
+▁strike -7505
+▁visited -7506
+OV -7507
+▁contest -7508
+▁alive -7509
+▁surprising -7510
+▁festival -7511
+▁pra -7512
+▁purs -7513
+▁consent -7514
+▁Farm -7515
+uous -7516
+▁entr -7517
+▁apparently -7518
+▁Scient -7519
+reens -7520
+▁pray -7521
+▁elev -7522
+▁Fall -7523
+abs -7524
+isition -7525
+AV -7526
+true -7527
+▁monitoring -7528
+adelphia -7529
+▁voters -7530
+▁tickets -7531
+ailed -7532
+▁reader -7533
+away -7534
+▁Fun -7535
+ints -7536
+▁alcohol -7537
+▁б -7538
+▁Greg -7539
+▁understood -7540
+▁entitled -7541
+gener -7542
+▁Magazine -7543
+▁attribut -7544
+▁strict -7545
+▁promise -7546
+▁percentage -7547
+▁organized -7548
+▁$( -7549
+▁payments -7550
+▁empty -7551
+lay -7552
+▁PS -7553
+▁Following -7554
+▁roof -7555
+athe -7556
+▁von -7557
+Of -7558
+▁Summer -7559
+▁Sep -7560
+?? -7561
+▁excess -7562
+bec -7563
+▁zone -7564
+"> -7565
+▁performances -7566
+▁foundation -7567
+ión -7568
+▁permanent -7569
+▁engagement -7570
+▁invent -7571
+▁replaced -7572
+▁repair -7573
+▁dynamic -7574
+▁drawn -7575
+front -7576
+▁pulled -7577
+▁visiting -7578
+▁trained -7579
+▁scored -7580
+oz -7581
+▁Culture -7582
+▁measured -7583
+▁delight -7584
+▁feedback -7585
+▁Lewis -7586
+▁contribution -7587
+▁habit -7588
+pson -7589
+▁provisions -7590
+isd -7591
+▁platforms -7592
+▁Should -7593
+When -7594
+▁McG -7595
+▁Bat -7596
+▁deals -7597
+▁installed -7598
+▁extension -7599
+owa -7600
+▁extreme -7601
+▁narrative -7602
+▁plane -7603
+anny -7604
+▁southern -7605
+▁scope -7606
+▁contributions -7607
+itarian -7608
+▁molec -7609
+iami -7610
+▁Cond -7611
+▁surprised -7612
+▁hat -7613
+obby -7614
+▁narrow -7615
+otype -7616
+etts -7617
+▁adds -7618
+▁grade -7619
+▁fantastic -7620
+aza -7621
+▁Josh -7622
+▁auto -7623
+▁parking -7624
+▁Pennsylvania -7625
+count -7626
+▁newly -7627
+▁module -7628
+▁prohib -7629
+▁Rome -7630
+▁evil -7631
+▁battery -7632
+illiant -7633
+▁liked -7634
+▁EX -7635
+▁generate -7636
+makers -7637
+▁subjects -7638
+cm -7639
+PL -7640
+▁Hay -7641
+▁Band -7642
+▁Early -7643
+▁~ -7644
+▁trail -7645
+open -7646
+▁node -7647
+wing -7648
+heim -7649
+wa -7650
+ART -7651
+heast -7652
+▁producer -7653
+world -7654
+▁Requ -7655
+▁Hope -7656
+▁Box -7657
+▁Having -7658
+▁finance -7659
+▁Ten -7660
+nav -7661
+cers -7662
+'; -7663
+▁Jean -7664
+▁LA -7665
+▁opposite -7666
+▁phenomen -7667
+doi -7668
+arg -7669
+▁celeb -7670
+▁limits -7671
+▁Lady -7672
+▁hasn -7673
+Def -7674
+FF -7675
+▁walked -7676
+cling -7677
+▁nearby -7678
+▁wal -7679
+▁Tech -7680
+▁hopes -7681
+▁tail -7682
+▁suffering -7683
+▁writes -7684
+▁GM -7685
+▁submitted -7686
+ны -7687
+▁climb -7688
+▁Arizona -7689
+▁dealing -7690
+anth -7691
+asp -7692
+achus -7693
+sex -7694
+Ref -7695
+olis -7696
+▁transportation -7697
+▁samples -7698
+▁Bible -7699
+▁Holy -7700
+▁conscious -7701
+▁actors -7702
+▁atmosphere -7703
+▁Small -7704
+▁sentence -7705
+lix -7706
+▁yards -7707
+you -7708
+▁situations -7709
+▁Houston -7710
+▁é -7711
+cor -7712
+▁Things -7713
+▁Patrick -7714
+▁accessible -7715
+ials -7716
+Context -7717
+▁Transport -7718
+▁templ -7719
+▁Treat -7720
+▁drama -7721
+▁expectations -7722
+▁bare -7723
+▁emissions -7724
+American -7725
+▁myst -7726
+▁Avenue -7727
+▁compliance -7728
+▁’ -7729
+▁moral -7730
+▁faculty -7731
+▁contained -7732
+▁firms -7733
+PC -7734
+weight -7735
+▁principal -7736
+▁approaches -7737
+▁П -7738
+▁integrated -7739
+▁appreciate -7740
+oval -7741
+▁painting -7742
+▁subsid -7743
+▁Pack -7744
+▁outcomes -7745
+▁rising -7746
+▁illness -7747
+▁define -7748
+▁invited -7749
+▁Cancer -7750
+▁dismiss -7751
+▁injuries -7752
+▁compos -7753
+acle -7754
+▁bear -7755
+version -7756
+▁Records -7757
+▁Meanwhile -7758
+▁participation -7759
+'] -7760
+itionally -7761
+press -7762
+aska -7763
+missions -7764
+▁helpful -7765
+▁initiative -7766
+Conn -7767
+pg -7768
+▁colour -7769
+▁Roll -7770
+fall -7771
+▁Environmental -7772
+▁swim -7773
+▁Bell -7774
+▁driven -7775
+lient -7776
+▁Ess -7777
+▁Comments -7778
+scale -7779
+alo -7780
+▁Model -7781
+▁automatically -7782
+inals -7783
+▁outstanding -7784
+▁Sav -7785
+folio -7786
+▁sees -7787
+▁Beh -7788
+▁heads -7789
+▁plug -7790
+▁truck -7791
+▁outcome -7792
+▁Crit -7793
+▁Dark -7794
+▁Name -7795
+▁doors -7796
+▁abandon -7797
+bsite -7798
+▁aggress -7799
+▁combat -7800
+▁improvement -7801
+▁los -7802
+beta -7803
+▁capture -7804
+*} -7805
+▁watched -7806
+▁Sov -7807
+LS -7808
+▁cup -7809
+▁Sche -7810
+▁Father -7811
+▁Attorney -7812
+▁neuro -7813
+▁dates -7814
+▁registration -7815
+▁Grant -7816
+BN -7817
+▁prospect -7818
+▁faced -7819
+▁celebrate -7820
+▁Ak -7821
+▁pip -7822
+▁basically -7823
+iot -7824
+▁broke -7825
+pa -7826
+mony -7827
+▁Line -7828
+▁plate -7829
+vo -7830
+▁restaurants -7831
+angers -7832
+▁NS -7833
+erate -7834
+How -7835
+▁perfectly -7836
+vely -7837
+including -7838
+▁talked -7839
+▁mountain -7840
+izz -7841
+▁Safety -7842
+▁consideration -7843
+▁crucial -7844
+▁escape -7845
+▁Gallery -7846
+▁Anne -7847
+▁lect -7848
+Over -7849
+▁funny -7850
+LO -7851
+▁enthus -7852
+▁Ros -7853
+rient -7854
+▁personally -7855
+▁organis -7856
+▁Unfortunately -7857
+▁sin -7858
+▁tum -7859
+▁hidden -7860
+▁introduction -7861
+brid -7862
+▁Effect -7863
+▁founder -7864
+▁enemy -7865
+▁keeps -7866
+riers -7867
+▁rein -7868
+▁containing -7869
+sole -7870
+▁width -7871
+▁Plus -7872
+▁Dream -7873
+▁Albert -7874
+▁representation -7875
+▁Hold -7876
+▁fairly -7877
+title -7878
+GA -7879
+Read -7880
+oven -7881
+owing -7882
+tle -7883
+ova -7884
+▁joining -7885
+Bu -7886
+▁fruit -7887
+▁drag -7888
+▁hits -7889
+▁deeply -7890
+rations -7891
+▁eating -7892
+▁split -7893
+▁liquid -7894
+iously -7895
+▁lawyer -7896
+▁western -7897
+▁align -7898
+alty -7899
+▁marked -7900
+▁diseases -7901
+gage -7902
+▁presents -7903
+letter -7904
+eld -7905
+annels -7906
+▁ly -7907
+▁Jay -7908
+prot -7909
+▁Forest -7910
+▁Kir -7911
+▁extract -7912
+▁vess -7913
+▁ingred -7914
+ikip -7915
+▁weren -7916
+Inst -7917
+▁cutting -7918
+▁constantly -7919
+nesota -7920
+▁coronavirus -7921
+pool -7922
+vol -7923
+ventional -7924
+▁Gi -7925
+det -7926
+▁pointed -7927
+▁gene -7928
+▁spin -7929
+▁Victoria -7930
+Wh -7931
+ulpt -7932
+▁offices -7933
+▁rig -7934
+LA -7935
+▁adventure -7936
+▁Pict -7937
+▁exclusive -7938
+▁formation -7939
+PD -7940
+▁Fig -7941
+ito -7942
+▁equivalent -7943
+plete -7944
+▁neut -7945
+▁controls -7946
+ographer -7947
+▁Resources -7948
+crib -7949
+▁Os -7950
+▁frust -7951
+aze -7952
+▁Defense -7953
+icing -7954
+▁fulf -7955
+ji -7956
+▁Interest -7957
+▁Philadelphia -7958
+▁Learn -7959
+▁acknowled -7960
+rary -7961
+▁versions -7962
+▁creates -7963
+▁suffered -7964
+▁gay -7965
+▁Governor -7966
+UD -7967
+▁designs -7968
+▁Ont -7969
+bell -7970
+▁airport -7971
+▁Oxford -7972
+▁Golden -7973
+▁Kelly -7974
+▁solo -7975
+▁Climate -7976
+▁concerning -7977
+▁Liter -7978
+big -7979
+▁hurt -7980
+▁Navy -7981
+,’ -7982
+▁В -7983
+▁strange -7984
+▁soil -7985
+▁Images -7986
+igious -7987
+expected -7988
+▁wins -7989
+▁catal -7990
+▁Ast -7991
+▁menu -7992
+▁attempts -7993
+elle -7994
+oln -7995
+▁giant -7996
+▁Miami -7997
+▁noticed -7998
+▁regime -7999
+har -8000
+()) -8001
+▁newspaper -8002
+▁magic -8003
+▁Heart -8004
+▁declared -8005
+achusetts -8006
+▁slowly -8007
+▁northern -8008
+▁vice -8009
+iley -8010
+▁Airport -8011
+▁initially -8012
+▁colors -8013
+mathrm -8014
+Node -8015
+▁apps -8016
+otten -8017
+inking -8018
+yo -8019
+▁purchased -8020
+▁Wales -8021
+▁Barb -8022
+▁Marg -8023
+▁Sky -8024
+▁representative -8025
+▁batt -8026
+▁desk -8027
+plom -8028
+▁categories -8029
+▁infect -8030
+▁Brother -8031
+▁websites -8032
+▁tiss -8033
+pton -8034
+▁describes -8035
+ector -8036
+At -8037
+▁intellectual -8038
+ocated -8039
+ante -8040
+MM -8041
+▁interf -8042
+TO -8043
+ikipedia -8044
+▁je -8045
+▁carefully -8046
+▁dogs -8047
+▁flag -8048
+agan -8049
+▁indicate -8050
+▁YO -8051
+▁voting -8052
+▁Credit -8053
+▁Six -8054
+▁carrying -8055
+RL -8056
+▁privile -8057
+roduction -8058
+▁trav -8059
+▁ticket -8060
+▁amounts -8061
+▁restrictions -8062
+▁filter -8063
+▁Dave -8064
+ync -8065
+whel -8066
+erent -8067
+▁sequence -8068
+tical -8069
+▁Judge -8070
+tau -8071
+▁unus -8072
+******** -8073
+▁poverty -8074
+▁consumption -8075
+icken -8076
+▁Collection -8077
+udes -8078
+▁imper -8079
+▁android -8080
+boy -8081
+onder -8082
+▁Room -8083
+▁Rail -8084
+▁Analysis -8085
+▁una -8086
+figure -8087
+▁Cooper -8088
+▁Jam -8089
+▁Ut -8090
+▁improving -8091
+▁exposed -8092
+▁submit -8093
+▁export -8094
+pie -8095
+image -8096
+web -8097
+py -8098
+▁Five -8099
+▁indicated -8100
+▁democracy -8101
+▁hur -8102
+▁prime -8103
+▁Map -8104
+▁likes -8105
+▁Na -8106
+▁Represent -8107
+after -8108
+▁Alexander -8109
+▁proport -8110
+▁inner -8111
+eties -8112
+▁Prior -8113
+▁Jason -8114
+ospitals -8115
+▁spokes -8116
+Path -8117
+uties -8118
+▁Berlin -8119
+▁Spirit -8120
+▁Minnesota -8121
+▁reliable -8122
+▁Republicans -8123
+▁hyper -8124
+▁Without -8125
+▁wheel -8126
+▁conclusion -8127
+▁solve -8128
+▁Working -8129
+onna -8130
+▁doctors -8131
+▁universe -8132
+▁thousand -8133
+▁ru -8134
+igenous -8135
+▁strongly -8136
+▁industries -8137
+▁institution -8138
+▁Leon -8139
+▁rose -8140
+▁Jane -8141
+▁Student -8142
+vi -8143
+▁diam -8144
+▁aims -8145
+▁explo -8146
+▁trends -8147
+▁suddenly -8148
+▁PRO -8149
+vas -8150
+▁Sem -8151
+hist -8152
+▁injured -8153
+▁Football -8154
+▁domin -8155
+▁tor -8156
+vision -8157
+▁Annual -8158
+▁Type -8159
+aration -8160
+▁protocol -8161
+acher -8162
+▁Station -8163
+▁Veg -8164
+▁returning -8165
+▁Authority -8166
+esy -8167
+times -8168
+▁reputation -8169
+▁gap -8170
+▁aside -8171
+ennis -8172
+Service -8173
+▁myth -8174
+▁permission -8175
+▁eligible -8176
+▁Legal -8177
+▁titles -8178
+▁membership -8179
+▁lights -8180
+▁managing -8181
+▁warrant -8182
+apse -8183
+ilde -8184
+▁presidential -8185
+▁chart -8186
+▁browser -8187
+▁folks -8188
+▁tips -8189
+▁deploy -8190
+▁troops -8191
+▁monthly -8192
+▁windows -8193
+oured -8194
+▁franch -8195
+## -8196
+▁characteristics -8197
+▁iPhone -8198
+owed -8199
+▁guidelines -8200
+▁colleagues -8201
+api -8202
+▁Base -8203
+Form -8204
+omic -8205
+▁perm -8206
+▁Massachusetts -8207
+▁Arm -8208
+ilst -8209
+▁[...] -8210
+raction -8211
+▁chose -8212
+▁suitable -8213
+▁crash -8214
+▁Cry -8215
+▁Protection -8216
+iah -8217
+itis -8218
+▁stations -8219
+dney -8220
+▁electricity -8221
+▁ratio -8222
+▁childhood -8223
+anchester -8224
+So -8225
+▁Gro -8226
+▁visible -8227
+▁affordable -8228
+abb -8229
+▁Living -8230
+allas -8231
+▁Yeah -8232
+▁noise -8233
+▁Prop -8234
+▁reserved -8235
+▁Being -8236
+▁consists -8237
+eman -8238
+▁Simon -8239
+▁Ross -8240
+urse -8241
+▁Gree -8242
+▁Anton -8243
+▁warning -8244
+▁priority -8245
+▁covering -8246
+▁guilty -8247
+isted -8248
+▁ages -8249
+▁surf -8250
+▁feelings -8251
+▁Sound -8252
+▁skill -8253
+▁reward -8254
+▁Final -8255
+ificial -8256
+▁crack -8257
+▁crimes -8258
+▁silver -8259
+▁pitch -8260
+▁merely -8261
+ви -8262
+▁tall -8263
+▁admitted -8264
+achelor -8265
+oked -8266
+▁seasons -8267
+▁Moreover -8268
+CP -8269
+▁arguments -8270
+▁objective -8271
+▁Innov -8272
+▁Constitution -8273
+ni -8274
+inator -8275
+▁secondary -8276
+▁languages -8277
+▁Need -8278
+▁Learning -8279
+▁SO -8280
+▁connections -8281
+fty -8282
+▁technique -8283
+▁philosophy -8284
+onsin -8285
+▁phr -8286
+▁Anthony -8287
+▁discount -8288
+▁sup -8289
+▁Oak -8290
+base -8291
+▁fifth -8292
+▁farmers -8293
+▁EN -8294
+▁glad -8295
+▁voc -8296
+▁infection -8297
+▁cabin -8298
+▁acquired -8299
+▁Recent -8300
+olean -8301
+rolled -8302
+▁somewhere -8303
+▁criteria -8304
+IB -8305
+▁Corn -8306
+FA -8307
+▁Cert -8308
+Comm -8309
+start -8310
+▁Sometimes -8311
+▁Saudi -8312
+▁causing -8313
+▁paragraph -8314
+▁drawing -8315
+isconsin -8316
+▁SS -8317
+tainment -8318
+oly -8319
+▁interior -8320
+▁judgment -8321
+isode -8322
+▁surve -8323
+olid -8324
+aturally -8325
+▁proved -8326
+. -8327
+uted -8328
+▁brilliant -8329
+▁adj -8330
+java -8331
+▁Advis -8332
+▁Assembly -8333
+ding -8334
+rine -8335
+▁anniversary -8336
+▁PH -8337
+▁producing -8338
+▁Moh -8339
+check -8340
+▁CS -8341
+▁principle -8342
+▁Ult -8343
+▁Number -8344
+EV -8345
+▁Finance -8346
+spe -8347
+▁insert -8348
+▁implemented -8349
+block -8350
+^\ -8351
+▁PA -8352
+called -8353
+una -8354
+▁Relations -8355
+▁ocean -8356
+▁regulatory -8357
+lar -8358
+▁frequency -8359
+▁requirement -8360
+▁laid -8361
+▁=== -8362
+non -8363
+▁Kansas -8364
+low -8365
+▁tiny -8366
+▁WW -8367
+▁recommendations -8368
+lers -8369
+▁grab -8370
+▁literally -8371
+▁obst -8372
+▁Michel -8373
+ishment -8374
+▁blind -8375
+▁ET -8376
+▁Harris -8377
+▁Bry -8378
+▁arbit -8379
+ixture -8380
+▁Agreement -8381
+▁Based -8382
+gamma -8383
+▁Der -8384
+▁contracts -8385
+▁lawsu -8386
+istent -8387
+▁hoping -8388
+▁meat -8389
+▁tact -8390
+ds -8391
+pdf -8392
+▁Price -8393
+▁depends -8394
+▁Atlantic -8395
+▁anyway -8396
+▁thinks -8397
+▁Tam -8398
+▁Third -8399
+,' -8400
+roit -8401
+emb -8402
+▁matches -8403
+▁distributed -8404
+▁CR -8405
+▁cm -8406
+▁legit -8407
+▁lunch -8408
+!-- -8409
+last -8410
+▁seats -8411
+ypes -8412
+sect -8413
+▁breaking -8414
+▁responses -8415
+▁gent -8416
+yr -8417
+▁Industry -8418
+yard -8419
+▁regulation -8420
+▁installation -8421
+▁TR -8422
+▁Roy -8423
+▁Marketing -8424
+range -8425
+▁NEW -8426
+▁cinem -8427
+▁Grow -8428
+▁genu -8429
+▁Nigeria -8430
+▁sections -8431
+▁OS -8432
+▁neck -8433
+▁buff -8434
+tics -8435
+▁podcast -8436
+▁comedy -8437
+ugh -8438
+▁opposed -8439
+▁Gh -8440
+▁vit -8441
+ching -8442
+▁attached -8443
+▁Las -8444
+▁Memorial -8445
+▁$$ -8446
+wind -8447
+▁corresponding -8448
+rition -8449
+config -8450
++\ -8451
+▁Website -8452
+oga -8453
+▁tea -8454
+craft -8455
+sters -8456
+▁tox -8457
+▁stead -8458
+input -8459
+ainer -8460
+ocratic -8461
+▁interact -8462
+▁requests -8463
+▁destination -8464
+▁mac -8465
+▁inspiration -8466
+▁Budd -8467
+▁Square -8468
+▁Front -8469
+being -8470
+▁Naz -8471
+▁Nob -8472
+)} -8473
+anz -8474
+▁Vietnam -8475
+▁Stone -8476
+▁equity -8477
+System -8478
+▁proven -8479
+▁Doctor -8480
+▁Ga -8481
+incoln -8482
+▁guarantee -8483
+▁Regional -8484
+]{ -8485
+ika -8486
+▁apartment -8487
+▁reducing -8488
+▁votes -8489
+▁HD -8490
+▁Diego -8491
+je -8492
+▁centers -8493
+▁grass -8494
+▁Lib -8495
+shire -8496
+▁saved -8497
+▁pleased -8498
+▁absor -8499
+break -8500
+pen -8501
+▁resulted -8502
+▁frag -8503
+▁Process -8504
+▁Ho -8505
+▁Marsh -8506
+▁Revolution -8507
+="../../ -8508
+▁Politics -8509
+▁mg -8510
+▁interaction -8511
+▁decline -8512
+▁Gun -8513
+▁exclud -8514
+▁involves -8515
+▁LLC -8516
+▁Il -8517
+▁Natural -8518
+▁distingu -8519
+She -8520
+ockey -8521
+ASS -8522
+Config -8523
+▁immun -8524
+▁Nature -8525
+▁insight -8526
+chi -8527
+▁demands -8528
+▁Manchester -8529
+Co -8530
+▁Edition -8531
+▁capabilities -8532
+▁jurisd -8533
+▁organic -8534
+Field -8535
+ounded -8536
+▁territory -8537
+▁Training -8538
+▁branch -8539
+thm -8540
+elfare -8541
+▁Mother -8542
+▁Soviet -8543
+uce -8544
+▁kil -8545
+▁immigration -8546
+aya -8547
+▁personnel -8548
+▁brown -8549
+▁evaluation -8550
+lected -8551
+▁Moon -8552
+▁employed -8553
+esh -8554
+edge -8555
+▁Championship -8556
+ailability -8557
+Att -8558
+▁Gil -8559
+▁memories -8560
+▁entity -8561
+▁assistant -8562
+▁pilot -8563
+▁Comment -8564
+▁boot -8565
+▁falling -8566
+cul -8567
+▁Cele -8568
+▁stable -8569
+▁gained -8570
+sq -8571
+▁Drug -8572
+ando -8573
+▁absolute -8574
+▁Ocean -8575
+Index -8576
+space -8577
+axy -8578
+▁interviews -8579
+▁Afghanistan -8580
+Item -8581
+▁vert -8582
+▁sessions -8583
+▁intention -8584
+▁transaction -8585
+▁Entertainment -8586
+▁settlement -8587
+▁Seattle -8588
+▁Peace -8589
+▁explos -8590
+rat -8591
+▁regardless -8592
+UB -8593
+▁Past -8594
+info -8595
+▁Interview -8596
+▁ble -8597
+▁substantial -8598
+▁Sydney -8599
+▁marks -8600
+ée -8601
+▁sugar -8602
+▁channels -8603
+▁Chall -8604
+▁Years -8605
+▁compensation -8606
+▁alert -8607
+▁recover -8608
+▁supplies -8609
+▁Empire -8610
+▁gal -8611
+▁Share -8612
+▁trib -8613
+▁diet -8614
+▁concrete -8615
+▁crazy -8616
+ORE -8617
+▁Lyn -8618
+▁expanded -8619
+▁Rick -8620
+▁Hig -8621
+▁horror -8622
+enders -8623
+▁dating -8624
+ige -8625
+▁targets -8626
+▁forever -8627
+andy -8628
+▁corn -8629
+▁overwhel -8630
+vard -8631
+▁Sex -8632
+iscal -8633
+▁cyber -8634
+▁collective -8635
+▁AC -8636
+▁Sant -8637
+igate -8638
+▁ceremony -8639
+▁rear -8640
+▁dies -8641
+dep -8642
+▁emerging -8643
+▁appointment -8644
+▁rapidly -8645
+▁inject -8646
+▁puts -8647
+max -8648
+▁Anderson -8649
+▁basketball -8650
+▁everybody -8651
+▁tables -8652
+organ -8653
+iffs -8654
+oen -8655
+▁subsequent -8656
+▁relating -8657
+▁behaviour -8658
+sim -8659
+▁Diff -8660
+▁Wisconsin -8661
+▁tasks -8662
+▁opens -8663
+▁soph -8664
+▁Political -8665
+ami -8666
+ди -8667
+eh -8668
+▁shouldn -8669
+Sp -8670
+▁sending -8671
+olly -8672
+▁confident -8673
+▁qualified -8674
+herent -8675
+Hand -8676
+regon -8677
+partial -8678
+ordinary -8679
+▁Ontario -8680
+▁extend -8681
+▁equally -8682
+pay -8683
+▁Mountain -8684
+abil -8685
+▁instructions -8686
+riving -8687
+▁Furthermore -8688
+▁bike -8689
+▁Christians -8690
+roke -8691
+▁essentially -8692
+▁sacr -8693
+▁Sales -8694
+▁Mess -8695
+roller -8696
+nab -8697
+▁Burn -8698
+▁lessons -8699
+rection -8700
+▁acceler -8701
+▁errors -8702
+▁bands -8703
+▁waters -8704
+covery -8705
+▁essay -8706
+TD -8707
+sea -8708
+▁Az -8709
+engers -8710
+PE -8711
+▁destroyed -8712
+▁Silver -8713
+▁absence -8714
+roph -8715
+otted -8716
+▁trials -8717
+▁vulnerable -8718
+▁Tai -8719
+▁Hind -8720
+”) -8721
+▁stages -8722
+▁Additionally -8723
+▁Object -8724
+▁operator -8725
+▁occurs -8726
+rep -8727
+▁refused -8728
+onds -8729
+▁Architect -8730
+▁pace -8731
+▁ax -8732
+inch -8733
+▁awesome -8734
+len -8735
+rett -8736
+▁copyright -8737
+▁crypt -8738
+ULL -8739
+▁hole -8740
+▁milk -8741
+▁HIV -8742
+mo -8743
+eding -8744
+▁universities -8745
+▁inhib -8746
+thur -8747
+▁confront -8748
+node -8749
+▁File -8750
+▁EV -8751
+flix -8752
+▁developers -8753
+ori -8754
+▁stored -8755
+inar -8756
+▁occas -8757
+ictions -8758
+nie -8759
+▁Mit -8760
+▁tournament -8761
+▁scores -8762
+▁stronger -8763
+▁roy -8764
+agen -8765
+▁affili -8766
+▁IV -8767
+▁reveal -8768
+▁focuses -8769
+▁Austin -8770
+▁adequ -8771
+atern -8772
+▁weekly -8773
+▁ruling -8774
+Request -8775
+▁machines -8776
+MC -8777
+Fr -8778
+▁Opp -8779
+Ps -8780
+▁rum -8781
+▁withdraw -8782
+▁Visit -8783
+HT -8784
+Out -8785
+▁Month -8786
+osystem -8787
+ма -8788
+▁bat -8789
+▁episodes -8790
+▁documentary -8791
+▁Schools -8792
+likely -8793
+▁Gene -8794
+essee -8795
+▁contributed -8796
+▁raw -8797
+▁heavily -8798
+▁Talk -8799
+cha -8800
+rait -8801
+▁satell -8802
+▁pit -8803
+first -8804
+▁Integ -8805
+▁encouraged -8806
+▁cha -8807
+elson -8808
+▁tag -8809
+orable -8810
+One -8811
+▁roads -8812
+ão -8813
+▁voted -8814
+▁lifest -8815
+▁Cambridge -8816
+▁thorough -8817
+object -8818
+eland -8819
+▁compare -8820
+▁yellow -8821
+▁Laure -8822
+intend -8823
+asty -8824
+▁struck -8825
+exp -8826
+▁variable -8827
+▁Balt -8828
+▁Limited -8829
+▁premium -8830
+▁ethnic -8831
+▁Youth -8832
+leq -8833
+▁Bridge -8834
+iy -8835
+Info -8836
+▁functional -8837
+▁outdoor -8838
+Table -8839
+▁dreams -8840
+▁initiatives -8841
+▁everywhere -8842
+▁burd -8843
+delta -8844
+▁movements -8845
+erk -8846
+▁boss -8847
+▁discovery -8848
+▁assigned -8849
+▁Gard -8850
+▁Dog -8851
+▁bench -8852
+▁survive -8853
+ME -8854
+▁fabric -8855
+▁survival -8856
+nu -8857
+▁(! -8858
+ools -8859
+OK -8860
+▁uniform -8861
+▁Nether -8862
+Map -8863
+▁Keep -8864
+encing -8865
+▁displ -8866
+▁Morgan -8867
+▁Lincoln -8868
+▁] -8869
+▁announcement -8870
+▁cuts -8871
+odge -8872
+▁Private -8873
+▁Given -8874
+▁addressed -8875
+watch -8876
+//// -8877
+▁Very -8878
+▁representing -8879
+▁Garden -8880
+ulum -8881
+▁Bit -8882
+▁shots -8883
+▁cooperation -8884
+▁residential -8885
+▁transmission -8886
+▁birthday -8887
+ingham -8888
+isms -8889
+▁tort -8890
+▁argue -8891
+▁breath -8892
+GE -8893
+▁radical -8894
+cut -8895
+xiety -8896
+From -8897
+▁loves -8898
+Array -8899
+orted -8900
+▁fewer -8901
+ben -8902
+allel -8903
+▁flying -8904
+▁None -8905
+unicipal -8906
+▁threats -8907
+fire -8908
+bass -8909
+▁Text -8910
+coin -8911
+▁violent -8912
+▁Moore -8913
+dt -8914
+▁Norm -8915
+▁closing -8916
+iatric -8917
+▁trick -8918
+▁fault -8919
+▁preced -8920
+▁Ide -8921
+▁nic -8922
+▁Tok -8923
+▁investments -8924
+▁commonly -8925
+▁officially -8926
+▁sharp -8927
+▁theatre -8928
+▁Allen -8929
+▁legacy -8930
+▁complicated -8931
+▁Pharm -8932
+▁divided -8933
+▁demonstrated -8934
+andal -8935
+▁mm -8936
+▁Hawai -8937
+strong -8938
+▁retirement -8939
+ía -8940
+▁Bow -8941
+▁meets -8942
+▁antib -8943
+▁Labour -8944
+ouch -8945
+▁ma -8946
+▁spr -8947
+▁integration -8948
+▁raising -8949
+Reg -8950
+eli -8951
+aceut -8952
+quot -8953
+imp -8954
+onymous -8955
+IST -8956
+asant -8957
+▁Bruce -8958
+▁Drag -8959
+▁FL -8960
+▁hosted -8961
+▁administrative -8962
+▁overse -8963
+▁PhD -8964
+▁breast -8965
+don -8966
+▁aimed -8967
+▁estimates -8968
+has -8969
+▁ele -8970
+▁jun -8971
+▁Atlanta -8972
+▁programming -8973
+pling -8974
+▁Similar -8975
+▁Oregon -8976
+▁captured -8977
+▁portfolio -8978
+emed -8979
+▁Covid -8980
+lessly -8981
+▁Movie -8982
+orship -8983
+ustain -8984
+▁Rub -8985
+▁armed -8986
+Gener -8987
+yes -8988
+▁embod -8989
+HL -8990
+holder -8991
+▁demonstrate -8992
+▁ко -8993
+..... -8994
+▁pushed -8995
+▁resid -8996
+create -8997
+▁Hart -8998
+▁grounds -8999
+tery -9000
+▁Korean -9001
+▁Planning -9002
+▁generations -9003
+stal -9004
+ми -9005
+▁`` -9006
+▁prev -9007
+▁Dor -9008
+ologists -9009
+▁expenses -9010
+icking -9011
+▁Dallas -9012
+ERE -9013
+▁margin -9014
+▁Dise -9015
+▁dust -9016
+.- -9017
+▁Would -9018
+security -9019
+▁ownership -9020
+▁Nation -9021
+umps -9022
+apache -9023
+▁transformation -9024
+▁Bureau -9025
+▁Ott -9026
+fraid -9027
+▁sevent -9028
+▁agenda -9029
+▁cream -9030
+summary -9031
+oyd -9032
+▁Sport -9033
+▁Barn -9034
+▁Prim -9035
+▁BC -9036
+▁incredibly -9037
+▁tomorrow -9038
+▁Vegas -9039
+eron -9040
+▁seed -9041
+▁bread -9042
+▁salt -9043
+▁IM -9044
+van -9045
+iate -9046
+▁denied -9047
+▁NO -9048
+▁defendant -9049
+enter -9050
+imens -9051
+▁Chairman -9052
+▁afraid -9053
+▁requested -9054
+▁normally -9055
+define -9056
+▁genre -9057
+▁Weekly -9058
+▁baseball -9059
+▁Captain -9060
+aga -9061
+▁Leave -9062
+ouri -9063
+▁knock -9064
+▁temporary -9065
+▁chat -9066
+▁somehow -9067
+▁beer -9068
+▁blocks -9069
+▁Military -9070
+▁mask -9071
+▁organisation -9072
+Size -9073
+▁champion -9074
+▁Opt -9075
+season -9076
+▁drinking -9077
+▁Pur -9078
+pled -9079
+▁une -9080
+▁maintained -9081
+$- -9082
+google -9083
+asm -9084
+▁decent -9085
+▁supporters -9086
+itative -9087
+▁Tro -9088
+▁falls -9089
+UE -9090
+ader -9091
+oca -9092
+ibl -9093
+▁singing -9094
+▁а -9095
+iger -9096
+▁explicit -9097
+▁€ -9098
+▁Obs -9099
+Code -9100
+▁ultimate -9101
+▁Dub -9102
+▁Indeed -9103
+▁rating -9104
+▁statistics -9105
+▁PL -9106
+iology -9107
+▁hadn -9108
+▁Bes -9109
+▁Agric -9110
+Inter -9111
+▁disaster -9112
+▁Turk -9113
+ista -9114
+▁wra -9115
+▁Battle -9116
+ushing -9117
+▁sensitive -9118
+▁Kennedy -9119
+▁furniture -9120
+chers -9121
+▁Hur -9122
+urches -9123
+aware -9124
+tag -9125
+▁Winter -9126
+▁pul -9127
+ifting -9128
+afe -9129
+▁pleasure -9130
+lib -9131
+▁Dean -9132
+▁liability -9133
+ateful -9134
+▁smile -9135
+▁Detroit -9136
+▁Deb -9137
+▁murd -9138
+▁Christopher -9139
+▁Py -9140
+angel -9141
+iere -9142
+▁peak -9143
+▁till -9144
+▁CB -9145
+▁targeted -9146
+▁segment -9147
+▁Could -9148
+▁mining -9149
+▁Move -9150
+▁Pitt -9151
+▁publicly -9152
+▁tank -9153
+▁SD -9154
+▁stuck -9155
+▁Ever -9156
+▁Tennessee -9157
+▁Nich -9158
+▁Available -9159
+▁Former -9160
+akh -9161
+▁fro -9162
+▁Assistant -9163
+▁unusual -9164
+▁acquisition -9165
+▁resident -9166
+▁employer -9167
+▁Iowa -9168
+▁tap -9169
+olk -9170
+▁losses -9171
+▁Episode -9172
+▁personality -9173
+▁studying -9174
+▁sculpt -9175
+▁Smart -9176
+▁politicians -9177
+▁rely -9178
+▁Record -9179
+abama -9180
+di -9181
+▁estimate -9182
+▁CF -9183
+▁reaching -9184
+▁deleg -9185
+▁brothers -9186
+▁Netherlands -9187
+▁explanation -9188
+▁everyday -9189
+▁elsewhere -9190
+▁thin -9191
+ois -9192
+▁Cra -9193
+▁Ped -9194
+▁pump -9195
+▁Paper -9196
+▁Dur -9197
+▁Come -9198
+▁liberal -9199
+▁centuries -9200
+▁Mayor -9201
+▁Title -9202
+▁gate -9203
+▁Indones -9204
+▁worker -9205
+quarters -9206
+▁prominent -9207
+▁electron -9208
+proof -9209
+▁organisations -9210
+▁True -9211
+▁Store -9212
+▁Hal -9213
+▁birds -9214
+orce -9215
+search -9216
+▁luxury -9217
+▁Drive -9218
+▁flash -9219
+▁Advent -9220
+TP -9221
+▁theore -9222
+▁poetry -9223
+▁clothes -9224
+▁Beaut -9225
+▁Pu -9226
+GBT -9227
+▁Prize -9228
+aud -9229
+orph -9230
+▁favourite -9231
+ifted -9232
+▁entering -9233
+▁golf -9234
+▁Walker -9235
+orig -9236
+▁Wolf -9237
+▁tone -9238
+▁Jacob -9239
+▁wet -9240
+flow -9241
+▁Ec -9242
+▁concepts -9243
+inth -9244
+▁Mars -9245
+▁conservative -9246
+▁alter -9247
+Do -9248
+▁java -9249
+▁engaging -9250
+▁tab -9251
+iors -9252
+month -9253
+▁Less -9254
+▁Help -9255
+winning -9256
+▁Pod -9257
+▁analyz -9258
+▁lists -9259
+erver -9260
+▁overs -9261
+erd -9262
+▁discussions -9263
+▁Jonathan -9264
+▁composition -9265
+▁gang -9266
+▁hospitals -9267
+inf -9268
+▁sheet -9269
+▁Cloud -9270
+idays -9271
+▁за -9272
+uther -9273
+Count -9274
+▁Clark -9275
+▁handling -9276
+athered -9277
+istical -9278
+▁Olympic -9279
+▁horiz -9280
+▁intervention -9281
+▁recall -9282
+▁asks -9283
+▁Standard -9284
+▁Howard -9285
+▁density -9286
+▁lovely -9287
+▁hook -9288
+▁Beat -9289
+▁directors -9290
+aria -9291
+▁Consult -9292
+▁versus -9293
+▁analyst -9294
+▁luck -9295
+▁asset -9296
+aceutical -9297
+▁heritage -9298
+▁complaint -9299
+TY -9300
+▁diplom -9301
+▁Om -9302
+▁Collect -9303
+ordon -9304
+▁cable -9305
+▁Hop -9306
+▁Own -9307
+▁permit -9308
+▁concentration -9309
+▁ships -9310
+supp -9311
+doc -9312
+rape -9313
+▁Maryland -9314
+▁Looking -9315
+▁reven -9316
+▁AF -9317
+▁Hous -9318
+▁Marine -9319
+▁jack -9320
+▁Jess -9321
+▁Belg -9322
+▁designer -9323
+▁DVD -9324
+▁Performance -9325
+▁Gab -9326
+▁Netflix -9327
+uten -9328
+▁innoc -9329
+▁supplement -9330
+▁Bh -9331
+▁(@ -9332
+▁toler -9333
+▁Pier -9334
+▁factory -9335
+achment -9336
+▁src -9337
+▁minim -9338
+▁copies -9339
+▁applies -9340
+▁Forum -9341
+▁Vin -9342
+▁(" -9343
+▁desper -9344
+isl -9345
+bles -9346
+▁Malays -9347
+▁focusing -9348
+▁Software -9349
+▁aged -9350
+▁Hamilton -9351
+▁dialogue -9352
+▁ordinary -9353
+▁Archives -9354
+cs -9355
+▁FBI -9356
+▁Alan -9357
+▁Reviews -9358
+▁reveals -9359
+▁Osc -9360
+▁retired -9361
+▁Beck -9362
+▁involvement -9363
+▁introduce -9364
+▁FA -9365
+upid -9366
+▁summary -9367
+▁unexpected -9368
+Rep -9369
+▁humanity -9370
+▁semi -9371
+imm -9372
+▁Oil -9373
+▁hang -9374
+agger -9375
+▁CP -9376
+mates -9377
+▁replacement -9378
+▁finger -9379
+▁disag -9380
+▁downtown -9381
+▁() -9382
+▁Portug -9383
+▁actress -9384
+▁anymore -9385
+▁Buff -9386
+▁Core -9387
+▁photographs -9388
+▁Mind -9389
+▁Dutch -9390
+▁Try -9391
+ultane -9392
+▁petition -9393
+HA -9394
+▁Exam -9395
+▁bid -9396
+ateral -9397
+track -9398
+hr -9399
+▁headed -9400
+▁impacts -9401
+anted -9402
+▁resort -9403
+words -9404
+▁cleaning -9405
+▁prosecut -9406
+▁trem -9407
+high -9408
+▁dict -9409
+ILL -9410
+▁interpretation -9411
+▁Maria -9412
+▁unlikely -9413
+▁concluded -9414
+▁Gall -9415
+▁Trib -9416
+▁para -9417
+▁AG -9418
+▁Insurance -9419
+haust -9420
+▁Raj -9421
+etch -9422
+▁Events -9423
+abetes -9424
+▁Crim -9425
+▁Exchange -9426
+▁ensuring -9427
+▁Had -9428
+▁maps -9429
+▁chronic -9430
+▁jew -9431
+gt -9432
+▁Tagged -9433
+▁NBA -9434
+▁weapon -9435
+▁argued -9436
+▁Tit -9437
+ITY -9438
+▁К -9439
+▁dad -9440
+▁tur -9441
+ounce -9442
+ocolate -9443
+▁periods -9444
+looking -9445
+▁preparation -9446
+▁gaming -9447
+▁q -9448
+▁superior -9449
+aved -9450
+▁genetic -9451
+▁managers -9452
+▁Material -9453
+▁deeper -9454
+▁journalist -9455
+▁lawyers -9456
+pes -9457
+▁Cho -9458
+▁continuous -9459
+atar -9460
+ikes -9461
+default -9462
+▁User -9463
+▁arrive -9464
+onia -9465
+▁addresses -9466
+▁developments -9467
+rea -9468
+▁vocal -9469
+INE -9470
+▁template -9471
+▁roots -9472
+▁Islands -9473
+Trans -9474
+▁comprom -9475
+▁loud -9476
+▁:= -9477
+▁forecast -9478
+▁electrical -9479
+▁instruments -9480
+only -9481
+ipt -9482
+▁manip -9483
+▁mistake -9484
+▁OK -9485
+▁Everything -9486
+blog -9487
+▁hardware -9488
+▁respective -9489
+▁McK -9490
+amps -9491
+▁saving -9492
+▁volunteers -9493
+Build -9494
+dem -9495
+▁hack -9496
+ourse -9497
+kind -9498
+site -9499
+▁bass -9500
+="+ -9501
+Query -9502
+who -9503
+graduate -9504
+bott -9505
+theta -9506
+▁establishment -9507
+▁opinions -9508
+▁ren -9509
+▁Self -9510
+▁investigate -9511
+▁Ath -9512
+▁substance -9513
+writer -9514
+▁province -9515
+]) -9516
+▁indu -9517
+че -9518
+▁extends -9519
+▁displayed -9520
+▁Ka -9521
+▁fishing -9522
+▁--> -9523
+bourne -9524
+▁pour -9525
+cr -9526
+▁viewed -9527
+isdom -9528
+▁transactions -9529
+▁Email -9530
+▁myster -9531
+▁franchise -9532
+orous -9533
+▁loans -9534
+oker -9535
+▁lucky -9536
+▁Alabama -9537
+▁Alf -9538
+▁Total -9539
+▁dining -9540
+▁doi -9541
+▁Han -9542
+▁spark -9543
+▁Viol -9544
+!!! -9545
+esis -9546
+hol -9547
+▁Andy -9548
+▁inches -9549
+atin -9550
+eling -9551
+▁highlights -9552
+▁Mission -9553
+▁Ident -9554
+▁uncle -9555
+▁usage -9556
+▁Kath -9557
+uf -9558
+ricts -9559
+adesh -9560
+inction -9561
+▁Tick -9562
+▁harder -9563
+enz -9564
+▁underlying -9565
+▁Terms -9566
+▁flowers -9567
+eras -9568
+▁Ban -9569
+▁Several -9570
+Dis -9571
+dam -9572
+▁obl -9573
+▁satisfact -9574
+▁modified -9575
+▁Freedom -9576
+▁abroad -9577
+▁representatives -9578
+aki -9579
+▁enterprise -9580
+▁indicates -9581
+jpg -9582
+aning -9583
+Oh -9584
+quiry -9585
+▁quote -9586
+▁conservation -9587
+▁printed -9588
+DE -9589
+▁stability -9590
+▁promoting -9591
+▁wake -9592
+ete -9593
+iders -9594
+▁Economics -9595
+▁Update -9596
+▁charity -9597
+imore -9598
+tic -9599
+▁offense -9600
+▁remarkable -9601
+▁populations -9602
+▁Communications -9603
+Log -9604
+wp -9605
+Equ -9606
+▁vend -9607
+KE -9608
+▁pursue -9609
+▁lad -9610
+▁fest -9611
+▁loyal -9612
+▁Indiana -9613
+▁meal -9614
+▁clubs -9615
+▁Path -9616
+ста -9617
+▁BY -9618
+eqref -9619
+▁themes -9620
+▁Phill -9621
+▁ingredients -9622
+▁ -26300
+▁boutique -26301
+▁staffing -26302
+▁spokeswoman -26303
+▁drafting -26304
+UGH -26305
+owns -26306
+~~~~ -26307
+chart -26308
+▁Levy -26309
+▁boil -26310
+▁wonderfully -26311
+▁clo -26312
+iston -26313
+▁Bere -26314
+▁mRNA -26315
+▁Barber -26316
+▁inaccurate -26317
+hp -26318
+▁Stack -26319
+▁Offers -26320
+▁Willis -26321
+▁parte -26322
+▁shouting -26323
+▁Ming -26324
+▁insulation -26325
+esque -26326
+.__ -26327
+etc -26328
+▁Glou -26329
+sid -26330
+▁salv -26331
+Delete -26332
+▁frightening -26333
+▁nationalist -26334
+zip -26335
+▁proclaim -26336
+▁weighted -26337
+▁detecting -26338
+sha -26339
+▁unpaid -26340
+▁Factors -26341
+obin -26342
+▁PAC -26343
+▁generators -26344
+▁LT -26345
+▁AFC -26346
+native -26347
+▁cathedral -26348
+▁Presentation -26349
+▁Fors -26350
+▁burnt -26351
+▁enzymes -26352
+▁malaria -26353
+hetical -26354
+▁Attend -26355
+▁wallpaper -26356
+▁identifier -26357
+lb -26358
+▁wag -26359
+▁Lance -26360
+▁Mixed -26361
+▁Rafael -26362
+▁chopped -26363
+▁affinity -26364
+▁Suppose -26365
+▁Portrait -26366
+▁constructing -26367
+▁fing -26368
+ethoven -26369
+▁expresses -26370
+▁geometric -26371
+▁laboratories -26372
+func -26373
+poke -26374
+▁Sikh -26375
+▁skiing -26376
+▁Boulder -26377
+▁Fee -26378
+itles -26379
+▁Sail -26380
+▁cafe -26381
+▁filtering -26382
+▁wheelchair -26383
+rising -26384
+sports -26385
+▁greens -26386
+▁forensic -26387
+▁Sout -26388
+▁Rouge -26389
+▁Secrets -26390
+▁circuits -26391
+▁marketers -26392
+▁premiered -26393
+▁receivers -26394
+▁emphasizes -26395
+abl -26396
+utan -26397
+▁CDs -26398
+still -26399
+▁sigh -26400
+▁Costs -26401
+▁deferred -26402
+brain -26403
+▁Butt -26404
+▁Viking -26405
+▁impressions -26406
+▁upwards -26407
+enum -26408
+▁Tunisia -26409
+▁labeling -26410
+▁artillery -26411
+▁expansive -26412
+▁progressed -26413
+▁disappearance -26414
+üh -26415
+Send -26416
+▁forged -26417
+▁pillars -26418
+▁publishes -26419
+untary -26420
+▁Older -26421
+rapeutics -26422
+▁repertoire -26423
+▁Conservatives -26424
+ILITY -26425
+agara -26426
+Selector -26427
+ego -26428
+hamed -26429
+▁Freddie -26430
+▁encompasses -26431
+▁tuber -26432
+▁cheating -26433
+▁lecturer -26434
+▁HV -26435
+rosse -26436
+▁Mama -26437
+▁Patel -26438
+▁parcel -26439
+ractions -26440
+▁remarked -26441
+▁Hospitals -26442
+Master -26443
+ustomed -26444
+▁Plains -26445
+▁liquor -26446
+▁seizure -26447
+▁beginnings -26448
+▁Bav -26449
+Micro -26450
+▁offspring -26451
+▁aesthetics -26452
+▁Highly -26453
+▁ethn -26454
+▁intervene -26455
+prep -26456
+tube -26457
+▁calculating -26458
+cie -26459
+▁ghosts -26460
+▁tactic -26461
+▁distracted -26462
+кра -26463
+▁feminine -26464
+++; -26465
+neq -26466
+▁headache -26467
+▁inspires -26468
+▁heightened -26469
+▁altar -26470
+▁Fixed -26471
+▁adjud -26472
+▁Jerome -26473
+▁Parking -26474
+▁Augustine -26475
+▁systematically -26476
+ICO -26477
+zan -26478
+▁Trudeau -26479
+ENG -26480
+▁Freud -26481
+▁Vince -26482
+vering -26483
+▁SELECT -26484
+▁Monetary -26485
+circle -26486
+▁metic -26487
+▁insider -26488
+▁DOM -26489
+▁Lebanese -26490
+Area -26491
+amba -26492
+Washington -26493
+&& -26494
+▁Hed -26495
+▁Difference -26496
+)}$ -26497
+▁Graphic -26498
+Tex -26499
+gars -26500
+▁Killer -26501
+buf -26502
+mom -26503
+▁thee -26504
+▁Oliv -26505
+testing -26506
+▁dosage -26507
+▁nickel -26508
+▁shootings -26509
+flat -26510
+▁Accounts -26511
+ная -26512
+liner -26513
+▁invari -26514
+▁Cynthia -26515
+▁granite -26516
+▁limb -26517
+embrance -26518
+▁Recognition -26519
+ван -26520
+▁peas -26521
+▁Turning -26522
+_* -26523
+▁Hawk -26524
+▁Rocket -26525
+thinking -26526
+▁Defendant -26527
+▁Gap -26528
+▁Lies -26529
+▁leaning -26530
+linewidth -26531
+▁tolerate -26532
+▁akin -26533
+planes -26534
+ifornia -26535
+ajax -26536
+▁cone -26537
+lace -26538
+▁Tin -26539
+iaison -26540
+▁Stress -26541
+гу -26542
+stri -26543
+▁complexes -26544
+▁lucrative -26545
+▁nem -26546
+enburg -26547
+▁Bosnia -26548
+▁anthem -26549
+▁Millions -26550
+▁carriage -26551
+▁DL -26552
+Have -26553
+blank -26554
+▁genus -26555
+ynchron -26556
+▁Danielle -26557
+▁disposition -26558
+▁enhances -26559
+management -26560
+▁Aviv -26561
+▁Brendan -26562
+▁duplicate -26563
+Sym -26564
+▁си -26565
+heed -26566
+▁NET -26567
+▁ensl -26568
+▁Hearts -26569
+▁cruelty -26570
+▁Byzantine -26571
+▁Kerala -26572
+▁Lambert -26573
+▁Bangalore -26574
+moil -26575
+▁[]; -26576
+themed -26577
+▁BAS -26578
+▁vow -26579
+▁Orth -26580
+▁Forms -26581
+▁cousins -26582
+▁spectra -26583
+▁Pole -26584
+Muslim -26585
+asonic -26586
+▁SpaceX -26587
+▁leaned -26588
+▁Inner -26589
+▁Memor -26590
+▁lamb -26591
+▁workflow -26592
+▁computation -26593
+▁pads -26594
+paragraph -26595
+trl -26596
+Maybe -26597
+▁plat -26598
+liness -26599
+▁Humph -26600
+▁swear -26601
+▁MLS -26602
+▁Voc -26603
+toggle -26604
+▁hasht -26605
+itchens -26606
+▁passions -26607
+▁Wikimedia -26608
+guide -26609
+▁flown -26610
+▁Ranger -26611
+▁fue -26612
+▁tailor -26613
+Lim -26614
+▁Moody -26615
+▁locom -26616
+umerable -26617
+▁metallic -26618
+VERS -26619
+centered -26620
+.** -26621
+▁CBC -26622
+▁Gloria -26623
+▁rim -26624
+▁hacking -26625
+inki -26626
+omez -26627
+▁versa -26628
+▁appellant -26629
+▁Gle -26630
+▁soda -26631
+culosis -26632
+▁python -26633
+▁tabs -26634
+inatory -26635
+▁blades -26636
+Cast -26637
+▁minors -26638
+▁grandson -26639
+▁Wrestling -26640
+▁segregation -26641
+eas -26642
+▁Nolan -26643
+▁chapel -26644
+▁Daughter -26645
+▁figuring -26646
+SET -26647
+Cond -26648
+▁Casa -26649
+NavBar -26650
+ropract -26651
+▁dances -26652
+▁lol -26653
+▁Disclosure -26654
+▁Isaiah -26655
+▁Calling -26656
+▁insightful -26657
+IDENT -26658
+▁Cull -26659
+▁Doors -26660
+▁Thames -26661
+▁paralle -26662
+hig -26663
+Dist -26664
+▁Zoe -26665
+ensely -26666
+▁Raven -26667
+created -26668
+▁Peoples -26669
+▁Suffolk -26670
+▁Naturally -26671
+irrel -26672
+▁Borg -26673
+▁Robb -26674
+aspoon -26675
+▁vascular -26676
+▁WORLD -26677
+Donnell -26678
+▁tornado -26679
+▁stewards -26680
+odynamic -26681
+▁LIM -26682
+queue -26683
+▁grape -26684
+▁supra -26685
+▁mandates -26686
+▁Headquarters -26687
+!' -26688
+▁Ivy -26689
+olving -26690
+anye -26691
+luent -26692
+▁buys -26693
+▁saga -26694
+▁OTHER -26695
+urai -26696
+▁collateral -26697
+▁Eg -26698
+School -26699
+▁atroc -26700
+▁vastly -26701
+▁Broncos -26702
+▁balances -26703
+onge -26704
+▁Abe -26705
+sites -26706
+▁Hert -26707
+▁году -26708
+▁Laurie -26709
+▁multif -26710
+▁typing -26711
+▁feminism -26712
+MIN -26713
+Tab -26714
+▁Vanessa -26715
+(* -26716
+amen -26717
+▁GmbH -26718
+▁banning -26719
+▁prud -26720
+▁Reven -26721
+▁Roberto -26722
+▁notation -26723
+))) -26724
+▁Hole -26725
+character -26726
+▁Pharmacy -26727
+▁YES -26728
+ucket -26729
+▁textile -26730
+▁Somerset -26731
+alogy -26732
+▁themed -26733
+▁weddings -26734
+▁neighbouring -26735
+▁endpoint -26736
+▁Shap -26737
+Player -26738
+▁promo -26739
+Help -26740
+helm -26741
+iour -26742
+▁novelist -26743
+▁Carpenter -26744
+▁retrieved -26745
+▁MUS -26746
+▁Manitoba -26747
+▁evenings -26748
+orb -26749
+▁ante -26750
+▁Kurdish -26751
+▁lithium -26752
+▁upstairs -26753
+fb -26754
+ции -26755
+▁Cory -26756
+▁Bulld -26757
+springframework -26758
+▁Tac -26759
+▁hone -26760
+Stand -26761
+imates -26762
+▁Vehicles -26763
+▁filtered -26764
+▁Implementation -26765
+▁Trav -26766
+▁fetch -26767
+▁gastro -26768
+▁Panther -26769
+▁Satellite -26770
+▁heck -26771
+▁hull -26772
+▁Hunger -26773
+▁asympt -26774
+▁regain -26775
+subscribe -26776
+▁Ib -26777
+▁Nach -26778
+▁Quint -26779
+raising -26780
+▁elites -26781
+parameter -26782
+fts -26783
+nor -26784
+▁Sophia -26785
+protobuf -26786
+▁spells -26787
+▁fireworks -26788
+▁Builder -26789
+ulo -26790
+heng -26791
+andel -26792
+stadt -26793
+▁bloggers -26794
+▁ventilation -26795
+keepers -26796
+▁monkey -26797
+environment -26798
+Bean -26799
+▁grac -26800
+▁pedag -26801
+▁disadvantage -26802
+▁Dancing -26803
+heard -26804
+undai -26805
+▁Idea -26806
+angled -26807
+icient -26808
+▁Pixel -26809
+▁Schwar -26810
+▁waking -26811
+▁genetically -26812
+▁john -26813
+needed -26814
+▁dynasty -26815
+▁Rehabilitation -26816
+▁<- -26817
+▁Gad -26818
+▁Gos -26819
+▁для -26820
+actor -26821
+▁Revere -26822
+▁crunch -26823
+▁portrayal -26824
+major -26825
+▁forfe -26826
+▁indicative -26827
+▁terminology -26828
+olver -26829
+encers -26830
+python -26831
+▁dealership -26832
+▁slam -26833
+▁rebuilt -26834
+▁stresses -26835
+▁unbelievable -26836
+Reuters -26837
+▁Output -26838
+optional -26839
+▁builders -26840
+▁validated -26841
+▁Increasing -26842
+▁rebuilding -26843
+arset -26844
+ractive -26845
+theorem -26846
+▁Katrina -26847
+would -26848
+▁Irvine -26849
+▁Café -26850
+▁quiz -26851
+▁Morton -26852
+▁showcasing -26853
+zig -26854
+▁clan -26855
+▁Crypto -26856
+▁pseudo -26857
+▁Logistics -26858
+opin -26859
+▁lust -26860
+▁rugged -26861
+▁teaspoon -26862
+bilt -26863
+▁Rip -26864
+▁Deer -26865
+▁artery -26866
+▁PF -26867
+▁$(" -26868
+Chief -26869
+umann -26870
+▁Indo -26871
+ourced -26872
+▁poles -26873
+▁Roland -26874
+▁sickness -26875
+▁Preservation -26876
+.“ -26877
+▁ш -26878
+heit -26879
+▁bans -26880
+▁herd -26881
+amacare -26882
+cke -26883
+▁nour -26884
+▁frown -26885
+▁jihad -26886
+▁rationale -26887
+▁Opposition -26888
+▁congestion -26889
+сле -26890
+▁VS -26891
+▁Dir -26892
+Frank -26893
+▁Quin -26894
+▁dazz -26895
+▁Pregn -26896
+▁supermarket -26897
+ANN -26898
+▁HAVE -26899
+▁Residence -26900
+▁Successful -26901
+▁Governments -26902
+▁retrospective -26903
+▁plum -26904
+▁Penguin -26905
+▁Electoral -26906
+▁Psychiatry -26907
+▁plaque -26908
+▁spirituality -26909
+ха -26910
+hyth -26911
+Program -26912
+▁rabbit -26913
+▁reopen -26914
+▁dreamed -26915
+zyn -26916
+▁Lund -26917
+▁Mats -26918
+▁crus -26919
+▁raped -26920
+▁stubborn -26921
+▁Login -26922
+▁Midnight -26923
+controller -26924
+▁Attorneys -26925
+MY -26926
+fred -26927
+Debug -26928
+eners -26929
+▁Signed -26930
+▁clicks -26931
+▁Merchant -26932
+ounty -26933
+▁beers -26934
+▁sourced -26935
+▁afterward -26936
+Site -26937
+Plugin -26938
+▁Judges -26939
+▁payload -26940
+punk -26941
+▁clone -26942
+▁frost -26943
+▁highs -26944
+▁Schneider -26945
+▁CITY -26946
+▁Qaeda -26947
+▁revital -26948
+▁advertised -26949
+▁Hyde -26950
+▁quint -26951
+▁Cunning -26952
+Statement -26953
+▁vulnerabilities -26954
+▁"' -26955
+▁Nem -26956
+geons -26957
+remlin -26958
+▁fleeing -26959
+▁wandering -26960
+▁substantive -26961
+▁penal -26962
+▁Enough -26963
+▁Romantic -26964
+▁Slovenia -26965
+▁Identification -26966
+gre -26967
+Panel -26968
+▁semif -26969
+▁ranged -26970
+▁folding -26971
+▁hectares -26972
+▁Cody -26973
+arious -26974
+▁Byr -26975
+▁Gin -26976
+ushima -26977
+▁evenly -26978
+▁Lifestyle -26979
+stre -26980
+▁intox -26981
+▁Vector -26982
+▁curved -26983
+▁RELATED -26984
+▁Survival -26985
+▁restrictive -26986
+▁conveniently -26987
+▁Psychological -26988
+▁JC -26989
+▁xmlns -26990
+▁utmost -26991
+▁NIH -26992
+▁Wei -26993
+phthal -26994
+▁Gothic -26995
+▁polite -26996
+▁Courtney -26997
+▁scattering -26998
+ème -26999
+hene -27000
+driver -27001
+▁excel -27002
+▁brokers -27003
+▁durability -27004
+▁circumstance -27005
+sell -27006
+▁Oman -27007
+▁gems -27008
+▁Bella -27009
+▁Perspectives -27010
+▁Bien -27011
+▁Kens -27012
+▁Warrior -27013
+▁resh -27014
+usement -27015
+▁daring -27016
+▁Registry -27017
+▁Brotherhood -27018
+Feed -27019
+▁Charge -27020
+▁deduction -27021
+▁bios -27022
+formerly -27023
+▁Firstly -27024
+tte -27025
+▁DUI -27026
+chemical -27027
+Dest -27028
+▁unexpectedly -27029
+▁Randall -27030
+▁Volkswagen -27031
+▁fulfillment -27032
+▁placeholder -27033
+▁retains -27034
+▁statues -27035
+▁conflicting -27036
+=- -27037
+▁knot -27038
+▁REVIEW -27039
+▁analyse -27040
+▁Hartford -27041
+▁Liability -27042
+!), -27043
+▁ga -27044
+ducers -27045
+▁policymakers -27046
+opian -27047
+▁veto -27048
+blance -27049
+▁FIRST -27050
+▁Julius -27051
+▁Eleanor -27052
+▁striker -27053
+▁alarming -27054
+rique -27055
+▁Sunset -27056
+▁Krishna -27057
+umberland -27058
+▁continents -27059
+▁hus -27060
+▁bacon -27061
+Results -27062
+onential -27063
+odd -27064
+▁Crack -27065
+▁Wolver -27066
+▁clutch -27067
+▁Perkins -27068
+▁haunting -27069
+▁disclaim -27070
+▁organism -27071
+------------ -27072
+▁marched -27073
+tsd -27074
+arna -27075
+esthesia -27076
+▁Cleaning -27077
+▁Starbucks -27078
+▁cultivation -27079
+Rich -27080
+deen -27081
+▁degr -27082
+▁Proud -27083
+▁Deadline -27084
+▁permitting -27085
+▁Nile -27086
+▁sandwic -27087
+▁highways -27088
+▁injected -27089
+▁amplitude -27090
+▁blunt -27091
+▁fabrics -27092
+▁Chess -27093
+▁suites -27094
+▁Clint -27095
+▁Auction -27096
+Center -27097
+▁bosses -27098
+▁Exploring -27099
+inflammatory -27100
+Crit -27101
+LAND -27102
+Member -27103
+▁Clerk -27104
+ivation -27105
+▁ironic -27106
+▁upheld -27107
+▁enlisted -27108
+▁aggressively -27109
+▁lays -27110
+▁schooling -27111
+ibus -27112
+▁Resil -27113
+▁hurry -27114
+▁nause -27115
+loat -27116
+▁lieu -27117
+▁buddy -27118
+Channel -27119
+itating -27120
+▁digits -27121
+▁Maximum -27122
+▁shrink -27123
+▁procedural -27124
+▁riots -27125
+▁eviden -27126
+▁Napoleon -27127
+▁replicate -27128
+UTF -27129
+}-\ -27130
+Sports -27131
+▁lyric -27132
+▁rests -27133
+▁Incred -27134
+▁Rivera -27135
+▁Churches -27136
+tu -27137
+ANA -27138
+▁Iz -27139
+ETHOD -27140
+▁Sart -27141
+▁Lennon -27142
+▁Observer -27143
+▁inhibition -27144
+▁Acquisition -27145
+▁preaching -27146
+▁Peer -27147
+▁alley -27148
+▁dy -27149
+enta -27150
+ophys -27151
+▁Harley -27152
+▁Hos -27153
+▁spice -27154
+▁lattice -27155
+kon -27156
+▁tossed -27157
+▁Bridges -27158
+▁Divorce -27159
+▁Sunshine -27160
+▁cosmetic -27161
+meg -27162
+▁socks -27163
+atorium -27164
+largest -27165
+requency -27166
+hh -27167
+entity -27168
+▁Boost -27169
+▁floral -27170
+▁Lindsey -27171
+▁isolate -27172
+▁\( -27173
+▁Combined -27174
+▁proclaimed -27175
+typ -27176
+Take -27177
+▁zum -27178
+▁teamed -27179
+▁Seminar -27180
+▁premiums -27181
+tm -27182
+▁Eb -27183
+▁kay -27184
+▁idiot -27185
+▁unpleasant -27186
+▁FD -27187
+▁Lounge -27188
+▁conjug -27189
+▁Pioneer -27190
+▁differentiate -27191
+▁accomplishment -27192
+IPS -27193
+}}_ -27194
+their -27195
+inders -27196
+sponsored -27197
+▁Perspective -27198
+бу -27199
+Scroll -27200
+oil -27201
+▁Cham -27202
+▁authored -27203
+▁wer -27204
+Print -27205
+▁Beethoven -27206
+▁tray -27207
+icidal -27208
+▁blows -27209
+▁mentors -27210
+Series -27211
+▁vapor -27212
+▁friendships -27213
+▁FI -27214
+enden -27215
+▁Disp -27216
+jected -27217
+orting -27218
+▁terra -27219
+▁FX -27220
+incre -27221
+▁cakes -27222
+▁Extreme -27223
+▁Wick -27224
+▁hemp -27225
+inently -27226
+▁oneself -27227
+▁defenses -27228
+▁insecurity -27229
+▁chin -27230
+alytic -27231
+▁Symbol -27232
+▁Ibrahim -27233
+▁Enterprises -27234
+▁subdivision -27235
+▁Orient -27236
+hythms -27237
+▁cursor -27238
+▁radically -27239
+mia -27240
+▁reprint -27241
+▁Instrument -27242
+▁amidst -27243
+negative -27244
+arf -27245
+bish -27246
+▁Dru -27247
+▁Canter -27248
+racellular -27249
+leigh -27250
+Second -27251
+▁Franç -27252
+▁undis -27253
+▁monastery -27254
+▁Collaboration -27255
+Div -27256
+▁Kub -27257
+▁tracing -27258
+regulation -27259
+▁KY -27260
+▁BRO -27261
+▁tensor -27262
+▁brighter -27263
+▁homosexuality -27264
+IPT -27265
+▁Elon -27266
+▁Guns -27267
+▁heir -27268
+erential -27269
+▁advisers -27270
+NN -27271
+ца -27272
+▁од -27273
+ller -27274
+▁Indie -27275
+|^ -27276
+aleb -27277
+▁Forget -27278
+▁Sculpt -27279
+▁oversees -27280
+▁assistants -27281
+▁commenting -27282
+▁MAY -27283
+vered -27284
+▁adjustable -27285
+▁Instruction -27286
+▁evaluations -27287
+FIG -27288
+▁Compan -27289
+▁miserable -27290
+▁Tus -27291
+▁lender -27292
+▁plastics -27293
+▁vocalist -27294
+▁Lunch -27295
+drive -27296
+▁Mits -27297
+▁hurts -27298
+▁onions -27299
+▁paused -27300
+▁Figures -27301
+▁timeout -27302
+▁fracture -27303
+▁seamlessly -27304
+▁Rene -27305
+▁fren -27306
+▁yeast -27307
+ARK -27308
+▁stamps -27309
+▁accidental -27310
+▁Ler -27311
+alach -27312
+▁fibre -27313
+boarding -27314
+▁Majesty -27315
+Americans -27316
+▁''' -27317
+ervlet -27318
+▁connector -27319
+▁controllers -27320
+;" -27321
+▁LOG -27322
+▁trusts -27323
+resolution -27324
+▁Ко -27325
+▁CAM -27326
+agher -27327
+heter -27328
+▁Worst -27329
+oulos -27330
+▁pope -27331
+▁descended -27332
+UCK -27333
+▁FF -27334
+▁Closed -27335
+▁graves -27336
+▁thirst -27337
+Alex -27338
+▁Soldier -27339
+▁Liberation -27340
+▁relentless -27341
+wyn -27342
+▁beams -27343
+▁overc -27344
+gart -27345
+▁locker -27346
+▁pitches -27347
+▁posture -27348
+▁intermedi -27349
+▁Fuj -27350
+▁cozy -27351
+essages -27352
+▁Happened -27353
+▁imperfect -27354
+▁restraint -27355
+▁sunk -27356
+▁Fuller -27357
+▁schema -27358
+▁readiness -27359
+reb -27360
+para -27361
+▁duly -27362
+▁nephew -27363
+▁disturbed -27364
+▁nostalgia -27365
+▁organizer -27366
+▁JM -27367
+Times -27368
+▁releg -27369
+▁Called -27370
+▁invaded -27371
+▁longevity -27372
+▁devastated -27373
+cards -27374
+▁holdings -27375
+▁treasures -27376
+▁staggering -27377
+▁Kai -27378
+▁Lum -27379
+▁mans -27380
+exports -27381
+▁Willow -27382
+Germ -27383
+adjust -27384
+▁intersect -27385
+’) -27386
+▁BEST -27387
+▁Lynd -27388
+▁steer -27389
+▁toured -27390
+▁Mortgage -27391
+▁resembles -27392
+tf -27393
+▁Mum -27394
+▁drying -27395
+osterone -27396
+▁daylight -27397
+shots -27398
+packed -27399
+▁Drink -27400
+▁Edwin -27401
+attribute -27402
+▁hello -27403
+trained -27404
+▁Scotia -27405
+atchewan -27406
+▁penetration -27407
+azer -27408
+▁Taken -27409
+▁Diploma -27410
+▁engages -27411
+bootstrap -27412
+▁crystals -27413
+▁endurance -27414
+▁Lal -27415
+▁Coca -27416
+▁Denis -27417
+▁regulating -27418
+itic -27419
+▁rebound -27420
+▁promoter -27421
+▁distributors -27422
+▁Adj -27423
+▁weighs -27424
+BLE -27425
+bull -27426
+▁Thur -27427
+▁Assass -27428
+▁seldom -27429
+▁Roads -27430
+▁tortured -27431
+anza -27432
+▁Disorders -27433
+▁commercials -27434
+awk -27435
+jas -27436
+▁ONLY -27437
+▁upstream -27438
+▁diffusion -27439
+▁Consortium -27440
+▁SJ -27441
+▁Cran -27442
+arb -27443
+▁Ci -27444
+arat -27445
+▁Alto -27446
+▁Pist -27447
+dominal -27448
+mediately -27449
+▁acquaint -27450
+▁za -27451
+coal -27452
+▁abbre -27453
+emy -27454
+plate -27455
+▁Predict -27456
+▁interchange -27457
+▁ли -27458
+▁Jub -27459
+▁tram -27460
+ologie -27461
+osexual -27462
+usually -27463
+▁Salmon -27464
+▁anomal -27465
+▁Mohammad -27466
+▁affirmed -27467
+associated -27468
+wana -27469
+▁Cumm -27470
+▁Sidney -27471
+▁circus -27472
+▁č -27473
+opo -27474
+bold -27475
+opers -27476
+Social -27477
+metics -27478
+▁Manor -27479
+▁tapes -27480
+ellation -27481
+▁laptops -27482
+▁cites -27483
+▁Hassan -27484
+▁Museums -27485
+Scale -27486
+oters -27487
+aciones -27488
+ourning -27489
+▁resemble -27490
+▁initiation -27491
+▁blended -27492
+▁RR -27493
+▁Zambia -27494
+ULD -27495
+▁LSU -27496
+▁REAL -27497
+▁Acceler -27498
+▁upright -27499
+GMT -27500
+▁Plat -27501
+geries -27502
+living -27503
+▁Naomi -27504
+▁Raleigh -27505
+▁renovated -27506
+feat -27507
+▁geop -27508
+▁Warwick -27509
+_-> -27510
+tier -27511
+▁muc -27512
+meter -27513
+▁Sultan -27514
+ingen -27515
+▁Pence -27516
+▁elders -27517
+▁perturb -27518
+▁Jorge -27519
+▁flush -27520
+zbollah -27521
+▁Trials -27522
+▁hardship -27523
+ocon -27524
+staff -27525
+▁Rapt -27526
+▁Means -27527
+DNA -27528
+tools -27529
+▁Kaiser -27530
+▁Brennan -27531
+▁Appropri -27532
+▁Edu -27533
+▁greed -27534
+▁affidav -27535
+▁Petition -27536
+▁Malaysian -27537
+▁spoon -27538
+▁Landing -27539
+lavery -27540
+▁mandated -27541
+▁Inclusion -27542
+Week -27543
+▁wipe -27544
+▁Scots -27545
+▁backlash -27546
+▁DOC -27547
+pound -27548
+▁Plants -27549
+▁groove -27550
+▁playful -27551
+▁brutality -27552
+▁injunction -27553
+Prom -27554
+▁Nay -27555
+finals -27556
+▁palette -27557
+▁stumbled -27558
+▁entertained -27559
+▁messy -27560
+callback -27561
+▁Carolyn -27562
+▁XV -27563
+▁pistol -27564
+▁Wheeler -27565
+▁pleasing -27566
+▁viewpoint -27567
+▁ISSN -27568
+▁Kavan -27569
+picture -27570
+▁inland -27571
+▁decorations -27572
+oug -27573
+forces -27574
+rescent -27575
+▁creditors -27576
+▁assertEquals -27577
+LM -27578
+änd -27579
+▁MU -27580
+▁При -27581
+▁BOOK -27582
+tun -27583
+▁ве -27584
+Bron -27585
+▁wander -27586
+▁handing -27587
+Ben -27588
+▁Moy -27589
+▁ashamed -27590
+developed -27591
+▁scarce -27592
+▁SER -27593
+▁allied -27594
+orescence -27595
+▁dreaming -27596
+Tur -27597
+▁simultaneous -27598
+▁Skip -27599
+director -27600
+▁Trustee -27601
+▁discrep -27602
+▁acknowledging -27603
+OF -27604
+▁Soil -27605
+▁carp -27606
+▁dice -27607
+▁Lob -27608
+▁Corey -27609
+▁Logic -27610
+▁Rooms -27611
+▁Holidays -27612
+▁hormones -27613
+▁billionaire -27614
+▁Occupational -27615
+▁Sod -27616
+▁Wyn -27617
+▁Cait -27618
+▁presc -27619
+▁rotate -27620
+▁locking -27621
+▁Ethnic -27622
+▁encl -27623
+▁Meteor -27624
+▁Fi -27625
+avis -27626
+▁chefs -27627
+▁throm -27628
+▁ineffective -27629
+▁Bom -27630
+▁Gel -27631
+▁Obl -27632
+▁tempo -27633
+▁gardening -27634
+acial -27635
+izoph -27636
+▁Afro -27637
+▁scans -27638
+▁domest -27639
+▁tranqu -27640
+▁currents -27641
+▁superstar -27642
+▁Recommended -27643
+does -27644
+▁ASC -27645
+▁Berm -27646
+▁sham -27647
+▁registering -27648
+Hot -27649
+▁kon -27650
+uchar -27651
+▁logos -27652
+▁Europa -27653
+▁Honors -27654
+▁amusing -27655
+▁wolves -27656
+▁poisoning -27657
+▁possessions -27658
+▁eclectic -27659
+▁Greenland -27660
+oS -27661
+(-- -27662
+▁HQ -27663
+▁LD -27664
+trad -27665
+▁Jag -27666
+money -27667
+Review -27668
+digital -27669
+earance -27670
+▁Kristen -27671
+▁turmoil -27672
+▁Payne -27673
+ocaly -27674
+▁squeeze -27675
+Proxy -27676
+oscope -27677
+▁liking -27678
+▁splitting -27679
+▁withstand -27680
+▁Archaeology -27681
+▁[], -27682
+▁wed -27683
+▁Goal -27684
+▁Roots -27685
+▁Hyundai -27686
+▁Providing -27687
+▁Legislation -27688
+▁bil -27689
+poons -27690
+Internal -27691
+▁Candidate -27692
+Foot -27693
+death -27694
+▁Devils -27695
+▁Recruit -27696
+▁Straight -27697
+▁caregivers -27698
+\}$ -27699
+Hello -27700
+compet -27701
+inform -27702
+▁Madonna -27703
+▁materially -27704
+▁XP -27705
+adan -27706
+▁vain -27707
+▁Insight -27708
+▁predicts -27709
+}}, -27710
+▁MH -27711
+▁Lill -27712
+▁Tyson -27713
+▁Healing -27714
+▁copying -27715
+▁thinkers -27716
+collection -27717
+▁circulating -27718
+▁MAX -27719
+▁avail -27720
+iltr -27721
+▁stint -27722
+▁infrared -27723
+worm -27724
+▁NAME -27725
+▁Polar -27726
+orously -27727
+▁deprived -27728
+▁Veterinary -27729
+▁Mai -27730
+▁SUM -27731
+▁avant -27732
+terrorism -27733
+▁Nicaragua -27734
+▁consortium -27735
+raged -27736
+Family -27737
+Vector -27738
+▁Closing -27739
+▁separating -27740
+▁limbs -27741
+biology -27742
+faction -27743
+▁(‘ -27744
+mits -27745
+quote -27746
+▁Fiscal -27747
+approved -27748
+informed -27749
+▁thigh -27750
+▁feasibility -27751
+ographs -27752
+▁Lowe -27753
+▁Debut -27754
+▁sorrow -27755
+▁termed -27756
+nob -27757
+ipro -27758
+shape -27759
+▁dean -27760
+cycling -27761
+▁Farmer -27762
+▁Prairie -27763
+{{\ -27764
+▁Transl -27765
+exception -27766
+▁passwords -27767
+▁CAL -27768
+▁Bates -27769
+▁VIDEO -27770
+reviewed -27771
+▁takeover -27772
+vc -27773
+▁splash -27774
+Il -27775
+®, -27776
+=== -27777
+▁oz -27778
+inen -27779
+▁Turt -27780
+typeof -27781
+▁pouring -27782
+▁Professionals -27783
+▁Maker -27784
+▁Newark -27785
+▁Towards -27786
+▁hurting -27787
+▁Romanian -27788
+▁cerebral -27789
+▁playlist -27790
+▁citations -27791
+atu -27792
+▁gestures -27793
+vir -27794
+▁bun -27795
+▁wow -27796
+▁Debate -27797
+▁Chronic -27798
+▁Console -27799
+Hard -27800
+▁Sofia -27801
+▁broch -27802
+▁picturesque -27803
+ibern -27804
+assium -27805
+▁reuse -27806
+▁bowling -27807
+▁fined -27808
+▁Elliot -27809
+▁fueled -27810
+▁contend -27811
+Sen -27812
+▁ps -27813
+▁NFC -27814
+▁Bios -27815
+▁psyched -27816
+etermined -27817
+▁Parenthood -27818
+Trust -27819
+▁Paso -27820
+Fields -27821
+▁taxable -27822
+▁assemble -27823
+▁USE -27824
+▁lakh -27825
+umping -27826
+storage -27827
+▁clauses -27828
+ugu -27829
+▁Berger -27830
+▁Offices -27831
+▁disruptive -27832
+^{-\ -27833
+▁toes -27834
+ussions -27835
+"` -27836
+▁Ink -27837
+▁folders -27838
+▁nutrient -27839
+▁textbook -27840
+▁accustomed -27841
+utf -27842
+leader -27843
+▁Chest -27844
+▁Titan -27845
+▁Claude -27846
+▁headquartered -27847
+▁Wen -27848
+▁terminals -27849
+▁feud -27850
+▁Chennai -27851
+}}} -27852
+▁Shim -27853
+▁Warm -27854
+▁graz -27855
+▁pollut -27856
+▁curator -27857
+jun -27858
+Total -27859
+crime -27860
+olson -27861
+▁directs -27862
+▁contempor -27863
+▁Comparative -27864
+▁Surf -27865
+ickets -27866
+▁immensely -27867
+▁formidable -27868
+▁под -27869
+rings -27870
+▁Throw -27871
+derived -27872
+▁Brooke -27873
+▁adolescent -27874
+▁cruc -27875
+▁hooks -27876
+▁LB -27877
+zbek -27878
+▁Tub -27879
+▁Cats -27880
+▁Advocate -27881
+Sy -27882
+▁seg -27883
+▁Meat -27884
+seeing -27885
+▁Module -27886
+▁apparel -27887
+▁Photographer -27888
+ranging -27889
+▁Atomic -27890
+▁Consultation -27891
+▁disadvantaged -27892
+▁feder -27893
+▁Chandler -27894
+▁Pavilion -27895
+▁subscript -27896
+▁clinicians -27897
+=""> -27898
+▁receipts -27899
+▁Influence -27900
+▁dashboard -27901
+James -27902
+▁tuning -27903
+ATT -27904
+upon -27905
+▁disco -27906
+▁faded -27907
+▁dispatch -27908
+▁dividing -27909
+▁criterion -27910
+▁Established -27911
+ -27912
+▁Pratt -27913
+▁semiconductor -27914
+bread -27915
+▁wield -27916
+▁Worker -27917
+afa -27918
+▁Bie -27919
+▁Bram -27920
+cliffe -27921
+▁strives -27922
+▁ASE -27923
+Loader -27924
+▁Blade -27925
+▁Cells -27926
+▁Robot -27927
+▁Jensen -27928
+▁Length -27929
+▁attent -27930
+▁slices -27931
+produced -27932
+▁HK -27933
+▁bids -27934
+▁obey -27935
+▁Fruit -27936
+▁Andreas -27937
+▁OC -27938
+▁Tud -27939
+phalt -27940
+▁Fees -27941
+▁zombie -27942
+Little -27943
+▁saints -27944
+▁Winning -27945
+cmd -27946
+Flags -27947
+▁skirt -27948
+reciation -27949
+▁differing -27950
+▁mug -27951
+▁Hits -27952
+▁remix -27953
+▁scoop -27954
+▁tougher -27955
+▁Chronicles -27956
+▁Theology -27957
+▁diminished -27958
+▁screenplay -27959
+▁campaigning -27960
+▁Paw -27961
+▁rag -27962
+posts -27963
+thren -27964
+claimer -27965
+▁Killing -27966
+▁morally -27967
+drew -27968
+▁Yuk -27969
+▁Cove -27970
+▁Sims -27971
+▁lamps -27972
+▁onwards -27973
+IQ -27974
+▁CHAR -27975
+▁Trees -27976
+▁Urugu -27977
+▁cripp -27978
+▁framing -27979
+▁SOC -27980
+▁smo -27981
+▁Entity -27982
+subseteq -27983
+president -27984
+▁BRE -27985
+▁Hipp -27986
+▁Rover -27987
+▁clust -27988
+illation -27989
+▁tunnels -27990
+▁Spectrum -27991
+▁Steelers -27992
+▁Plaintiff -27993
+▁abortions -27994
+▁completes -27995
+▁Trying -27996
+▁ultras -27997
+Selected -27998
+aliation -27999
+▁Pension -28000
+▁informs -28001
+▁stacked -28002
+▁concessions -28003
+▁prow -28004
+▁Coral -28005
+▁cabinets -28006
+Tom -28007
+▁pains -28008
+▁Marvin -28009
+▁Parsons -28010
+▁compiler -28011
+Cert -28012
+cass -28013
+▁zoo -28014
+▁Hammond -28015
+▁nursery -28016
+▁Disorder -28017
+ichte -28018
+▁Dong -28019
+▁replica -28020
+▁outrageous -28021
+▁Wiley -28022
+?. -28023
+essel -28024
+issues -28025
+external -28026
+won -28027
+▁бо -28028
+combe -28029
+▁Giul -28030
+▁Pond -28031
+▁traps -28032
+▁Hastings -28033
+▁resurrection -28034
+rican -28035
+▁debated -28036
+▁silicon -28037
+▁colourful -28038
+år -28039
+cop -28040
+▁diarr -28041
+▁obsess -28042
+▁styling -28043
+▁courtroom -28044
+▁troubling -28045
+▁overseeing -28046
+ilot -28047
+▁Berks -28048
+▁Cannon -28049
+▁Phantom -28050
+ibi -28051
+▁glor -28052
+▁wors -28053
+▁Leather -28054
+▁Antarctica -28055
+▁Lamp -28056
+▁rapp -28057
+▁Candy -28058
+▁slopes -28059
+ellaneous -28060
+▁posterior -28061
+ков -28062
+▁Gujar -28063
+▁motto -28064
+▁Messiah -28065
+▁Suzanne -28066
+▁Saskatchewan -28067
+▁yog -28068
+▁bloc -28069
+▁Capacity -28070
+▁Reviewed -28071
+▁McConnell -28072
+▁deadlines -28073
+▁MV -28074
+iento -28075
+▁consultancy -28076
+▁constituency -28077
+▁transformations -28078
+CoV -28079
+USD -28080
+Mill -28081
+lemn -28082
+▁Dix -28083
+▁Kad -28084
+▁Protein -28085
+zu -28086
+rak -28087
+▁%} -28088
+Display -28089
+ToString -28090
+▁repetitive -28091
+▁(_ -28092
+Layer -28093
+▁casc -28094
+amazon -28095
+▁seventy -28096
+▁Uttar -28097
+▁Submit -28098
+▁embarrassed -28099
+▁unreasonable -28100
+▁Seah -28101
+▁pens -28102
+▁coloured -28103
+▁Antarctic -28104
+▁Curriculum -28105
+ERAL -28106
+cias -28107
+esta -28108
+▁HAR -28109
+▁Unc -28110
+▁OECD -28111
+▁Choir -28112
+▁Wizard -28113
+▁telecom -28114
+▁tribunal -28115
+liber -28116
+)” -28117
+ych -28118
+esan -28119
+office -28120
+▁Fishing -28121
+▁Friedrich -28122
+▁Speak -28123
+historic -28124
+▁garnered -28125
+▁demolition -28126
+Tests -28127
+▁Huss -28128
+▁glanced -28129
+▁Presidents -28130
+▁compulsory -28131
+▁monumental -28132
+▁Parliamentary -28133
+Pi -28134
+berra -28135
+worker -28136
+violent -28137
+▁denotes -28138
+▁inauguration -28139
+▁governors -28140
+Hol -28141
+acs -28142
+▁ms -28143
+▁TIM -28144
+▁chi -28145
+▁rang -28146
+▁sleek -28147
+▁paints -28148
+▁Mustang -28149
+▁sinking -28150
+estion -28151
+▁Credits -28152
+▁Presbyterian -28153
+Too -28154
+tags -28155
+▁ebook -28156
+▁fantas -28157
+▁melted -28158
+▁stroll -28159
+▁freshly -28160
+▁realism -28161
+threatening -28162
+▁affiliation -28163
+▁Twelve -28164
+▁tempted -28165
+▁thyroid -28166
+▁Proposed -28167
+atio -28168
+▁hen -28169
+▁Filter -28170
+▁Launches -28171
+▁advocated -28172
+▁ABS -28173
+▁voiced -28174
+▁Duchess -28175
+▁Weapons -28176
+▁adapter -28177
+▁sympath -28178
+▁attained -28179
+Ma -28180
+▁peek -28181
+▁Exactly -28182
+▁Harvest -28183
+seven -28184
+▁Nass -28185
+▁Tail -28186
+▁Hyder -28187
+ivalent -28188
+*. -28189
+OTT -28190
+▁Xu -28191
+fell -28192
+▁Tian -28193
+upunct -28194
+▁Clause -28195
+▁Imaging -28196
+▁textures -28197
+▁anthology -28198
+Fire -28199
+▁advises -28200
+▁Governors -28201
+▁ú -28202
+fony -28203
+▁IPv -28204
+otide -28205
+pretation -28206
+▁Majority -28207
+▁Provides -28208
+▁conducts -28209
+phasis -28210
+▁Annex -28211
+▁daunting -28212
+▁"$ -28213
+▁Nasdaq -28214
+ieri -28215
+▁penny -28216
+▁Editors -28217
+▁averages -28218
+ENSE -28219
+isman -28220
+▁Alexa -28221
+▁insign -28222
+izophren -28223
+Univers -28224
+▁Abdullah -28225
+▁Lomb -28226
+▁Texans -28227
+▁Concrete -28228
+Cat -28229
+род -28230
+▁expire -28231
+▁cervical -28232
+▁Nationals -28233
+éc -28234
+▁NFT -28235
+▁JUST -28236
+▁Clock -28237
+▁Platinum -28238
+▁jewellery -28239
+▁NP -28240
+▁Ricky -28241
+▁Shooting -28242
+▁spectacle -28243
+▁Cunningham -28244
+▁brightness -28245
+leaf -28246
+Package -28247
+ensable -28248
+▁Portfolio -28249
+Match -28250
+topic -28251
+▁Licensed -28252
+▁Institutional -28253
+flows -28254
+▁Ramsey -28255
+bd -28256
+▁Seed -28257
+▁Bulls -28258
+▁kinderg -28259
+jay -28260
+▁Stuff -28261
+▁einer -28262
+rendered -28263
+▁Distance -28264
+throws -28265
+▁spins -28266
+▁Designs -28267
+▁reconsider -28268
+▁compartment -28269
+▁Cuomo -28270
+▁sprink -28271
+▁Hawkins -28272
+▁knocking -28273
+▁vitamins -28274
+▁io -28275
+▁McLe -28276
+▁Angie -28277
+▁stern -28278
+▁Frontier -28279
+▁professionalism -28280
+▁deportation -28281
+▁Damon -28282
+▁dumped -28283
+▁sticky -28284
+▁stunned -28285
+▁predators -28286
+ник -28287
+FILE -28288
+were -28289
+rupted -28290
+▁lunar -28291
+▁Workplace -28292
+▁Vanderbilt -28293
+▁cultivated -28294
+▁formulated -28295
+▁Mali -28296
+ayette -28297
+cycles -28298
+▁Michele -28299
+▁unanimously -28300
+aughs -28301
+▁graft -28302
+▁Cheney -28303
+▁bracket -28304
+ritt -28305
+illin -28306
+▁Ling -28307
+▁Piet -28308
+▁sailors -28309
+▁documentaries -28310
+▁GST -28311
+▁IPO -28312
+EMBER -28313
+▁toast -28314
+▁mp -28315
+trade -28316
+▁кон -28317
+▁Coul -28318
+angles -28319
+▁Negro -28320
+▁mercury -28321
+▁midfielder -28322
+▁precautions -28323
+▁Spurs -28324
+▁Nights -28325
+▁Pepper -28326
+Qaeda -28327
+▁Diaz -28328
+▁Teach -28329
+▁adorn -28330
+▁Monaco -28331
+▁coated -28332
+▁robotic -28333
+▁defeating -28334
+{$ -28335
+▁misuse -28336
+▁nominal -28337
+Force -28338
+brief -28339
+theme -28340
+blower -28341
+solete -28342
+▁Booker -28343
+▁Mozart -28344
+▁Percent -28345
+Light -28346
+▁Millennium -28347
+▁lan -28348
+login -28349
+▁Stam -28350
+▁cyst -28351
+▁trailers -28352
+▁Transactions -28353
+▁Emil -28354
+▁Sick -28355
+▁coil -28356
+▁pillow -28357
+▁dislike -28358
+▁rivalry -28359
+▁attacker -28360
+▁militant -28361
+▁$\{ -28362
+▁Pall -28363
+▁misses -28364
+▁believer -28365
+▁diagrams -28366
+▁notebook -28367
+▁suppressed -28368
+▁influencing -28369
+▁Aval -28370
+▁Sabb -28371
+▁Fargo -28372
+▁WATCH -28373
+▁combo -28374
+builder -28375
+}' -28376
+▁mund -28377
+▁Vital -28378
+▁Assault -28379
+▁courtyard -28380
+Region -28381
+acting -28382
+▁Sachs -28383
+Extension -28384
+▁Chrysler -28385
+▁Including -28386
+▁Kardash -28387
+WAY -28388
+▁bury -28389
+olithic -28390
+notation -28391
+▁luggage -28392
+▁alleviate -28393
+▁polynomial -28394
+▁IST -28395
+▁scrub -28396
+▁Metall -28397
+fat -28398
+capt -28399
+▁shines -28400
+▁lineback -28401
+Metadata -28402
+oby -28403
+atrix -28404
+▁stagn -28405
+▁Archer -28406
+contents -28407
+▁simplify -28408
+▁telescope -28409
+▁favourites -28410
+▁bob -28411
+▁Falk -28412
+▁Phen -28413
+▁Semi -28414
+abolic -28415
+▁Saddam -28416
+▁quirky -28417
+▁chassis -28418
+touch -28419
+▁Origins -28420
+▁ArrayList -28421
+▁Cognitive -28422
+▁informing -28423
+▁Bean -28424
+obbies -28425
+▁rains -28426
+colored -28427
+▁PGA -28428
+▁chord -28429
+▁fixes -28430
+▁Keller -28431
+▁evacuation -28432
+▁Mé -28433
+▁Buyer -28434
+▁Diary -28435
+▁seizures -28436
+aram -28437
+ultz -28438
+Commerce -28439
+xis -28440
+▁Bj -28441
+icas -28442
+▁DIR -28443
+broken -28444
+league -28445
+▁Include -28446
+▁spectro -28447
+▁BD -28448
+▁audi -28449
+▁popped -28450
+▁Norwich -28451
+▁graffiti -28452
+▁да -28453
+Stat -28454
+▁FILE -28455
+кі -28456
+ман -28457
+▁Rotary -28458
+▁synth -28459
+omorphic -28460
+▁deposition -28461
+."[ -28462
+igi -28463
+▁Ferry -28464
+astered -28465
+▁reinst -28466
+▁weakened -28467
+▁adherence -28468
+▁contradiction -28469
+Wait -28470
+▁Noel -28471
+▁geared -28472
+▁mushrooms -28473
+▁distributing -28474
+▁vor -28475
+▁Silk -28476
+▁Conrad -28477
+▁Donovan -28478
+▁Measures -28479
+▁revelations -28480
+/" -28481
+Mor -28482
+▁Romeo -28483
+matched -28484
+▁someday -28485
+▁packaged -28486
+ongs -28487
+rolog -28488
+▁Aunt -28489
+▁pics -28490
+schema -28491
+▁Barton -28492
+▁overhaul -28493
+piracy -28494
+▁Camden -28495
+▁behold -28496
+▁Pressure -28497
+▁stationed -28498
+▁Hanc -28499
+transfer -28500
+Lou -28501
+uzzy -28502
+▁BAR -28503
+▁Vand -28504
+▁lett -28505
+▁Elena -28506
+▁inverse -28507
+▁distraction -28508
+)— -28509
+ERC -28510
+▁phon -28511
+▁Catalogue -28512
+▁sewer -28513
+▁timer -28514
+ornings -28515
+paralle -28516
+▁discounted -28517
+▁technician -28518
+limit -28519
+▁rainy -28520
+▁Buhari -28521
+▁Siem -28522
+▁Yosh -28523
+▁gossip -28524
+▁surname -28525
+Ts -28526
+▁LM -28527
+▁Yas -28528
+▁Bucks -28529
+▁sender -28530
+▁recognizable -28531
+entin -28532
+▁Nicola -28533
+▁declines -28534
+▁pensions -28535
+▁пра -28536
+▁TEST -28537
+▁mais -28538
+▁seals -28539
+▁melodies -28540
+Final -28541
+▁Dion -28542
+ibia -28543
+ussed -28544
+Success -28545
+▁Deliver -28546
+▁Offering -28547
+▁alcoholic -28548
+спо -28549
+▁Harri -28550
+▁Invol -28551
+▁Gather -28552
+▁Episcopal -28553
+▁terrified -28554
+▁Vs -28555
+mock -28556
+▁Atmosp -28557
+establish -28558
+▁afforded -28559
+▁Submitted -28560
+independent -28561
+▁descriptive -28562
+▁hospitalized -28563
+Sa -28564
+aval -28565
+▁Wan -28566
+▁prefers -28567
+▁Carnival -28568
+▁extracts -28569
+▁Elaine -28570
+▁Suggest -28571
+umbered -28572
+LEY -28573
+▁Burk -28574
+obiles -28575
+▁spices -28576
+▁contextual -28577
+aso -28578
+▁Sgt -28579
+▁PTSD -28580
+▁Clubs -28581
+privacy -28582
+▁Soldiers -28583
+▁Tracking -28584
+▁Tir -28585
+▁miniature -28586
+▁coordinating -28587
+Hy -28588
+▁Depot -28589
+ikers -28590
+▁Brom -28591
+izzard -28592
+▁doomed -28593
+▁humorous -28594
+▁ambiguous -28595
+▁undergone -28596
+▁tid -28597
+Selection -28598
+▁(£ -28599
+wand -28600
+▁MIC -28601
+▁platinum -28602
+▁noteworthy -28603
+mong -28604
+▁Ella -28605
+▁rude -28606
+▁Bentley -28607
+▁Practical -28608
+▁dependencies -28609
+▁WORK -28610
+▁figur -28611
+▁Hezbollah -28612
+▁Berk -28613
+▁swiftly -28614
+▁Yun -28615
+▁snipp -28616
+▁woven -28617
+▁villains -28618
+▁longitudinal -28619
+orrh -28620
+prod -28621
+could -28622
+prefix -28623
+▁Sociology -28624
+designed -28625
+▁saturated -28626
+▁Installation -28627
+oliber -28628
+▁Anime -28629
+▁blink -28630
+▁Harlem -28631
+▁transformative -28632
+bat -28633
+▁Oscars -28634
+▁misdem -28635
+▁Midlands -28636
+▁documenting -28637
+▁Classification -28638
+▁что -28639
+ettle -28640
+owell -28641
+▁Fighter -28642
+▁Lifetime -28643
+▁villagers -28644
+inia -28645
+▁goat -28646
+▁imprint -28647
+Okay -28648
+▁relic -28649
+hibition -28650
+▁Sergeant -28651
+,, -28652
+Talk -28653
+avez -28654
+▁Aires -28655
+▁Kosovo -28656
+▁Inquiry -28657
+▁cleanup -28658
+▁Required -28659
+▁redesign -28660
+▁splendid -28661
+▁outbreaks -28662
+▁Restaurants -28663
+▁Gomez -28664
+▁Airbus -28665
+▁Latvia -28666
+▁pairing -28667
+▁cc -28668
+Border -28669
+assets -28670
+▁Pharma -28671
+▁hepatitis -28672
+▁statistic -28673
+▁Glory -28674
+▁Diocese -28675
+▁Ethiopian -28676
+[$ -28677
+vez -28678
+▁Hof -28679
+▁coronary -28680
+▁revisions -28681
+▁XI -28682
+Rober -28683
+asers -28684
+games -28685
+▁Lori -28686
+▁lubric -28687
+▁capsule -28688
+▁liberties -28689
+▁$| -28690
+▁Rao -28691
+▁lent -28692
+▁Married -28693
+▁Quartet -28694
+▁monopoly -28695
+Fre -28696
+Mex -28697
+▁Bid -28698
+▁Kro -28699
+ority -28700
+Russian -28701
+▁misery -28702
+▁refrain -28703
+▁Entrepreneur -28704
+▁marginalized -28705
+▁Nickel -28706
+affected -28707
+▁Consolid -28708
+▁electronically -28709
+Inf -28710
+▁AMER -28711
+▁Sunny -28712
+▁Navigation -28713
+▁existential -28714
+▁ted -28715
+▁glue -28716
+community -28717
+oire -28718
+▁cate -28719
+▁bucks -28720
+▁boosting -28721
+▁Helsinki -28722
+▁spectators -28723
+inho -28724
+▁Byron -28725
+▁insol -28726
+▁peril -28727
+▁dentist -28728
+▁narciss -28729
+▁detached -28730
+risy -28731
+▁discarded -28732
+opens -28733
+▁STAR -28734
+▁pins -28735
+▁Separ -28736
+▁Resume -28737
+▁boiling -28738
+▁gallons -28739
+▁impulse -28740
+▁Sections -28741
+▁expiration -28742
+arton -28743
+▁reboot -28744
+▁wastewater -28745
+▁unavailable -28746
+▁AH -28747
+atsu -28748
+▁Cain -28749
+▁marketed -28750
+▁accreditation -28751
+▁RI -28752
+▁Volvo -28753
+▁redef -28754
+▁infiltr -28755
+▁articulate -28756
+▁classmates -28757
+▁neurological -28758
+NF -28759
+emplates -28760
+orescent -28761
+username -28762
+▁wasting -28763
+heat -28764
+▁pity -28765
+▁Shock -28766
+▁Niagara -28767
+▁motivate -28768
+▁digitally -28769
+Ill -28770
+▁Divid -28771
+println -28772
+▁Weaver -28773
+▁echoed -28774
+▁candles -28775
+▁Crossing -28776
+▁closures -28777
+▁reminding -28778
+Ct -28779
+▁Prepare -28780
+▁Deutsche -28781
+oing -28782
+▁dod -28783
+▁Tweet -28784
+▁china -28785
+▁indemn -28786
+▁indoors -28787
+▁landlords -28788
+▁volleyball -28789
+yu -28790
+ді -28791
+▁KR -28792
+ithm -28793
+▁FAA -28794
+quette -28795
+▁rentals -28796
+▁Ernst -28797
+▁Reserv -28798
+▁securely -28799
+▁evidenced -28800
+▁navigating -28801
+▁Responsibility -28802
+Mov -28803
+▁FP -28804
+▁Anita -28805
+▁Reeves -28806
+▁hailed -28807
+▁Sentinel -28808
+lude -28809
+▁Quran -28810
+▁forehead -28811
+mson -28812
+▁Palin -28813
+▁sobre -28814
+▁Strang -28815
+▁jailed -28816
+▁parser -28817
+asus -28818
+growth -28819
+▁Hawks -28820
+▁Rutgers -28821
+▁accusing -28822
+▁replication -28823
+▁experimentation -28824
+URCE -28825
+quel -28826
+▁Meh -28827
+▁Tuls -28828
+▁Fiona -28829
+▁commencement -28830
+riminal -28831
+▁PayPal -28832
+▁confisc -28833
+▁flourish -28834
+▁wardrobe -28835
+▁admitting -28836
+▁extremist -28837
+Ser -28838
+▁Geb -28839
+▁Til -28840
+innamon -28841
+▁ideally -28842
+lis -28843
+cong -28844
+imple -28845
+▁PMID -28846
+▁Lemon -28847
+▁cartoons -28848
+▁vertices -28849
+▁stationary -28850
+Cur -28851
+▁arrows -28852
+▁cracking -28853
+▁customary -28854
+▁profoundly -28855
+▁refrigerator -28856
+▁Commissioners -28857
+.”[ -28858
+▁beads -28859
+▁Steele -28860
+▁invoked -28861
+▁df -28862
+etus -28863
+ewood -28864
+otions -28865
+▁Humans -28866
+▁coping -28867
+▁Geoffrey -28868
+▁viability -28869
+▁antibiotic -28870
+cheon -28871
+▁Fritz -28872
+▁archival -28873
+▁generalized -28874
+▁Kanye -28875
+mediated -28876
+▁modeled -28877
+▁fundraiser -28878
+▁ci -28879
+▁DON -28880
+▁PRES -28881
+▁Holder -28882
+▁scaled -28883
+▁spider -28884
+▁Canterbury -28885
+▁NAV -28886
+verance -28887
+▁backbone -28888
+▁OA -28889
+▁diets -28890
+▁assaulted -28891
+▁", -28892
+bullet -28893
+▁Sprint -28894
+▁Genetics -28895
+▁standalone -28896
+uire -28897
+▁boast -28898
+▁towel -28899
+▁resolving -28900
+scenes -28901
+▁mould -28902
+Fun -28903
+▁OD -28904
+▁Autism -28905
+▁tapped -28906
+▁Patterns -28907
+▁Ngu -28908
+serial -28909
+▁Salon -28910
+▁ridge -28911
+▁Himself -28912
+▁burdens -28913
+▁Canberra -28914
+▁repaired -28915
+olla -28916
+rots -28917
+▁discs -28918
+▁Helena -28919
+atism -28920
+▁reacted -28921
+imony -28922
+▁Burma -28923
+▁Siege -28924
+▁postal -28925
+Pod -28926
+euro -28927
+tebr -28928
+▁lact -28929
+follow -28930
+▁Victims -28931
+▁teammate -28932
+▁pretending -28933
+▁eq -28934
+▁ml -28935
+Women -28936
+▁Pilgr -28937
+▁menus -28938
+▁naive -28939
+▁blends -28940
+Bad -28941
+STAT -28942
+lest -28943
+▁yacht -28944
+▁homicide -28945
+▁Illustrated -28946
+enium -28947
+▁Rough -28948
+▁alloy -28949
+▁bells -28950
+▁Behavioral -28951
+▁sentiments -28952
+Rs -28953
+▁LOL -28954
+▁Bing -28955
+▁cans -28956
+▁Siber -28957
+▁crashing -28958
+▁confession -28959
+▁decorating -28960
+▁humanities -28961
+▁emergencies -28962
+cry -28963
+▁су -28964
+▁Dud -28965
+▁Deck -28966
+▁monks -28967
+Details -28968
+▁encaps -28969
+▁judiciary -28970
+▁Vu -28971
+score -28972
+▁barbar -28973
+▁Squadron -28974
+▁Garage -28975
+▁Seventh -28976
+▁undersc -28977
+▁Wonderful -28978
+▁crossover -28979
+▁MAG -28980
+▁bak -28981
+▁turf -28982
+▁Owens -28983
+▁unused -28984
+▁broadcaster -28985
+▁Himal -28986
+▁Exhibit -28987
+▁Ensemble -28988
+▁trainers -28989
+▁demolished -28990
+▁witnessing -28991
+▁constrained -28992
+▁hypertension -28993
+Queue -28994
+▁chees -28995
+autical -28996
+▁turnout -28997
+▁shipment -28998
+▁impacting -28999
+kk -29000
+Sat -29001
+alan -29002
+blic -29003
+▁enum -29004
+▁extrav -29005
+▁upgrading -29006
+▁Characters -29007
+▁vegetarian -29008
+jug -29009
+▁Vig -29010
+▁cmd -29011
+choice -29012
+patrick -29013
+▁sinister -29014
+▁counselor -29015
+▁Eas -29016
+▁slogan -29017
+▁programmed -29018
+burst -29019
+▁packets -29020
+▁attributable -29021
+▁DD -29022
+▁Nets -29023
+▁Pour -29024
+beg -29025
+SERV -29026
+▁alterations -29027
+▁introductory -29028
+/# -29029
+▁café -29030
+▁chic -29031
+▁Convert -29032
+▁Warfare -29033
+▁peptide -29034
+▁kidnapped -29035
+▁catastrophe -29036
+▁persistence -29037
+hex -29038
+Mock -29039
+wives -29040
+▁Started -29041
+▁pumping -29042
+▁psychiat -29043
+uria -29044
+assic -29045
+▁Tatt -29046
+▁corrosion -29047
+ariate -29048
+▁booster -29049
+▁glowing -29050
+▁Elements -29051
+▁whispered -29052
+ROW -29053
+Clear -29054
+▁Cron -29055
+▁Femin -29056
+▁aided -29057
+▁beard -29058
+cfg -29059
+▁eu -29060
+▁CHAP -29061
+▁clicked -29062
+bill -29063
+▁Nah -29064
+▁від -29065
+inher -29066
+▁allergic -29067
+▁dye -29068
+▁Winc -29069
+▁harmless -29070
+▁Acid -29071
+▁Carmen -29072
+▁|\ -29073
+▁Brah -29074
+▁Jury -29075
+▁Poker -29076
+uren -29077
+Never -29078
+▁EDIT -29079
+▁Aging -29080
+▁inert -29081
+▁attackers -29082
+azar -29083
+▁Lighting -29084
+▁reversal -29085
+Three -29086
+ophag -29087
+astical -29088
+▁sharks -29089
+Friend -29090
+▁persona -29091
+▁accelerating -29092
+▁Rental -29093
+▁rallies -29094
+▁Aerospace -29095
+▁commanding -29096
+▁slap -29097
+facing -29098
+▁Bax -29099
+organic -29100
+▁coupons -29101
+▁earnest -29102
+▁guessed -29103
+Mil -29104
+▁ja -29105
+Camp -29106
+▁Bonnie -29107
+▁Contents -29108
+▁outgoing -29109
+(__ -29110
+▁infl -29111
+▁shrimp -29112
+▁sidewalk -29113
+addr -29114
+itone -29115
+▁Bali -29116
+▁cavity -29117
+▁Volunteers -29118
+Tor -29119
+▁Ned -29120
+▁Alexis -29121
+▁Vampire -29122
+▁obstruct -29123
+▁leveraging -29124
+▁Citation -29125
+▁Obamacare -29126
+acted -29127
+▁Gerry -29128
+▁universally -29129
+▁Ital -29130
+▁Terr -29131
+Double -29132
+▁Ludwig -29133
+▁costing -29134
+paralleled -29135
+▁admiration -29136
+ogly -29137
+▁CFR -29138
+anguard -29139
+▁antigen -29140
+▁Advisors -29141
+▁SEE -29142
+▁Daddy -29143
+▁Huntington -29144
+agements -29145
+▁Bahamas -29146
+▁marching -29147
+▁Approximately -29148
+blind -29149
+▁melan -29150
+▁Shared -29151
+▁contag -29152
+▁purported -29153
+▁Experiment -29154
+pull -29155
+▁iPod -29156
+▁Toxic -29157
+▁Answers -29158
+▁compost -29159
+▁Veget -29160
+▁strategically -29161
+жен -29162
+▁Tact -29163
+▁eloqu -29164
+▁Starring -29165
+ос -29166
+▁ö -29167
+▁(. -29168
+amar -29169
+numer -29170
+Walk -29171
+▁Benson -29172
+▁Royals -29173
+▁openness -29174
+GH -29175
+▁Lamar -29176
+▁raids -29177
+▁Algeria -29178
+▁Twilight -29179
+▁Respondent -29180
+tmp -29181
+▁SAL -29182
+▁Seal -29183
+isexual -29184
+▁Guides -29185
+▁Spread -29186
+▁outing -29187
+▁Anglican -29188
+▁honoured -29189
+▁intrigued -29190
+▁landmarks -29191
+▁nationality -29192
+▁Configuration -29193
+▁overwhelmingly -29194
+▁Fury -29195
+▁solitary -29196
+▁Dangerous -29197
+ís -29198
+▁FIL -29199
+uously -29200
+▁Runner -29201
+▁sewing -29202
+▁Genetic -29203
+▁reviewers -29204
+umbles -29205
+▁Editing -29206
+▁Pamela -29207
+▁Tradem -29208
+▁Buckingham -29209
+asian -29210
+▁Http -29211
+estial -29212
+▁Tucson -29213
+▁rhythms -29214
+Attributes -29215
+▁mural -29216
+▁Tweets -29217
+▁Dickens -29218
+▁Cameroon -29219
+▁embodied -29220
+Sah -29221
+flex -29222
+maybe -29223
+▁Abbas -29224
+▁redeem -29225
+▁IOException -29226
+ambo -29227
+▁Lack -29228
+▁Stad -29229
+▁Serve -29230
+▁Leaving -29231
+▁reclaim -29232
+▁CU -29233
+markets -29234
+▁hassle -29235
+▁exported -29236
+▁conceal -29237
+▁puppy -29238
+▁Infinity -29239
+▁Renewable -29240
+▁bourgeois -29241
+▁characterize -29242
+Var -29243
+tec -29244
+▁Jab -29245
+aduates -29246
+driving -29247
+▁Halifax -29248
+▁informational -29249
+▁CHR -29250
+▁LCD -29251
+▁Nielsen -29252
+▁routing -29253
+бе -29254
+▁DOJ -29255
+▁Biod -29256
+▁Bolivia -29257
+▁Empower -29258
+▁Investing -29259
+must -29260
+bearing -29261
+▁adulthood -29262
+▁inference -29263
+▁unusually -29264
+▁BET -29265
+grave -29266
+▁Fleming -29267
+▁intimacy -29268
+▁Fisheries -29269
+▁imped -29270
+▁lifts -29271
+▁sturdy -29272
+▁adapting -29273
+▁MAS -29274
+ieder -29275
+▁spun -29276
+ACP -29277
+▁Ré -29278
+▁Ads -29279
+▁tee -29280
+odiac -29281
+▁Beyon -29282
+▁replaces -29283
+▁Fusion -29284
+▁Meetings -29285
+▁elephants -29286
+▁Surveillance -29287
+oplus -29288
+▁gras -29289
+ituary -29290
+▁Ensure -29291
+▁Kelley -29292
+jin -29293
+aned -29294
+▁Auss -29295
+▁ethic -29296
+▁Rodney -29297
+▁psychic -29298
+▁Elim -29299
+filename -29300
+▁transforms -29301
+▁youngsters -29302
+▁interviewing -29303
+iten -29304
+Journal -29305
+percent -29306
+▁Lisbon -29307
+Category -29308
+▁webpage -29309
+▁implants -29310
+▁implication -29311
+KO -29312
+Ali -29313
+▁mortg -29314
+▁distorted -29315
+_( -29316
+atro -29317
+arers -29318
+▁Jeep -29319
+▁stereo -29320
+▁Honduras -29321
+▁extremists -29322
+▁TY -29323
+Radio -29324
+▁concluding -29325
+Move -29326
+▁foil -29327
+▁youths -29328
+▁Aberdeen -29329
+▁financed -29330
+▁presumed -29331
+▁Interface -29332
+ataka -29333
+estead -29334
+GN -29335
+erno -29336
+▁Boh -29337
+▁Toby -29338
+▁mills -29339
+▁Cancel -29340
+▁Rupert -29341
+▁consoles -29342
+▁inspector -29343
+Perm -29344
+▁ana -29345
+▁doctr -29346
+▁pertinent -29347
+▁stimulating -29348
+LAN -29349
+COMP -29350
+▁Geo -29351
+▁Elle -29352
+▁Claus -29353
+▁Increased -29354
+▁preschool -29355
+hun -29356
+▁Elm -29357
+▁Oslo -29358
+▁Kirst -29359
+▁mutant -29360
+▁Arabian -29361
+▁CAD -29362
+▁visas -29363
+▁Suzuki -29364
+▁uptake -29365
+▁withdrew -29366
+Assert -29367
+remark -29368
+▁Coron -29369
+▁Split -29370
+ент -29371
+▁Hancock -29372
+▁optimum -29373
+▁gum -29374
+▁Arbor -29375
+▁syrup -29376
+ocalyptic -29377
+▁Mug -29378
+posed -29379
+▁JOHN -29380
+mean -29381
+▁tsun -29382
+lasses -29383
+▁unravel -29384
+▁Outreach -29385
+▁multiplayer -29386
+rogate -29387
+▁inequalities -29388
+facts -29389
+▁striving -29390
+▁greatness -29391
+bool -29392
+eneg -29393
+▁Terra -29394
+▁Zucker -29395
+▁Futures -29396
+▁fountain -29397
+▁Gat -29398
+France -29399
+▁richer -29400
+▁resumed -29401
+▁pastoral -29402
+▁beet -29403
+▁Minority -29404
+ombre -29405
+▁recol -29406
+▁arrays -29407
+▁Darkness -29408
+▁missionary -29409
+▁Fischer -29410
+▁recruits -29411
+Prim -29412
+ться -29413
+▁Dund -29414
+dropdown -29415
+▁rejects -29416
+secondary -29417
+yy -29418
+▁illicit -29419
+▁outfits -29420
+▁disregard -29421
+oshop -29422
+racies -29423
+▁Fifty -29424
+▁Seasons -29425
+▁Normally -29426
+▁offseason -29427
+Vs -29428
+alsa -29429
+ibal -29430
+soon -29431
+▁MET -29432
+Thanks -29433
+olding -29434
+▁Albums -29435
+▁Becker -29436
+▁ancestry -29437
+▁bos -29438
+▁Salary -29439
+▁Accountability -29440
+uen -29441
+Contact -29442
+▁grapes -29443
+Descriptor -29444
+?). -29445
+lium -29446
+▁Twins -29447
+▁stunt -29448
+▁Barker -29449
+▁outset -29450
+▁enrichment -29451
+wed -29452
+▁ripe -29453
+▁Rolls -29454
+Library -29455
+▁Melanie -29456
+▁Statements -29457
+imps -29458
+vette -29459
+▁rhe -29460
+▁infantry -29461
+'} -29462
+fx -29463
+STEM -29464
+Common -29465
+▁Baltic -29466
+▁yielded -29467
+▁repetition -29468
+Synt -29469
+▁Slav -29470
+factor -29471
+▁Buenos -29472
+▁Destiny -29473
+▁nucleus -29474
+respective -29475
+▁perimeter -29476
+▁responders -29477
+train -29478
+handler -29479
+▁barred -29480
+fashioned -29481
+▁Kem -29482
+▁Elev -29483
+▁Hogan -29484
+▁hatch -29485
+▁Robbie -29486
+▁Attempt -29487
+ocumented -29488
+protected -29489
+▁violates -29490
+▁persuaded -29491
+▁tendencies -29492
+нии -29493
+cart -29494
+Socket -29495
+▁Crane -29496
+▁Woody -29497
+▁Wisdom -29498
+▁Duration -29499
+▁Neurolog -29500
+▁analysed -29501
+intestinal -29502
+▁disconnect -29503
+▁demographics -29504
+▁chalk -29505
+▁Receive -29506
+▁anonymity -29507
+▁philosophers -29508
+▁visualization -29509
+=” -29510
+Math -29511
+mort -29512
+▁factions -29513
+▁postseason -29514
+uti -29515
+props -29516
+▁Evel -29517
+icent -29518
+▁Fake -29519
+antics -29520
+▁Barcl -29521
+▁Spike -29522
+▁sandy -29523
+▁prosecuted -29524
+▁reimbursement -29525
+▁сво -29526
+▁Sham -29527
+▁stove -29528
+▁Cooperative -29529
+▁RP -29530
+faced -29531
+▁Lung -29532
+olkien -29533
+▁werden -29534
+Progress -29535
+Ber -29536
+kef -29537
+▁Laurent -29538
+▁Kod -29539
+mouse -29540
+those -29541
+▁algae -29542
+company -29543
+idences -29544
+▁Dwight -29545
+▁Proposal -29546
+▁finalist -29547
+affiliated -29548
+▁volunteered -29549
+eye -29550
+German -29551
+▁Buddy -29552
+▁sketches -29553
+▁examinations -29554
+Leg -29555
+▁Poe -29556
+▁Affili -29557
+▁Unlimited -29558
+nm -29559
+▁puls -29560
+▁Appalach -29561
+▁swelling -29562
+fied -29563
+urgy -29564
+fried -29565
+▁Maurit -29566
+▁posing -29567
+▁wary -29568
+▁Plato -29569
+▁facto -29570
+defense -29571
+▁prohibits -29572
+assembly -29573
+▁Kathryn -29574
+▁np -29575
+▁Ank -29576
+▁CAT -29577
+overe -29578
+Screen -29579
+▁harbour -29580
+▁Presents -29581
+▁Dirty -29582
+▁ornam -29583
+menting -29584
+▁Goldberg -29585
+▁enriched -29586
+▁Hyderabad -29587
+▁shattered -29588
+URI -29589
+▁Sevent -29590
+▁reconn -29591
+regation -29592
+▁Component -29593
+▁compressed -29594
+▁imaginative -29595
+▁AST -29596
+▁dal -29597
+ratic -29598
+folios -29599
+forming -29600
+▁Putting -29601
+POR -29602
+▁"- -29603
+▁Regina -29604
+▁Stocks -29605
+tershire -29606
+▁impending -29607
+▁rav -29608
+▁rubbish -29609
+▁Grill -29610
+▁Tobacco -29611
+▁escaping -29612
+mud -29613
+▁gust -29614
+▁gangs -29615
+▁Clarks -29616
+▁summarized -29617
+▁multicultural -29618
+peer -29619
+▁Guru -29620
+▁(...) -29621
+▁economical -29622
+▁ubiquitous -29623
+avage -29624
+▁dyst -29625
+Single -29626
+▁methane -29627
+▁handmade -29628
+▁Asc -29629
+acker -29630
+▁Gala -29631
+Lat -29632
+ftime -29633
+▁Sword -29634
+ference -29635
+▁selector -29636
+▁cocktails -29637
+▁laundering -29638
+▁abd -29639
+▁Graphics -29640
+▁fostering -29641
+▁patriarch -29642
+▁respecting -29643
+▁Drivers -29644
+▁descend -29645
+▁humility -29646
+▁patented -29647
+▁Gau -29648
+otism -29649
+▁Rapp -29650
+▁furious -29651
+▁Agencies -29652
+▁Islamist -29653
+▁mechanic -29654
+ATOR -29655
+ères -29656
+Short -29657
+▁Abrams -29658
+▁cortex -29659
+▁indisp -29660
+▁planetary -29661
+▁Kyoto -29662
+▁borne -29663
+▁baptism -29664
+▁Newspaper -29665
+erick -29666
+▁Theo -29667
+distance -29668
+▁reckless -29669
+{" -29670
+▁ча -29671
+iduc -29672
+▁nanop -29673
+▁Burger -29674
+▁construed -29675
+▁incapable -29676
+spect -29677
+▁Jiang -29678
+▁Saving -29679
+▁prevail -29680
+▁sorting -29681
+WP -29682
+nih -29683
+▁ETF -29684
+▁foc -29685
+▁caves -29686
+▁enlar -29687
+▁sailed -29688
+▁commits -29689
+▁commute -29690
+▁vibration -29691
+AX -29692
+▁Orb -29693
+▁Syl -29694
+ascar -29695
+▁Ding -29696
+▁skeptic -29697
+ABC -29698
+▁specs -29699
+▁Sutton -29700
+ructures -29701
+▁surfing -29702
+▁spur -29703
+▁mates -29704
+▁primer -29705
+▁sleeves -29706
+Fragment -29707
+▁poignant -29708
+inness -29709
+▁Maxim -29710
+▁Catalog -29711
+▁Variety -29712
+▁climax -29713
+TB -29714
+verett -29715
+="../../../ -29716
+▁manipulated -29717
+flag -29718
+▁bien -29719
+finder -29720
+▁Irene -29721
++) -29722
+rane -29723
+endas -29724
+shared -29725
+▁innate -29726
+▁elusive -29727
+▁supremacy -29728
+▁Hut -29729
+▁Pia -29730
+▁pes -29731
+▁bere -29732
+immune -29733
+▁Turns -29734
+▁purse -29735
+▁runtime -29736
+cov -29737
+▁DV -29738
+▁boycott -29739
+▁claimant -29740
+▁translator -29741
+▁Mineral -29742
+▁prompting -29743
+▁clinically -29744
+▁earthquakes -29745
+Sep -29746
+UID -29747
+▁Radical -29748
+yet -29749
+aryn -29750
+Users -29751
+▁Chick -29752
+▁toilets -29753
+Background -29754
+▁Greenwich -29755
+▁coastline -29756
+▁Blockchain -29757
+▁substituted -29758
+prem -29759
+▁Mia -29760
+▁Regg -29761
+igraph -29762
+▁disrupted -29763
+communication -29764
+▁Municipality -29765
+)! -29766
+nz -29767
+▁PCs -29768
+▁chrome -29769
+▁motivations -29770
+▁QC -29771
+▁Kush -29772
+▁Digest -29773
+▁strokes -29774
+tto -29775
+▁ni -29776
+idine -29777
+▁Seat -29778
+▁worsh -29779
+▁Buying -29780
+▁mortal -29781
+Xml -29782
+jer -29783
+uebl -29784
+ivism -29785
+▁Humanity -29786
+▁commemorate -29787
+▁Ves -29788
+thora -29789
+▁noun -29790
+Native -29791
+Spring -29792
+ankind -29793
+▁Levine -29794
+▁Odyssey -29795
+▁bonding -29796
+▁Printing -29797
+▁astronomy -29798
+▁peninsula -29799
+▁disparities -29800
+▁sept -29801
+ophile -29802
+▁Blank -29803
+▁Reduction -29804
+▁alias -29805
+▁prominence -29806
+getElementById -29807
+Wal -29808
+oine -29809
+▁Nos -29810
+▁crank -29811
+▁enforcing -29812
+▁Relationships -29813
+ства -29814
+▁xen -29815
+▁Heads -29816
+▁ratt -29817
+▁herald -29818
+sensitive -29819
+▁accessory -29820
+▁Emerson -29821
+▁ethanol -29822
+▁Bringing -29823
+▁Landroid -29824
+▁sé -29825
+▁Uzbek -29826
+cuts -29827
+▁Sunni -29828
+Girl -29829
+andi -29830
+becue -29831
+▁immortal -29832
+▁Streaming -29833
+▁athletics -29834
+▁unparalleled -29835
+▁Tao -29836
+▁Dock -29837
+▁Hulu -29838
+▁unsett -29839
+▁Macedonia -29840
+EFF -29841
+▁Zar -29842
+Arthur -29843
+ulsive -29844
+▁influx -29845
+download -29846
+tub -29847
+▁Pray -29848
+▁Reds -29849
+▁marital -29850
+▁Exploration -29851
+▁obstruction -29852
+$} -29853
+▁Sympt -29854
+▁SAM -29855
+▁Obst -29856
+▁Seas -29857
+▁Alicia -29858
+▁Graves -29859
+▁Sanctuary -29860
+▁visionary -29861
+▁harvesting -29862
+sz -29863
+ogi -29864
+inker -29865
+▁hamm -29866
+▁mixes -29867
+▁Totten -29868
+▁Dynasty -29869
+▁Silence -29870
+▁trailing -29871
+rf -29872
+▁Kyl -29873
+▁Ibid -29874
+▁Overs -29875
+▁Dolphins -29876
+PATH -29877
+amoto -29878
+▁Commit -29879
+bil -29880
+Cola -29881
+adle -29882
+став -29883
+▁hid -29884
+▁Seek -29885
+ILY -29886
+vspace -29887
+▁mapped -29888
+▁Johannesburg -29889
+▁supplemental -29890
+▁HU -29891
+thel -29892
+horse -29893
+jango -29894
+▁Gupta -29895
+▁drastic -29896
+▁Covenant -29897
+▁finalists -29898
+▁steak -29899
+British -29900
+▁typeof -29901
+▁glitter -29902
+blockList -29903
+▁Upcoming -29904
+▁Disclaimer -29905
+Bill -29906
+undy -29907
+▁prosperous -29908
+god -29909
+▁CENT -29910
+▁Cathy -29911
+▁prophecy -29912
+aston -29913
+Render -29914
+▁Jonas -29915
+▁cytok -29916
+olutions -29917
+rimental -29918
+▁melodic -29919
+▁mitochond -29920
+▁bolt -29921
+multicol -29922
+▁aquatic -29923
+rr -29924
+▁Mish -29925
+▁Burning -29926
+▁filings -29927
+▁Mobility -29928
+▁regeneration -29929
+▁MID -29930
+▁Kolk -29931
+▁acet -29932
+joining -29933
+▁Aristotle -29934
+▁Automatic -29935
+▁Consumers -29936
+▁centralized -29937
+shr -29938
+tel -29939
+▁eldest -29940
+▁diploma -29941
+▁GI -29942
+▁Sina -29943
+▁ions -29944
+▁crafting -29945
+▁Mare -29946
+abella -29947
+▁Turks -29948
+ortment -29949
+▁skating -29950
+▁downside -29951
+▁Applicants -29952
+▁ultrasound -29953
+▁commissioners -29954
+temp -29955
+▁elbow -29956
+▁zoning -29957
+?), -29958
+decl -29959
+maps -29960
+sexual -29961
+▁Owners -29962
+▁hallway -29963
+▁normalized -29964
+ños -29965
+unda -29966
+▁durch -29967
+Special -29968
+▁radial -29969
+▁Helping -29970
+▁converts -29971
+▁preventive -29972
+iae -29973
+limits -29974
+▁typedef -29975
+▁hydraulic -29976
+ulla -29977
+omaly -29978
+▁wagon -29979
+esthetic -29980
+Repository -29981
+idisciplinary -29982
+▁YA -29983
+icos -29984
+tabs -29985
+▁LNG -29986
+▁Renault -29987
+▁inventor -29988
+lg -29989
+ysc -29990
+▁Lama -29991
+smouth -29992
+licenses -29993
+▁alleging -29994
+▁protesting -29995
+▁CX -29996
+▁pioneers -29997
+▁Electricity -29998
+elius -29999
+▁Creed -30000
+▁vowed -30001
+boolean -30002
+▁guideline -30003
+▁professions -30004
+apsed -30005
+▁gigs -30006
+▁Conclusion -30007
+▁pornography -30008
+coe -30009
+ifty -30010
+▁Giovanni -30011
+▁Oriental -30012
+▁consultations -30013
+john -30014
+▁Coy -30015
+▁tiger -30016
+▁facilitates -30017
+truth -30018
+▁noisy -30019
+▁Routledge -30020
+▁collaborators -30021
+pred -30022
+▁Hmm -30023
+▁fax -30024
+occupied -30025
+▁Albania -30026
+▁Philharm -30027
+▁aggravated -30028
+▁distortion -30029
+▁кото -30030
+Jewish -30031
+▁attends -30032
+▁sanitation -30033
+▁predecessors -30034
+▁pg -30035
+tten -30036
+▁Alive -30037
+▁Presidency -30038
+UTE -30039
+avoid -30040
+▁lime -30041
+▁Supervis -30042
+▁Treasurer -30043
+▁broadcasts -30044
+ndra -30045
+above -30046
+monds -30047
+rades -30048
+▁USSR -30049
+▁slick -30050
+▁wraps -30051
+▁Amnesty -30052
+responsible -30053
+▁prioritize -30054
+▁brilliantly -30055
+▁jan -30056
+flags -30057
+exists -30058
+▁estates -30059
+▁farewell -30060
+dehy -30061
+esus -30062
+ursive -30063
+ffer -30064
+▁coli -30065
+average -30066
+▁Alberto -30067
+▁augmented -30068
+▁disagreement -30069
+▁xx -30070
+▁Sty -30071
+▁Managers -30072
+▁Examination -30073
+FLAG -30074
+▁Flo -30075
+▁Jays -30076
+▁Moor -30077
+▁slammed -30078
+▁Liberals -30079
+SSL -30080
+fle -30081
+cels -30082
+atore -30083
+▁aden -30084
+Remove -30085
+▁notch -30086
+▁Functional -30087
+▁irrational -30088
+eka -30089
+mington -30090
+▁tariff -30091
+▁relocation -30092
+▁AIR -30093
+▁Hide -30094
+▁Neck -30095
+▁Towers -30096
+▁cushion -30097
+▁chilling -30098
+▁specifies -30099
+▁uit -30100
+ctive -30101
+▁conveyed -30102
+▁pristine -30103
+▁blueprint -30104
+ULE -30105
+pid -30106
+pill -30107
+▁UNC -30108
+▁fluids -30109
+itlement -30110
+zzle -30111
+▁inh -30112
+▁Punk -30113
+▁roku -30114
+kowski -30115
+▁Wheat -30116
+▁bladder -30117
+▁Structural -30118
+GI -30119
+NL -30120
+yna -30121
+▁Fow -30122
+▁veterinary -30123
+▁DEF -30124
+▁Bold -30125
+▁epile -30126
+ertility -30127
+▁topical -30128
+▁reactive -30129
+GER -30130
+rost -30131
+digit -30132
+ippers -30133
+▁Walton -30134
+▁Gallagher -30135
+▁advertise -30136
+ully -30137
+meaning -30138
+▁doubling -30139
+▁Supervisor -30140
+▁fundamentals -30141
+Parse -30142
+▁Zhou -30143
+rically -30144
+▁Selling -30145
+▁Georgian -30146
+commercial -30147
+▁Gamb -30148
+▁devised -30149
+▁cyclists -30150
+▁Pediatric -30151
+multicolumn -30152
+▁decomposition -30153
+тив -30154
+▁Roe -30155
+▁recomb -30156
+directed -30157
+ographed -30158
+▁Included -30159
+▁Thornton -30160
+nine -30161
+▁Hau -30162
+()). -30163
+tops -30164
+▁Rit -30165
+▁rud -30166
+ollary -30167
+▁reaff -30168
+▁extrad -30169
+▁routines -30170
+▁depiction -30171
+hov -30172
+▁Fate -30173
+▁Carlson -30174
+forum -30175
+allets -30176
+▁Daisy -30177
+▁caval -30178
+▁Serena -30179
+▁solemn -30180
+▁perpetrators -30181
+▁gravitational -30182
+bg -30183
+▁Eh -30184
+▁PUR -30185
+Wire -30186
+ánd -30187
+▁invoice -30188
+▁turbine -30189
+▁Elsevier -30190
+Nov -30191
+▁Rox -30192
+▁São -30193
+▁Marri -30194
+▁agile -30195
+▁provocative -30196
+▁ц -30197
+neck -30198
+asaki -30199
+▁Seems -30200
+▁Sigma -30201
+eredith -30202
+▁Handle -30203
+▁plight -30204
+▁Discount -30205
+▁diagnose -30206
+▁fisheries -30207
+))); -30208
+▁biases -30209
+annotation -30210
+▁ALSO -30211
+▁Solic -30212
+▁epist -30213
+▁eagerly -30214
+▁Ceremony -30215
+▁extraord -30216
+▁Corinthians -30217
+Rob -30218
+duty -30219
+▁jed -30220
+sound -30221
+Follow -30222
+▁Bacon -30223
+▁reapp -30224
+▁Fathers -30225
+▁carbohyd -30226
+▁blur -30227
+▁tart -30228
+▁roofs -30229
+gil -30230
+onte -30231
+▁ceil -30232
+stroke -30233
+▁widget -30234
+▁Correction -30235
+/) -30236
+stay -30237
+▁Jol -30238
+ousse -30239
+▁Coup -30240
+▁Katy -30241
+▁playwright -30242
+Az -30243
+▁Katz -30244
+▁Byrne -30245
+▁Colts -30246
+▁Chavez -30247
+▁awhile -30248
+▁blonde -30249
+▁bureaucracy -30250
+▁Rim -30251
+ocard -30252
+▁Rear -30253
+▁Braves -30254
+▁Slovakia -30255
+▁Eck -30256
+▁eve -30257
+▁SPEC -30258
+▁orbital -30259
+▁referee -30260
+▁Optional -30261
+▁courageous -30262
+▁Fool -30263
+▁undes -30264
+▁banana -30265
+▁causal -30266
+▁vocational -30267
+bors -30268
+ollen -30269
+xspace -30270
+Services -30271
+▁Savings -30272
+▁curtains -30273
+▁utilizes -30274
+▁Screening -30275
+▁butterfly -30276
+▁Jaw -30277
+LEASE -30278
+▁Shark -30279
+▁grill -30280
+ampires -30281
+▁Homeless -30282
+▁alliances -30283
+▁stringent -30284
+▁outperform -30285
+Diff -30286
+▁Sole -30287
+▁uneven -30288
+▁Vij -30289
+▁Pret -30290
+artime -30291
+▁grabs -30292
+▁outwe -30293
+buy -30294
+iku -30295
+ourmet -30296
+▁Frame -30297
+▁PUBLIC -30298
+▁chickens -30299
+fd -30300
+ucc -30301
+scre -30302
+uish -30303
+sample -30304
+▁hates -30305
+However -30306
+NEW -30307
+▁ale -30308
+▁boo -30309
+perfect -30310
+▁extinct -30311
+▁BLACK -30312
+▁EVERY -30313
+▁rails -30314
+opoulos -30315
+▁ounces -30316
+upuncture -30317
+▁swinging -30318
+▁newsletters -30319
+▁PAT -30320
+boost -30321
+▁Imam -30322
+▁subpo -30323
+▁Paramount -30324
+▁{% -30325
+uala -30326
+▁Mayer -30327
+▁smoked -30328
+▁Improving -30329
+▁localized -30330
+▁specificity -30331
+▁sind -30332
+▁honoring -30333
+▁planners -30334
+▁inhabited -30335
+▁repression -30336
+amics -30337
+setting -30338
+▁Ronnie -30339
+▁spouses -30340
+▁ministries -30341
+▁ -31655
+? -31656
+< -31657
+* -31658
+! -31659
+– -31660
+[ -31661
+] -31662
+& -31663
+— -31664
+о -31665
+| -31666
+Q -31667
+% -31668
+а -31669
+X -31670
++ -31671
+Z -31672
+и -31673
+е -31674
+# -31675
+‘ -31676
+н -31677
+^ -31678
+р -31679
+é -31680
+т -31681
+@ -31682
+с -31683
+в -31684
+л -31685
+к -31686
+д -31687
+м -31688
+у -31689
+п -31690
+` -31691
+~ -31692
+á -31693
+г -31694
+я -31695
+і -31696
+• -31697
+з -31698
+ó -31699
+í -31700
+б -31701
+ь -31702
+й -31703
+ä -31704
+» -31705
+ü -31706
+ч -31707
+ы -31708
+ö -31709
+£ -31710
+© -31711
+х -31712
+è -31713
+ц -31714
+ж -31715
+à -31716
+· -31717
+° -31718
+â -31719
+« -31720
+ш -31721
+€ -31722
+ю -31723
+ł -31724
+® -31725
+ç -31726
+С -31727
+′ -31728
+§ -31729
+ф -31730
+ا -31731
+П -31732
+å -31733
+В -31734
+ñ -31735
+ -31736
+К -31737
+ã -31738
+ú -31739
+č -31740
diff --git a/data/alpaca/alpaca_data_cleaned_archive.json b/data/alpaca/alpaca_data_cleaned_archive.json
new file mode 100644
index 0000000000000000000000000000000000000000..331972195b8806b6d7a2a93624e74bf95b7ee478
--- /dev/null
+++ b/data/alpaca/alpaca_data_cleaned_archive.json
@@ -0,0 +1,37 @@
+[
+ {
+ "instruction": "Describe DDOS attack",
+ "input": "",
+ "output": "A Distributed Denial of Service (DDoS) attack is a malicious attempt to disrupt the normal functioning of a network, service, or website by overwhelming it with a flood of internet traffic. In a DDoS attack, multiple compromised computers or devices, often referred to as a botnet, are used to generate a massive amount of traffic that exceeds the target's capacity to handle it."
+ },
+ {
+ "instruction": "What are DDOS technique?",
+ "input": "",
+ "output": "Traffic flooding: The attacker overwhelms the target with a massive volume of network traffic, consuming all available resources. \nApplication layer attacks: The attacker targets specific applications or services, attempting to exhaust their resources by exploiting vulnerabilities or by initiating resource-intensive operations. \nProtocol attacks: The attacker exploits weaknesses in network protocols, such as the TCP/IP stack, to consume server resources or disrupt communication.\nReflective/amplified attacks: The attacker uses a technique where they send requests to a large number of vulnerable systems that respond by sending a much larger response back to the target, amplifying the attack's impact."
+ },
+ {
+ "instruction": "How to avoid DDOS attack",
+ "input": "",
+ "output": "DDoS mitigation services: Consider using a reputable DDoS mitigation service that specializes in detecting and mitigating attacks. These services can help filter out malicious traffic and ensure your network remains accessible during an attack. \nNetwork monitoring: Implement robust network monitoring systems to detect any unusual traffic patterns or sudden spikes in traffic. Early detection can allow you to respond quickly and mitigate the impact of an attack."
+ },
+ {
+ "instruction": "what to do when getting ddos-ed",
+ "input": "",
+ "output": "Contact your DDoS mitigation service: If you have a DDoS mitigation service in place, immediately contact them to activate their services and inform them about the ongoing attack. They can help filter out malicious traffic and provide guidance on mitigation strategies.\nMonitor network traffic: Use network monitoring tools or services to analyze and monitor the incoming traffic. Identify the characteristics of the attack, such as the source IP addresses, traffic patterns, and attack vectors. This information can be useful for further analysis and potential legal action."
+ },
+ {
+ "instruction": "What is DDOS attack",
+ "input": "",
+ "output": "is a malicious with a flood of internet traffic. In a DDoS attack, multiple compromised computers or devices, often referred to as a botnet, are used to generate a massive amount of traffic that exceeds the target's capacity to handle it."
+ },
+ {
+ "instruction": "Which are DDOS technique?",
+ "input": "Traffic flooding, Scanning, Viewing",
+ "output": "Traffic flooding"
+ },
+ {
+ "instruction": "Avoid DDOS attack",
+ "input": "",
+ "output": "DDoS mitigation services: Consider using a reputable DDoS mitigation service that specializes in detecting and mitigating attacks. These services can help filter out malicious traffic and ensure your network remains accessible during an attack. \nNetwork monitoring: Implement robust network monitoring systems to detect any unusual traffic patterns or sudden spikes in traffic. Early detection can allow you to respond quickly and mitigate the impact of an attack."
+ }
+]
diff --git a/data/alpaca/alpaca_data_cleaned_archive_origin.json b/data/alpaca/alpaca_data_cleaned_archive_origin.json
new file mode 100644
index 0000000000000000000000000000000000000000..65a9b2fb81f6fa49bc0733531281a4fe76927384
--- /dev/null
+++ b/data/alpaca/alpaca_data_cleaned_archive_origin.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00c26b8da597c1aaa5a0bac023bdb8f26bbaa37a9ead7837df4aa7e51ad57459
+size 23573609
diff --git a/data/alpaca/cloud_cpu_benchmark_report_nipacloud_c035f97b62.pdf b/data/alpaca/cloud_cpu_benchmark_report_nipacloud_c035f97b62.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..39a3112b67e1a5e63ee02d23e22510da3750dc60
Binary files /dev/null and b/data/alpaca/cloud_cpu_benchmark_report_nipacloud_c035f97b62.pdf differ
diff --git a/data/alpaca/test.pt b/data/alpaca/test.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9a2cb9c0fffc7049ed1bbf3cc9db2550ee3a604e
--- /dev/null
+++ b/data/alpaca/test.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51be5410edd01a0351eae4535769ff441ae4278d17a6a643bed3cfcad5888c1d
+size 4607
diff --git a/data/alpaca/train.pt b/data/alpaca/train.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71e626c83672d481e70735be4af47db38bdfeb44
--- /dev/null
+++ b/data/alpaca/train.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4026f9d8b9f342b2d6938202e017bd5e8f81716e2c79b78a7b0de92861f15050
+size 10902
diff --git a/evaluate.py b/evaluate.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2472b668e6f40ad35c9a853980ed5e739f7e436
--- /dev/null
+++ b/evaluate.py
@@ -0,0 +1,145 @@
+# This mimics GPTQ's evaluation metrics: https://github.com/IST-DASLab/gptq/
+# Thanks to E. Frantar et al., "GPTQ: Accurate Post-Training Quantization for Generative Pre-trained Transformers", arXiv:2210.17323
+import math
+import sys
+import time
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+import tqdm
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.utils import EmptyInitOnDevice, llama_model_lookup
+
+from datasets import load_dataset
+
+
+def load_eval_data(dataset_name: str) -> str:
+ # this mimics gptq datautils
+ if dataset_name == "wikitext":
+ # traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
+ testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
+ testdata = "\n\n".join(testdata["text"])
+ elif dataset_name == "ptb":
+ testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
+ testdata = "\n\n".join(testdata["sentence"])
+ elif dataset_name == "c4":
+ testdata = load_dataset(
+ "allenai/c4",
+ "allenai--c4",
+ data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
+ split="validation",
+ )
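+ # concatenate the first 1,100 validation documents, mirroring the GPTQ data utilities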
+ testdata = " ".join(testdata[:1100]["text"])
+
+ else:
+ raise ValueError("invalid dataset name (wikitext, ptb, c4 are allowed)")
+ return testdata
+
+
+def main(
+ datasets: str = "wikitext,ptb,c4",
+ *,
+ # compilation fails as it does not support torch.complex64 for RoPE
+ # compile: bool = False,
+ accelerator: str = "auto",
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ dtype: str = "float32",
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer.
+
+ Args:
+ datasets: The datasets to use as a comma separated string
+ # compile: Whether to compile the model.
+ accelerator: The hardware to run on. Possible choices are:
+ ``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+ quantize: Whether to quantize the model and using which method:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not checkpoint_path:
+ checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert checkpoint_path.is_file()
+ assert tokenizer_path.is_file()
+
+ fabric = L.Fabric(accelerator=accelerator, devices=1)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ checkpoint = torch.load(checkpoint_path)
+ name = llama_model_lookup(checkpoint)
+ model = LLaMA.from_name(name)
+ model.load_state_dict(checkpoint)
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+
+ # if compile:
+ # model = torch.compile(model)
+
+ total_toks = 0
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for dsname in datasets.split(","):
+ test_string = load_eval_data(dsname)
+ encoded_text = tokenizer.encode(
+ test_string, bos=True, eos=False, device=fabric.device
+ )
+ encoded_text = encoded_text[
+ None, : 256 * model.config.block_size
+ ] # add batch dimension, trim like gptq implementation
+ t0 = time.perf_counter()
+
+ nlls = 0
+ toks = 0
+ with torch.inference_mode():
+ block_size = 2048 # matches the GPTQ evaluation setup; perplexity degrades sharply beyond this context length (https://github.com/facebookresearch/llama/blob/57b0eb62de0636e75af471e49e2f1862d908d9d8/llama/model.py#L30)
+ for i in tqdm.tqdm(range(0, encoded_text.shape[1], block_size)):
+ inp = encoded_text[:, i : i + block_size]
+ logits = model(inp)[0]
+ nll = torch.nn.functional.cross_entropy(
+ logits[:-1], inp[0, 1:].to(dtype=torch.long), reduction="sum"
+ )
+ toks += inp.size(1) - 1
+ nlls += nll.item()
+
+ print(encoded_text.shape, logits.shape)
+ encoded_text = encoded_text[:, : logits.shape[0]]
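+ # perplexity = exp(total negative log-likelihood / number of predicted tokens)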
+ ppl = math.exp(nlls / toks)
+ print(f"Perplexity on {dsname}: {ppl:.2f}")
+ total_toks += toks
+
+ t = time.perf_counter() - t0
+ print(
+ f"\n\nTime for inference: {t:.02f} sec total, {total_toks / t:.02f} tokens/sec",
+ file=sys.stderr,
+ )
+ print(
+ f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
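+ # allow TF32 matrix-multiply kernels for faster float32 inference on recent NVIDIA GPUs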
+ torch.set_float32_matmul_precision("high")
+ CLI(main)
diff --git a/evaluate_adapter.py b/evaluate_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..bafdc7456002c6c307f17d6831de0444d21785d7
--- /dev/null
+++ b/evaluate_adapter.py
@@ -0,0 +1,164 @@
+# This mimics GPTQ's evaluation metrics: https://github.com/IST-DASLab/gptq/
+# Thanks to E. Frantar et al., "GPTQ: Accurate Post-Training Quantization for Generative Pre-trained Transformers", arXiv:2210.17323
+import math
+import sys
+import time
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+import tqdm
+
+from lit_llama import Tokenizer
+from lit_llama.adapter import LLaMA
+from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
+from scripts.prepare_alpaca import generate_prompt
+
+from datasets import load_dataset
+
+
+def load_eval_data(dataset_name: str) -> str:
+ # this mimics gptq datautils
+ if dataset_name == "wikitext":
+ # traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
+ testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
+ testdata = "\n\n".join(testdata["text"])
+ elif dataset_name == "ptb":
+ testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
+ testdata = "\n\n".join(testdata["sentence"])
+ elif dataset_name == "c4":
+ testdata = load_dataset(
+ "allenai/c4",
+ "allenai--c4",
+ data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
+ split="validation",
+ )
+ testdata = " ".join(testdata[:1100]["text"])
+
+ else:
+ raise ValueError("invalid dataset name (wikitext, ptb, c4 are allowed)")
+ return testdata
+
+
+def main(
+ datasets: str = "wikitext,ptb,c4",
+ *,
+ # compilation fails as it does not support torch.complex64 for RoPE
+ # compile: bool = False,
+ accelerator: str = "auto",
+ adapter_path: Optional[Path] = None,
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ dtype: str = "float32",
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer.
+
+ Args:
+ datasets: The datasets to use as a comma separated string
+ # compile: Whether to compile the model.
+ accelerator: The hardware to run on. Possible choices are:
+ ``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
+ adapter_path: Path to the checkpoint with trained adapter weights, which are the output of
+ `finetune_adapter.py`.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+ quantize: Whether to quantize the model and using which method:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not adapter_path:
+ adapter_path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth")
+ if not checkpoint_path:
+ checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+
+ assert adapter_path.is_file()
+ assert checkpoint_path.is_file()
+ assert tokenizer_path.is_file()
+
+ fabric = L.Fabric(accelerator=accelerator, devices=1)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ pretrained_checkpoint = lazy_load(checkpoint_path)
+ adapter_checkpoint = lazy_load(adapter_path)
+ name = llama_model_lookup(pretrained_checkpoint)
+ model = LLaMA.from_name(name)
+
+ # 1. Load the pretrained weights
+ model.load_state_dict(pretrained_checkpoint, strict=False)
+ # 2. Load the fine-tuned adapter weights
+ model.load_state_dict(adapter_checkpoint, strict=False)
+
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+
+ # if compile:
+ # model = torch.compile(model)
+
+ total_toks = 0
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for dsname in datasets.split(","):
+ test_string = load_eval_data(dsname)
+
+ sample = {"instruction": test_string, "input": input}
+ test_string = generate_prompt(sample)
+
+ encoded_text = tokenizer.encode(
+ test_string, bos=True, eos=False, device=fabric.device
+ )
+ encoded_text = encoded_text[
+ None, : 256 * model.config.block_size
+ ] # add batch dimension, trim like gptq implementation
+ t0 = time.perf_counter()
+
+ nlls = 0
+ toks = 0
+ with torch.inference_mode():
+ block_size = 2048 # this is for compat with gptq, and indeed we get much worse beyond this (https://github.com/facebookresearch/llama/blob/57b0eb62de0636e75af471e49e2f1862d908d9d8/llama/model.py#L30)
+ for i in tqdm.tqdm(range(0, encoded_text.shape[1], block_size)):
+ inp = encoded_text[:, i : i + block_size]
+ logits = model(inp)[0]
+ nll = torch.nn.functional.cross_entropy(
+ logits[:-1], inp[0, 1:].to(dtype=torch.long), reduction="sum"
+ )
+ toks += inp.size(1) - 1
+ nlls += nll.item()
+
+ print(encoded_text.shape, logits.shape)
+ encoded_text = encoded_text[:, : logits.shape[0]]
+ ppl = math.exp(nlls / toks)
+ print(f"Perplexity on {dsname}: {ppl:.2f}")
+ total_toks += toks
+
+ t = time.perf_counter() - t0
+ print(
+ f"\n\nTime for inference: {t:.02f} sec total, {total_toks / t:.02f} tokens/sec",
+ file=sys.stderr,
+ )
+ print(
+ f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ CLI(main)
diff --git a/evaluate_full.py b/evaluate_full.py
new file mode 100644
index 0000000000000000000000000000000000000000..061a80c6bf7e0cf9efe9ba13c38ac6fe2cfb6128
--- /dev/null
+++ b/evaluate_full.py
@@ -0,0 +1,145 @@
+# This mimics GPTQ's evaluation metrics: https://github.com/IST-DASLab/gptq/
+# Thanks to E. Frantar et al GPTQ: Accurate Post-training Compression for GPT, arXiv:2210.17323
+import math
+import sys
+import time
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+import tqdm
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.utils import EmptyInitOnDevice
+
+from datasets import load_dataset
+
+
+def load_eval_data(dataset_name: str) -> str:
+ # this mimics gptq datautils
+ if dataset_name == "wikitext":
+ # traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
+ testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
+ testdata = "\n\n".join(testdata["text"])
+ elif dataset_name == "ptb":
+ testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
+ testdata = "\n\n".join(testdata["sentence"])
+ elif dataset_name == "c4":
+ testdata = load_dataset(
+ "allenai/c4",
+ "allenai--c4",
+ data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
+ split="validation",
+ )
+ testdata = " ".join(testdata[:1100]["text"])
+
+ else:
+ raise ValueError("invalid dataset name (wikitext, ptb, c4 are allowed)")
+ return testdata
+
+
+def main(
+ datasets: str = "wikitext,ptb,c4",
+ *,
+ # compilation fails as it does not support torch.complex64 for RoPE
+ # compile: bool = False,
+ accelerator: str = "auto",
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ model_size: str = "7B",
+ dtype: str = "float32",
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer.
+
+ Args:
+ datasets: The datasets to use as a comma separated string
+ # compile: Whether to compile the model.
+ accelerator: The hardware to run on. Possible choices are:
+ ``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not checkpoint_path:
+ checkpoint_path = Path(f"./checkpoints/lit-llama/{model_size}/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert checkpoint_path.is_file()
+ assert tokenizer_path.is_file()
+
+ fabric = L.Fabric(accelerator=accelerator, devices=1)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ model = LLaMA.from_name(model_size)
+ checkpoint = torch.load(checkpoint_path)
+ model.load_state_dict(checkpoint)
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+
+ # if compile:
+ # model = torch.compile(model)
+
+ total_toks = 0
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for dsname in datasets.split(","):
+ test_string = load_eval_data(dsname)
+ encoded_text = tokenizer.encode(
+ test_string, bos=True, eos=False, device=fabric.device
+ )
+ encoded_text = encoded_text[
+ None, : 256 * model.config.block_size
+ ] # add batch dimension, trim like gptq implementation
+ t0 = time.perf_counter()
+
+ nlls = 0
+ toks = 0
+ with torch.inference_mode():
+ block_size = 2048 # this is for compat with gptq, and indeed we get much worse beyond this (https://github.com/facebookresearch/llama/blob/57b0eb62de0636e75af471e49e2f1862d908d9d8/llama/model.py#L30)
+ for i in tqdm.tqdm(range(0, encoded_text.shape[1], block_size)):
+ inp = encoded_text[:, i : i + block_size]
+ logits = model(inp)[0]
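+                # score next-token predictions: logits at position j are compared with token j + 1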
+ nll = torch.nn.functional.cross_entropy(
+ logits[:-1], inp[0, 1:].to(dtype=torch.long), reduction="sum"
+ )
+ toks += inp.size(1) - 1
+ nlls += nll.item()
+
+ print(encoded_text.shape, logits.shape)
+ encoded_text = encoded_text[:, : logits.shape[0]]
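+        # perplexity = exp(mean negative log-likelihood per predicted token)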
+ ppl = math.exp(nlls / toks)
+ print(f"Perplexity on {dsname}: {ppl:.2f}")
+ total_toks += toks
+
+ t = time.perf_counter() - t0
+ print(
+ f"\n\nTime for inference: {t:.02f} sec total, {total_toks / t:.02f} tokens/sec",
+ file=sys.stderr,
+ )
+ print(
+ f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ CLI(main)
diff --git a/evaluate_lora.py b/evaluate_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d26d397eeb89fca53c9f571a60f7712185dbe41
--- /dev/null
+++ b/evaluate_lora.py
@@ -0,0 +1,173 @@
+# This mimics GPTQ's evaluation metrics: https://github.com/IST-DASLab/gptq/
+# Thanks to E. Frantar et al GPTQ: Accurate Post-training Compression for GPT, arXiv:2210.17323
+import math
+import sys
+import time
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+import tqdm
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
+from lit_llama.lora import lora
+from scripts.prepare_alpaca import generate_prompt
+
+from datasets import load_dataset
+
+lora_r = 8
+lora_alpha = 16
+lora_dropout = 0.05
+
+
+def load_eval_data(dataset_name: str) -> str:
+ # this mimics gptq datautils
+ if dataset_name == "wikitext":
+ # traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
+ testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
+ testdata = "\n\n".join(testdata["text"])
+ elif dataset_name == "ptb":
+ testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
+ testdata = "\n\n".join(testdata["sentence"])
+ elif dataset_name == "c4":
+ testdata = load_dataset(
+ "allenai/c4",
+ "allenai--c4",
+ data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
+ split="validation",
+ )
+ testdata = " ".join(testdata[:1100]["text"])
+
+ else:
+ raise ValueError("invalid dataset name (wikitext, ptb, c4 are allowed)")
+ return testdata
+
+
+def main(
+ datasets: str = "wikitext,ptb,c4",
+ *,
+ # compilation fails as it does not support torch.complex64 for RoPE
+ # compile: bool = False,
+ accelerator: str = "auto",
+ lora_path: Optional[Path] = None,
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ dtype: str = "float32",
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer
+ finetuned with LoRA.
+
+ Args:
+ datasets: The datasets to use as a comma separated string
+ # compile: Whether to compile the model.
+ accelerator: The hardware to run on. Possible choices are:
+ ``"cpu"``, ``"cuda"``, ``"mps"``, ``"gpu"``, ``"tpu"``, ``"auto"``.
+ lora_path: Path to the checkpoint with trained LoRA weights, which are the output of
+ `finetune_lora.py`.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not lora_path:
+ lora_path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth")
+ if not checkpoint_path:
+        checkpoint_path = Path("./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert lora_path.is_file()
+ assert checkpoint_path.is_file()
+ assert tokenizer_path.is_file()
+
+ if quantize is not None:
+ raise NotImplementedError("Quantization in LoRA is not supported yet")
+
+ fabric = L.Fabric(accelerator=accelerator, devices=1)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+
+ pretrained_checkpoint = lazy_load(checkpoint_path)
+ adapter_checkpoint = lazy_load(lora_path)
+ name = llama_model_lookup(pretrained_checkpoint)
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ), lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True):
+ model = LLaMA.from_name(name)
+
+ # 1. Load the pretrained weights
+ model.load_state_dict(pretrained_checkpoint, strict=False)
+ # 2. Load the fine-tuned adapter weights
+ model.load_state_dict(adapter_checkpoint, strict=False)
+
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+
+ # if compile:
+ # model = torch.compile(model)
+
+ total_toks = 0
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for dsname in datasets.split(","):
+ test_string = load_eval_data(dsname)
+
+ sample = {"instruction": test_string, "input": input}
+ test_string = generate_prompt(sample)
+
+ encoded_text = tokenizer.encode(
+ test_string, bos=True, eos=False, device=fabric.device
+ )
+ encoded_text = encoded_text[
+ None, : 256 * model.config.block_size
+ ] # add batch dimension, trim like gptq implementation
+ t0 = time.perf_counter()
+
+ nlls = 0
+ toks = 0
+ with torch.inference_mode():
+ block_size = 2048 # this is for compat with gptq, and indeed we get much worse beyond this (https://github.com/facebookresearch/llama/blob/57b0eb62de0636e75af471e49e2f1862d908d9d8/llama/model.py#L30)
+ for i in tqdm.tqdm(range(0, encoded_text.shape[1], block_size)):
+ inp = encoded_text[:, i : i + block_size]
+ logits = model(inp)[0]
+ nll = torch.nn.functional.cross_entropy(
+ logits[:-1], inp[0, 1:].to(dtype=torch.long), reduction="sum"
+ )
+ toks += inp.size(1) - 1
+ nlls += nll.item()
+
+ print(encoded_text.shape, logits.shape)
+ encoded_text = encoded_text[:, : logits.shape[0]]
+ ppl = math.exp(nlls / toks)
+ print(f"Perplexity on {dsname}: {ppl:.2f}")
+ total_toks += toks
+
+ t = time.perf_counter() - t0
+ print(
+ f"\n\nTime for inference: {t:.02f} sec total, {total_toks / t:.02f} tokens/sec",
+ file=sys.stderr,
+ )
+ print(
+ f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ CLI(main)
diff --git a/finetune_adapter.py b/finetune_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..e94b17a5aa7c1f9083bab24266101fb1f093ddb1
--- /dev/null
+++ b/finetune_adapter.py
@@ -0,0 +1,253 @@
+"""
+Instruction-tuning with LLaMA-Adapter on the Alpaca dataset following the paper
+
+LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention
+https://arxiv.org/abs/2303.16199
+
+This script runs on a single GPU by default. You can adjust the `micro_batch_size` to fit your GPU memory.
+You can finetune within 1 hour, as in the original paper, by using DeepSpeed ZeRO-2 on 8 A100 GPUs: set
+`devices = 8` and `micro_batch_size = 8` (or higher).
+
+Note: If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
+"""
+import os
+import time
+from pathlib import Path
+import shutil
+
+import lightning as L
+import numpy as np
+import torch
+
+from generate import generate
+from lit_llama.adapter import LLaMA, LLaMAConfig, mark_only_adapter_as_trainable, adapter_state_from_state_dict
+from lit_llama.tokenizer import Tokenizer
+from scripts.prepare_alpaca import generate_prompt
+from lightning.fabric.strategies import DeepSpeedStrategy
+
+
+eval_interval = 600
+save_interval = 1000
+eval_iters = 100
+log_interval = 1
+devices = 1
+
+# Hyperparameters
+learning_rate = 9e-3
+batch_size = 64 // devices
+micro_batch_size = 4
+gradient_accumulation_steps = batch_size // micro_batch_size
+epoch_size = 50000 # train dataset size
+num_epochs = 5
+max_iters = num_epochs * epoch_size // devices
+weight_decay = 0.02
+max_seq_length = 256 # see scripts/prepare_alpaca.py
+warmup_steps = epoch_size * 2 // micro_batch_size // devices # 2 epochs
+
+ds_config = {
+ "train_micro_batch_size_per_gpu": micro_batch_size,
+ "gradient_accumulation_steps": gradient_accumulation_steps,
+ "zero_optimization": {"stage": 2},
+}
+
+
+def main(
+ data_dir: str = "data/alpaca",
+ pretrained_path: str = "checkpoints/lit-llama/7B/lit-llama.pth",
+ out_dir: str = "out/adapter/alpaca",
+):
+
+ fabric = L.Fabric(
+ accelerator="cuda",
+ devices=devices,
+ strategy=(DeepSpeedStrategy(config=ds_config) if devices > 1 else "auto"),
+ precision="bf16-true",
+ )
+ fabric.launch()
+ fabric.seed_everything(1337 + fabric.global_rank)
+
+ if fabric.global_rank == 0:
+ os.makedirs(out_dir, exist_ok=True)
+
+ train_data, val_data = load_datasets(data_dir=data_dir)
+
+ config = LLaMAConfig(block_size=max_seq_length)
+
+ if not os.path.isfile(pretrained_path):
+ raise FileNotFoundError(
+ f"Can't find the pretrained weights at {pretrained_path}."
+ " Please follow the instructions in the README to download them."
+ )
+ checkpoint = torch.load(pretrained_path)
+
+ with fabric.init_module():
+ model = LLaMA(config)
+    # strict=False because the adapter weights are not contained in the pretrained checkpoint
+ model.load_state_dict(checkpoint, strict=False)
+
+ mark_only_adapter_as_trainable(model)
+
+ num_params = sum([p.numel() for p in model.parameters() if p.requires_grad])
+ print(f"Number of trainable parameters: {num_params}")
+
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
+ model, optimizer = fabric.setup(model, optimizer)
+ train(fabric, model, optimizer, train_data, val_data, out_dir)
+
+ # Save the final checkpoint at the end of training
+ save_model_checkpoint(fabric, model, os.path.join(out_dir, "lit-llama-adapter-finetuned.pth"))
+
+
+def train(
+ fabric: L.Fabric,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ train_data: np.ndarray,
+ val_data: np.ndarray,
+ out_dir: str,
+) -> None:
+ """The training loop.
+
+ Loosely based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+ """
+ step_count = 0
+
+ for iter_num in range(max_iters):
+
+ if step_count <= warmup_steps:
+ # linear warmup
+ lr = learning_rate * step_count / warmup_steps
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+ t0 = time.time()
+
+ input_ids, targets = get_batch(fabric, train_data)
+ logits = model(input_ids)
+ loss = loss_fn(logits, targets)
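+        # while accumulating, skip the cross-device gradient sync; it runs only on the last micro-batch of each step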
+ with fabric.no_backward_sync(model, enabled=((iter_num + 1) % gradient_accumulation_steps != 0)):
+ fabric.backward(loss / gradient_accumulation_steps)
+
+ if (iter_num + 1) % gradient_accumulation_steps == 0:
+ optimizer.step()
+ optimizer.zero_grad()
+ step_count += 1
+
+ if step_count % eval_interval == 0:
+ val_loss = validate(fabric, model, val_data)
+ fabric.print(f"step {iter_num}: val loss {val_loss:.4f}")
+ fabric.barrier()
+
+ if step_count % save_interval == 0:
+ print(f"Saving adapter weights to {out_dir}")
+ # TODO: Provide a function/script to merge the adapter weights with pretrained weights
+ save_model_checkpoint(fabric, model, os.path.join(out_dir, f"iter-{iter_num:06d}.pth"))
+
+ dt = time.time() - t0
+ if iter_num % log_interval == 0:
+ fabric.print(f"iter {iter_num}: loss {loss.item():.4f}, time: {dt*1000:.2f}ms")
+
+
+def generate_response(model, instruction, input=""):
+ tokenizer = Tokenizer("checkpoints/lit-llama/tokenizer.model")
+ sample = {"instruction": instruction, "input": input}
+ prompt = generate_prompt(sample)
+ encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
+
+ output = generate(
+ model,
+ idx=encoded,
+ max_seq_length=max_seq_length,
+ max_new_tokens=100,
+ temperature=0.8,
+ )
+ output = tokenizer.decode(output)
+ return output # output.split("### Response:")[1].strip()
+
+
+@torch.no_grad()
+def validate(fabric: L.Fabric, model: torch.nn.Module, val_data: np.ndarray) -> torch.Tensor:
+ fabric.print("Validating ...")
+ model.eval()
+ losses = torch.zeros(eval_iters)
+ for k in range(eval_iters):
+ input_ids, targets = get_batch(fabric, val_data)
+ logits = model(input_ids)
+ loss = loss_fn(logits, targets)
+ losses[k] = loss.item()
+ val_loss = losses.mean()
+
+ # produce an example:
+ instruction = "Recommend a movie for me to watch during the weekend and explain the reason."
+ output = generate_response(model, instruction)
+ fabric.print(instruction)
+ fabric.print(output)
+
+ model.train()
+ return val_loss.item()
+
+def loss_fn(logits, targets):
+ # shift the targets such that output n predicts token n+1
+ logits = logits[..., :-1, :].contiguous()
+ targets = targets[..., 1:].contiguous()
+ loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+ return loss
+
+
+def get_batch(fabric: L.Fabric, data: list):
+ ix = torch.randint(len(data), (micro_batch_size,))
+
+ input_ids = [data[i]["input_ids"].type(torch.int64) for i in ix]
+ labels = [data[i]["labels"].type(torch.int64) for i in ix]
+
+ max_len = max(len(s) for s in input_ids)
+
+ def pad_right(x, pad_id):
+ # pad right based on the longest sequence
+ n = max_len - len(x)
+ return torch.cat((x, torch.full((n,), pad_id, dtype=x.dtype)))
+
+ x = torch.stack([pad_right(x, pad_id=0) for x in input_ids])
+ y = torch.stack([pad_right(x, pad_id=-1) for x in labels])
+ x, y = fabric.to_device((x.pin_memory(), y.pin_memory()))
+ return x, y
+
+
+def load_datasets(data_dir):
+ train_data = torch.load(os.path.join(data_dir, "train.pt"))
+ val_data = torch.load(os.path.join(data_dir, "test.pt"))
+ return train_data, val_data
+
+
+def save_model_checkpoint(fabric, model, file_path):
+ file_path = Path(file_path)
+
+ if isinstance(fabric.strategy, DeepSpeedStrategy):
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+
+ tmp_path = file_path.with_suffix(".tmp")
+ fabric.save(tmp_path, {"model": model})
+ fabric.barrier()
+ if fabric.global_rank == 0:
+ # Create a consolidated checkpoint with the same name next to the deepspeed checkpoint
+ # and only keep the adapter weights
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(tmp_path)
+ state_dict = adapter_state_from_state_dict(state_dict)
+ torch.save(state_dict, file_path)
+ shutil.rmtree(tmp_path)
+ else:
+ state_dict = adapter_state_from_state_dict(model.state_dict())
+ if fabric.global_rank == 0:
+ torch.save(state_dict, file_path)
+ fabric.barrier()
+
+
+if __name__ == "__main__":
+ # Uncomment this line if you see an error: "Expected is_sm80 to be true, but got false"
+ # torch.backends.cuda.enable_flash_sdp(False)
+ torch.set_float32_matmul_precision("high")
+
+ from jsonargparse.cli import CLI
+
+ CLI(main)
diff --git a/finetune_full.py b/finetune_full.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1bd816b9d1dbe9c60eeeb48f4137a008f4074e1
--- /dev/null
+++ b/finetune_full.py
@@ -0,0 +1,214 @@
+"""
+Instruction-tuning on the Alpaca dataset using a regular finetuning procedure (updating all layers).
+
+Note: If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
+"""
+import os
+import time
+from functools import partial
+
+import lightning as L
+from lightning.fabric.strategies import FSDPStrategy
+import numpy as np
+import torch
+from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
+
+from generate import generate
+from lit_llama.model import Block, LLaMA, LLaMAConfig
+from lit_llama.tokenizer import Tokenizer
+from lit_llama.utils import save_model_checkpoint
+from scripts.prepare_alpaca import generate_prompt
+
+
+eval_interval = 1000
+save_interval = 1000
+eval_iters = 100
+log_interval = 100
+devices = 4
+
+# Hyperparameters
+learning_rate = 3e-5
+batch_size = 128 // devices
+micro_batch_size = 4
+gradient_accumulation_steps = batch_size // micro_batch_size
+epoch_size = 50000 # train dataset size
+num_epochs = 5
+max_iters = num_epochs * epoch_size // micro_batch_size // devices
+weight_decay = 0.0
+block_size = 512
+warmup_steps = 100
+
+
+def main(
+ data_dir: str = "data/alpaca",
+ pretrained_path: str = "checkpoints/lit-llama/7B/lit-llama.pth",
+ out_dir: str = "out/full/alpaca",
+):
+
+ auto_wrap_policy = partial(transformer_auto_wrap_policy, transformer_layer_cls={Block})
+ strategy = FSDPStrategy(auto_wrap_policy=auto_wrap_policy, activation_checkpointing=Block)
+
+ fabric = L.Fabric(accelerator="cuda", devices=devices, precision="bf16-mixed", strategy=strategy)
+ fabric.launch()
+ fabric.seed_everything(1337 + fabric.global_rank)
+
+ if fabric.global_rank == 0:
+ os.makedirs(out_dir, exist_ok=True)
+
+ train_data, val_data = load_datasets(data_dir=data_dir)
+
+ config = LLaMAConfig.from_name("7B")
+ config.block_size = block_size
+
+ checkpoint = torch.load(pretrained_path)
+
+ with fabric.device:
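+        # instantiate the weights in half precision to limit peak memory, then cast to bfloat16 before loading the checkpoint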
+ torch.set_default_tensor_type(torch.HalfTensor)
+ model = LLaMA(config).bfloat16()
+ torch.set_default_tensor_type(torch.FloatTensor)
+ model.load_state_dict(checkpoint, strict=False)
+
+ model = fabric.setup_module(model)
+
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+ optimizer = fabric.setup_optimizers(optimizer)
+
+ train(fabric, model, optimizer, train_data, val_data, out_dir)
+
+ # Save the final checkpoint at the end of training
+ save_model_checkpoint(fabric, model, os.path.join(out_dir, "lit-llama-full-finetuned.pth"))
+
+
+def train(
+ fabric: L.Fabric,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ train_data: np.ndarray,
+ val_data: np.ndarray,
+ out_dir: str,
+) -> None:
+ """The training loop.
+
+ Loosely based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+ """
+ step_count = 0
+ model.train()
+
+ for iter_num in range(max_iters):
+
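+        # accumulate gradients across gradient_accumulation_steps micro-batches before each optimizer step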
+        is_accumulating = (iter_num + 1) % gradient_accumulation_steps != 0
+
+ if step_count <= warmup_steps:
+ # linear warmup
+ lr = learning_rate * step_count / warmup_steps
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+ t0 = time.time()
+
+ with fabric.no_backward_sync(model, enabled=is_accumulating):
+ input_ids, targets = get_batch(fabric, train_data)
+ logits = model(input_ids)
+ loss = loss_fn(logits, targets)
+ fabric.backward(loss)
+
+ if not is_accumulating:
+ optimizer.step()
+ optimizer.zero_grad()
+ step_count += 1
+
+ if step_count % eval_interval == 0:
+ val_loss = validate(fabric, model, val_data)
+ fabric.print(f"step {iter_num}: val loss {val_loss:.4f}")
+ fabric.barrier()
+
+ if step_count % save_interval == 0:
+ print(f"Saving weights to {out_dir}")
+ save_model_checkpoint(fabric, model, os.path.join(out_dir, f"iter-{iter_num:06d}-ckpt.pth"))
+
+ dt = time.time() - t0
+ if iter_num % log_interval == 0:
+ fabric.print(f"iter {iter_num}: loss {loss.item():.4f}, time: {dt*1000:.2f}ms")
+
+
+def generate_response(model, instruction):
+ tokenizer = Tokenizer("checkpoints/lit-llama/tokenizer.model")
+ sample = {"instruction": instruction, "input": ""}
+ prompt = generate_prompt(sample)
+ encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
+
+ output = generate(
+ model,
+ idx=encoded,
+ max_seq_length=block_size,
+ max_new_tokens=100,
+ )
+ output = tokenizer.decode(output)
+ return output # output.split("### Response:")[1].strip()
+
+
+@torch.no_grad()
+def validate(fabric: L.Fabric, model: torch.nn.Module, val_data: np.ndarray) -> torch.Tensor:
+ fabric.print("Validating ...")
+ model.eval()
+ losses = torch.zeros(eval_iters)
+ for k in range(eval_iters):
+ input_ids, targets = get_batch(fabric, val_data)
+ logits = model(input_ids)
+ loss = loss_fn(logits, targets)
+ losses[k] = loss.item()
+ out = losses.mean()
+
+ # produce an example:
+ instruction = "Recommend a movie for me to watch during the weekend and explain the reason."
+
+ output = generate_response(model, instruction)
+ fabric.print(instruction)
+ fabric.print(output)
+
+ model.train()
+ return out.item()
+
+
+def loss_fn(logits, targets):
+ # shift the targets such that output n predicts token n+1
+ logits = logits[..., :-1, :].contiguous()
+ targets = targets[..., 1:].contiguous()
+ loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+ return loss
+
+
+def get_batch(fabric: L.Fabric, data: list):
+ ix = torch.randint(len(data), (micro_batch_size,))
+
+ input_ids = [data[i]["input_ids"].type(torch.int64) for i in ix]
+ labels = [data[i]["labels"].type(torch.int64) for i in ix]
+
+ max_len = max(len(s) for s in input_ids)
+
+ def pad_right(x, pad_id):
+ # pad right based on the longest sequence
+ n = max_len - len(x)
+ return torch.cat((x, torch.full((n,), pad_id, dtype=x.dtype)))
+
+ x = torch.stack([pad_right(x, pad_id=0) for x in input_ids])
+ y = torch.stack([pad_right(x, pad_id=-1) for x in labels])
+ x, y = fabric.to_device((x.pin_memory(), y.pin_memory()))
+ return x, y
+
+
+def load_datasets(data_dir):
+ train_data = torch.load(os.path.join(data_dir, "train.pt"))
+ val_data = torch.load(os.path.join(data_dir, "test.pt"))
+ return train_data, val_data
+
+
+if __name__ == "__main__":
+ # Uncomment this line if you see an error: "Expected is_sm80 to be true, but got false"
+ # torch.backends.cuda.enable_flash_sdp(False)
+ torch.set_float32_matmul_precision("high")
+
+ from jsonargparse.cli import CLI
+
+ CLI(main)
diff --git a/finetune_lora.py b/finetune_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e6dc01a5006dbe93c492c0455cd93d7852e597f
--- /dev/null
+++ b/finetune_lora.py
@@ -0,0 +1,211 @@
+"""
+Instruction-tuning with LoRA on the Alpaca dataset.
+
+Note: If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
+"""
+import os
+import time
+
+import lightning as L
+import numpy as np
+import torch
+
+from generate import generate
+from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict
+from lit_llama.model import LLaMA, LLaMAConfig
+from lit_llama.tokenizer import Tokenizer
+from scripts.prepare_alpaca import generate_prompt
+
+
+eval_interval = 100
+save_interval = 100
+eval_iters = 100
+log_interval = 1
+
+# Hyperparameters
+learning_rate = 3e-4
+batch_size = 128
+micro_batch_size = 4
+gradient_accumulation_steps = batch_size // micro_batch_size
+max_iters = 50000 * 3 // micro_batch_size
+weight_decay = 0.0
+max_seq_length = 256 # see scripts/prepare_alpaca.py
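+# LoRA hyperparameters: rank of the low-rank update, scaling factor applied to it, and dropout on the LoRA branch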
+lora_r = 8
+lora_alpha = 16
+lora_dropout = 0.05
+warmup_steps = 100
+
+
+def main(
+ data_dir: str = "data/alpaca",
+ pretrained_path: str = "checkpoints/lit-llama/7B/lit-llama.pth",
+ out_dir: str = "out/lora/alpaca",
+):
+
+    fabric = L.Fabric(accelerator="cuda", precision="bf16-true")
+ fabric.launch()
+ fabric.seed_everything(1337 + fabric.global_rank)
+
+ if fabric.global_rank == 0:
+ os.makedirs(out_dir, exist_ok=True)
+ print("loading dataset ", data_dir)
+ train_data, val_data = load_datasets(data_dir=data_dir)
+ print("train data: ", len(train_data))
+ print("val data: ", len(val_data))
+ config = LLaMAConfig.from_name("7B")
+ config.block_size = max_seq_length
+ print("loading pretrained model ", pretrained_path)
+ checkpoint = torch.load(pretrained_path)
+
+ with fabric.init_module(), lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True):
+ model = LLaMA(config)
+ # strict=False because missing keys due to LoRA weights not contained in checkpoint state
+ model.load_state_dict(checkpoint, strict=False)
+
+ mark_only_lora_as_trainable(model)
+
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
+ model, optimizer = fabric.setup(model, optimizer)
+ print("start training")
+ train(fabric, model, optimizer, train_data, val_data, out_dir)
+
+ # Save the final LoRA checkpoint at the end of training
+ print(f"Saving LoRA weights to {out_dir}")
+ checkpoint = lora_state_dict(model)
+ fabric.save(os.path.join(out_dir, "lit-llama-lora-finetuned.pth"), checkpoint)
+
+
+def train(
+ fabric: L.Fabric,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ train_data: np.ndarray,
+ val_data: np.ndarray,
+ out_dir: str,
+) -> None:
+ """The training loop.
+
+ Loosely based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+ """
+ step_count = 0
+ print("max iters:", max_iters )
+
+ for iter_num in range(max_iters):
+ print("iter_num", iter_num)
+ if step_count <= warmup_steps:
+ # linear warmup
+ lr = learning_rate * step_count / warmup_steps
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+
+ t0 = time.time()
+
+ input_ids, targets = get_batch(fabric, train_data)
+ logits = model(input_ids)
+ print("calculate loss")
+ loss = loss_fn(logits, targets)
+ print("backward")
+ fabric.backward(loss)
+
+ if (iter_num + 1) % gradient_accumulation_steps == 0:
+ print("step optimizer")
+ optimizer.step()
+ optimizer.zero_grad()
+ step_count += 1
+ if step_count % eval_interval == 0:
+ val_loss = validate(fabric, model, val_data)
+ fabric.print(f"step {iter_num}: val loss {val_loss:.4f}")
+ fabric.barrier()
+
+ if step_count % save_interval == 0:
+ print(f"Saving LoRA weights to {out_dir}")
+ # We are only saving the LoRA weights
+ # TODO: Provide a function/script to merge the LoRA weights with pretrained weights
+ checkpoint = lora_state_dict(model)
+ fabric.save(os.path.join(out_dir, f"iter-{iter_num:06d}-ckpt.pth"), checkpoint)
+
+ dt = time.time() - t0
+ if iter_num % log_interval == 0:
+ fabric.print(f"iter {iter_num}: loss {loss.item():.4f}, time: {dt*1000:.2f}ms")
+
+
+def generate_response(model, instruction):
+ tokenizer = Tokenizer("checkpoints/lit-llama/tokenizer.model")
+ sample = {"instruction": instruction, "input": ""}
+ prompt = generate_prompt(sample)
+ encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
+
+ output = generate(
+ model,
+ idx=encoded,
+ max_seq_length=max_seq_length,
+ max_new_tokens=100,
+ )
+ output = tokenizer.decode(output)
+ return output # output.split("### Response:")[1].strip()
+
+
+@torch.no_grad()
+def validate(fabric: L.Fabric, model: torch.nn.Module, val_data: np.ndarray) -> torch.Tensor:
+ fabric.print("Validating ...")
+ model.eval()
+ losses = torch.zeros(eval_iters)
+ for k in range(eval_iters):
+ input_ids, targets = get_batch(fabric, val_data)
+ logits = model(input_ids)
+ loss = loss_fn(logits, targets)
+ losses[k] = loss.item()
+ out = losses.mean()
+
+ # produce an example:
+ instruction = "Recommend a movie for me to watch during the weekend and explain the reason."
+ output = generate_response(model, instruction)
+ fabric.print(instruction)
+ fabric.print(output)
+
+ model.train()
+ return out.item()
+
+def loss_fn(logits, targets):
+ # shift the targets such that output n predicts token n+1
+ logits = logits[..., :-1, :].contiguous()
+ targets = targets[..., 1:].contiguous()
+ loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+ return loss
+
+
+def get_batch(fabric: L.Fabric, data: list):
+ ix = torch.randint(len(data), (micro_batch_size,))
+
+ input_ids = [data[i]["input_ids"].type(torch.int64) for i in ix]
+ labels = [data[i]["labels"].type(torch.int64) for i in ix]
+
+ max_len = max(len(s) for s in input_ids)
+
+ def pad_right(x, pad_id):
+ # pad right based on the longest sequence
+ n = max_len - len(x)
+ return torch.cat((x, torch.full((n,), pad_id, dtype=x.dtype)))
+
+ x = torch.stack([pad_right(x, pad_id=0) for x in input_ids])
+ y = torch.stack([pad_right(x, pad_id=-1) for x in labels])
+ x, y = fabric.to_device((x.pin_memory(), y.pin_memory()))
+ return x, y
+
+
+def load_datasets(data_dir):
+ train_data = torch.load(os.path.join(data_dir, "train.pt"))
+ val_data = torch.load(os.path.join(data_dir, "test.pt"))
+ return train_data, val_data
+
+
+if __name__ == "__main__":
+ # Uncomment this line if you see an error: "Expected is_sm80 to be true, but got false"
+ # torch.backends.cuda.enable_flash_sdp(False)
+ torch.set_float32_matmul_precision("high")
+
+ from jsonargparse.cli import CLI
+
+ CLI(main)
diff --git a/generate.py b/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a312d483ce15494e88bcc0929b4ea95cd8406c
--- /dev/null
+++ b/generate.py
@@ -0,0 +1,162 @@
+import sys
+import time
+import warnings
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
+
+@torch.no_grad()
+def generate(
+ model: torch.nn.Module,
+ idx: torch.Tensor,
+ max_new_tokens: int,
+ max_seq_length: int,
+ temperature: float = 1.0,
+ top_k: Optional[int] = None,
+ eos_id: Optional[int] = None,
+) -> torch.Tensor:
+ """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.
+
+ The implementation of this function is modified from A. Karpathy's nanoGPT.
+
+ Args:
+ model: The model to use.
+ idx: Tensor of shape (T) with indices of the prompt sequence.
+ max_new_tokens: The number of new tokens to generate.
+ max_seq_length: The maximum sequence length allowed.
+ temperature: Scales the predicted logits by 1 / temperature
+ top_k: If specified, only sample among the tokens with the k highest probabilities
+        eos_id: If specified, stop generation as soon as this token is produced
+ """
+ # create an empty tensor of the expected final shape and fill in the current tokens
+ T = idx.size(0)
+ T_new = T + max_new_tokens
+ empty = torch.empty(T_new, dtype=idx.dtype, device=idx.device)
+ empty[:T] = idx
+ idx = empty
+
+ # generate max_new_tokens tokens
+ for t in range(T, T_new):
+ # ignore the not-filled-yet tokens
+ idx_cond = idx[:t]
+ # if the sequence context is growing too long we must crop it at max_seq_length
+        idx_cond = idx_cond if t <= max_seq_length else idx_cond[-max_seq_length:]
+
+ # forward
+ logits = model(idx_cond.view(1, -1))
+ logits = logits[0, -1] / temperature
+
+ # optionally crop the logits to only the top k options
+ if top_k is not None:
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+ logits[logits < v[[-1]]] = -float("Inf")
+
+ probs = torch.nn.functional.softmax(logits, dim=-1)
+ idx_next = torch.multinomial(probs, num_samples=1)
+
+ # concatenate the new generation
+ idx[t] = idx_next
+
+        # stop generation early and return the output as soon as the eos token is produced
+ if idx_next == eos_id:
+ return idx[:t + 1] # include the EOS token
+
+ return idx
+
+
+def main(
+ prompt: str = "Hello, my name is",
+ *,
+ num_samples: int = 1,
+ max_new_tokens: int = 50,
+ top_k: int = 200,
+ temperature: float = 0.8,
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer.
+
+ Args:
+ prompt: The prompt string to use for generating the samples.
+ num_samples: The number of text samples to generate.
+ max_new_tokens: The number of generation steps to take.
+ top_k: The number of top most probable tokens to consider in the sampling process.
+ temperature: A value controlling the randomness of the sampling process. Higher values result in more random
+ samples.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not checkpoint_path:
+        checkpoint_path = Path("./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert checkpoint_path.is_file(), checkpoint_path
+ assert tokenizer_path.is_file(), tokenizer_path
+
+ fabric = L.Fabric(devices="auto", accelerator="cuda")
+ #fabric = L.Fabric(accelerator="cpu")
+ fabric.launch()
+ dtype = torch.bfloat16 if fabric.device.type == "cuda" and torch.cuda.is_bf16_supported() else torch.float32
+
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ with lazy_load(checkpoint_path) as checkpoint:
+ name = llama_model_lookup(checkpoint)
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ model = LLaMA.from_name(name)
+
+ model.load_state_dict(checkpoint)
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+ encoded_prompt = tokenizer.encode(prompt, bos=True, eos=False, device=fabric.device)
+
+ L.seed_everything(1234)
+ for i in range(num_samples):
+ t0 = time.perf_counter()
+ y = generate(
+ model,
+ encoded_prompt,
+ max_new_tokens,
+ model.config.block_size, # type: ignore[union-attr,arg-type]
+ temperature=temperature,
+ top_k=top_k,
+ )
+ t = time.perf_counter() - t0
+ print(tokenizer.decode(y))
+ print(f"Time for inference {i + 1}: {t:.02f} sec total, {max_new_tokens / t:.02f} tokens/sec", file=sys.stderr)
+ if fabric.device.type == "cuda":
+ print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+ torch.set_float32_matmul_precision("high")
+ warnings.filterwarnings(
+ # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31
+ "ignore",
+ message="ComplexHalf support is experimental and many operators don't support it yet"
+ )
+ warnings.filterwarnings(
+ # Triggered in bitsandbytes/autograd/_functions.py:298
+ "ignore",
+ message="MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization",
+ )
+ CLI(main)
diff --git a/generate_adapter.py b/generate_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..938b5d6f0d2a901536b2a7e03a04310d081f8150
--- /dev/null
+++ b/generate_adapter.py
@@ -0,0 +1,117 @@
+import sys
+import time
+import warnings
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+
+from generate import generate
+from lit_llama import Tokenizer
+from lit_llama.adapter import LLaMA
+from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
+from scripts.prepare_alpaca import generate_prompt
+
+
+def main(
+ prompt: str = "What food do lamas eat?",
+ input: str = "",
+ adapter_path: Optional[Path] = None,
+ pretrained_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ quantize: Optional[str] = None,
+ max_new_tokens: int = 100,
+ top_k: int = 200,
+ temperature: float = 0.8,
+) -> None:
+ """Generates a response based on a given instruction and an optional input.
+ This script will only work with checkpoints from the instruction-tuned LLaMA-Adapter model.
+ See `finetune_adapter.py`.
+
+ Args:
+ prompt: The prompt/instruction (Alpaca style).
+ adapter_path: Path to the checkpoint with trained adapter weights, which are the output of
+ `finetune_adapter.py`.
+ input: Optional input (Alpaca style).
+ pretrained_path: The path to the checkpoint with pretrained LLaMA weights.
+ tokenizer_path: The tokenizer path to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ max_new_tokens: The number of generation steps to take.
+ top_k: The number of top most probable tokens to consider in the sampling process.
+ temperature: A value controlling the randomness of the sampling process. Higher values result in more random
+ samples.
+ """
+ if not adapter_path:
+ adapter_path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth")
+ if not pretrained_path:
+        pretrained_path = Path("./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+
+ assert adapter_path.is_file()
+ assert pretrained_path.is_file()
+ assert tokenizer_path.is_file()
+
+ fabric = L.Fabric(devices=1)
+ dtype = torch.bfloat16 if fabric.device.type == "cuda" and torch.cuda.is_bf16_supported() else torch.float32
+
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ with (lazy_load(pretrained_path) as pretrained_checkpoint,
+ lazy_load(adapter_path) as adapter_checkpoint):
+ name = llama_model_lookup(pretrained_checkpoint)
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ model = LLaMA.from_name(name)
+
+ # 1. Load the pretrained weights
+ model.load_state_dict(pretrained_checkpoint, strict=False)
+ # 2. Load the fine-tuned adapter weights
+ model.load_state_dict(adapter_checkpoint, strict=False)
+
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+ sample = {"instruction": prompt, "input": input}
+ prompt = generate_prompt(sample)
+ encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
+
+ t0 = time.perf_counter()
+ output = generate(
+ model,
+ idx=encoded,
+ max_seq_length=max_new_tokens,
+ max_new_tokens=max_new_tokens,
+ temperature=temperature,
+ top_k=top_k,
+ eos_id=tokenizer.eos_id
+ )
+ t = time.perf_counter() - t0
+
+ output = tokenizer.decode(output)
+ output = output.split("### Response:")[1].strip()
+ print(output)
+
+ print(f"\n\nTime for inference: {t:.02f} sec total, {max_new_tokens / t:.02f} tokens/sec", file=sys.stderr)
+ if fabric.device.type == "cuda":
+ print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ warnings.filterwarnings(
+ # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31
+ "ignore",
+ message="ComplexHalf support is experimental and many operators don't support it yet"
+ )
+ CLI(main)
diff --git a/generate_full.py b/generate_full.py
new file mode 100644
index 0000000000000000000000000000000000000000..697af4ec1ce6f037235fe317adca95cca366c9fb
--- /dev/null
+++ b/generate_full.py
@@ -0,0 +1,160 @@
+import sys
+import time
+import warnings
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.utils import EmptyInitOnDevice
+
+
+@torch.no_grad()
+def generate(
+ model: torch.nn.Module,
+ idx: torch.Tensor,
+ max_new_tokens: int,
+ max_seq_length: int,
+ temperature: float = 1.0,
+ top_k: Optional[int] = None,
+ eos_id: Optional[int] = None,
+) -> torch.Tensor:
+ """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.
+
+ The implementation of this function is modified from A. Karpathy's nanoGPT.
+
+ Args:
+ model: The model to use.
+ idx: Tensor of shape (T) with indices of the prompt sequence.
+ max_new_tokens: The number of new tokens to generate.
+ max_seq_length: The maximum sequence length allowed.
+ temperature: Scales the predicted logits by 1 / temperature
+ top_k: If specified, only sample among the tokens with the k highest probabilities
+        eos_id: If specified, stop generation as soon as this token is produced
+ """
+ # create an empty tensor of the expected final shape and fill in the current tokens
+ T = idx.size(0)
+ T_new = T + max_new_tokens
+ empty = torch.empty(T_new, dtype=idx.dtype, device=idx.device)
+ empty[:T] = idx
+ idx = empty
+
+ # generate max_new_tokens tokens
+ for t in range(T, T_new):
+ # ignore the not-filled-yet tokens
+ idx_cond = idx[:t]
+ # if the sequence context is growing too long we must crop it at max_seq_length
+        idx_cond = idx_cond if t <= max_seq_length else idx_cond[-max_seq_length:]
+
+ # forward
+ logits = model(idx_cond.view(1, -1))
+ logits = logits[0, -1] / temperature
+
+ # optionally crop the logits to only the top k options
+ if top_k is not None:
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+ logits[logits < v[[-1]]] = -float("Inf")
+
+ probs = torch.nn.functional.softmax(logits, dim=-1)
+ idx_next = torch.multinomial(probs, num_samples=1)
+
+ # concatenate the new generation
+ idx[t] = idx_next
+
+        # stop generation early and return the output as soon as the eos token is produced
+ if idx_next == eos_id:
+ return idx[:t + 1] # include the EOS token
+
+ return idx
+
+
+def main(
+ prompt: str = "Hello, my name is",
+ *,
+ num_samples: int = 1,
+ max_new_tokens: int = 50,
+ top_k: int = 200,
+ temperature: float = 0.8,
+ checkpoint_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ model_size: str = "7B",
+ quantize: Optional[str] = None,
+) -> None:
+ """Generates text samples based on a pre-trained LLaMA model and tokenizer.
+
+ Args:
+ prompt: The prompt string to use for generating the samples.
+ num_samples: The number of text samples to generate.
+ max_new_tokens: The number of generation steps to take.
+ top_k: The number of top most probable tokens to consider in the sampling process.
+ temperature: A value controlling the randomness of the sampling process. Higher values result in more random
+ samples.
+ checkpoint_path: The checkpoint path to load.
+ tokenizer_path: The tokenizer path to load.
+ model_size: The model size to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ """
+ if not checkpoint_path:
+ checkpoint_path = Path(f"./checkpoints/lit-llama/{model_size}/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert checkpoint_path.is_file(), checkpoint_path
+ assert tokenizer_path.is_file(), tokenizer_path
+
+ fabric = L.Fabric(devices=1)
+ dtype = torch.bfloat16 if fabric.device.type == "cuda" and torch.cuda.is_bf16_supported() else torch.float32
+
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ):
+ model = LLaMA.from_name(model_size)
+
+ checkpoint = torch.load(checkpoint_path)
+ model.load_state_dict(checkpoint)
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+ encoded_prompt = tokenizer.encode(prompt, bos=True, eos=False, device=fabric.device)
+
+ L.seed_everything(1234)
+ for i in range(num_samples):
+ t0 = time.perf_counter()
+ y = generate(
+ model,
+ encoded_prompt,
+ max_new_tokens,
+ model.config.block_size, # type: ignore[union-attr,arg-type]
+ temperature=temperature,
+ top_k=top_k,
+ )
+ t = time.perf_counter() - t0
+ print(tokenizer.decode(y))
+ print(f"Time for inference {i + 1}: {t:.02f} sec total, {max_new_tokens / t:.02f} tokens/sec", file=sys.stderr)
+ if fabric.device.type == "cuda":
+ print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ warnings.filterwarnings(
+ # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31
+ "ignore",
+ message="ComplexHalf support is experimental and many operators don't support it yet"
+ )
+ warnings.filterwarnings(
+ # Triggered in bitsandbytes/autograd/_functions.py:298
+ "ignore",
+ message="MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization",
+ )
+ CLI(main)
diff --git a/generate_lora.py b/generate_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..6788d53c246b4d596c147877faf9f2ced4e008f8
--- /dev/null
+++ b/generate_lora.py
@@ -0,0 +1,131 @@
+import sys
+import time
+import warnings
+from pathlib import Path
+from typing import Optional
+
+import lightning as L
+import torch
+
+from generate import generate
+from lit_llama import Tokenizer, LLaMA
+from lit_llama.lora import lora
+from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
+from scripts.prepare_alpaca import generate_prompt
+
+lora_r = 8
+lora_alpha = 16
+lora_dropout = 0.05
+
+
+def main(
+ prompt: str = "What food do lamas eat?",
+ input: str = "",
+ lora_path: Optional[Path] = None,
+ pretrained_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ quantize: Optional[str] = None,
+ dtype: str = "float32",
+ max_new_tokens: int = 100,
+ top_k: int = 200,
+ temperature: float = 0.8,
+) -> None:
+ """Generates a response based on a given instruction and an optional input.
+ This script will only work with checkpoints from the instruction-tuned LoRA model.
+ See `finetune_lora.py`.
+
+ Args:
+ prompt: The prompt/instruction (Alpaca style).
+ lora_path: Path to the checkpoint with trained LoRA weights, which are the output of
+ `finetune_lora.py`.
+ input: Optional input (Alpaca style).
+ pretrained_path: The path to the checkpoint with pretrained LLaMA weights.
+ tokenizer_path: The tokenizer path to load.
+        quantize: Whether to quantize the model, and if so, which method to use:
+ ``"llm.int8"``: LLM.int8() mode,
+ ``"gptq.int4"``: GPTQ 4-bit mode.
+ dtype: The dtype to use during generation.
+ max_new_tokens: The number of generation steps to take.
+ top_k: The number of top most probable tokens to consider in the sampling process.
+ temperature: A value controlling the randomness of the sampling process. Higher values result in more random
+ samples.
+ """
+ if not lora_path:
+ lora_path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth")
+ if not pretrained_path:
+        pretrained_path = Path("./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+
+ assert lora_path.is_file()
+ assert pretrained_path.is_file()
+ assert tokenizer_path.is_file()
+
+ if quantize is not None:
+ raise NotImplementedError("Quantization in LoRA is not supported yet")
+
+ fabric = L.Fabric(devices=1)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+
+ with (lazy_load(pretrained_path) as pretrained_checkpoint,
+ lazy_load(lora_path) as adapter_checkpoint):
+ name = llama_model_lookup(pretrained_checkpoint)
+
+ with EmptyInitOnDevice(
+ device=fabric.device, dtype=dtype, quantization_mode=quantize
+ ), lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True):
+ model = LLaMA.from_name(name)
+
+ # 1. Load the pretrained weights
+ model.load_state_dict(pretrained_checkpoint, strict=False)
+ # 2. Load the fine-tuned adapter weights
+ model.load_state_dict(adapter_checkpoint, strict=False)
+
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+ model = fabric.setup_module(model)
+
+ tokenizer = Tokenizer(tokenizer_path)
+ sample = {"instruction": prompt, "input": input}
+ prompt = generate_prompt(sample)
+ encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
+
+ t0 = time.perf_counter()
+ output = generate(
+ model,
+ idx=encoded,
+ max_seq_length=max_new_tokens,
+ max_new_tokens=max_new_tokens,
+ temperature=temperature,
+ top_k=top_k,
+ eos_id=tokenizer.eos_id
+ )
+ t = time.perf_counter() - t0
+
+ output = tokenizer.decode(output)
+ output = output.split("### Response:")[1].strip()
+ print(output)
+
+ print(f"\n\nTime for inference: {t:.02f} sec total, {max_new_tokens / t:.02f} tokens/sec", file=sys.stderr)
+ if fabric.device.type == "cuda":
+ print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ warnings.filterwarnings(
+ # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31
+ "ignore",
+ message="ComplexHalf support is experimental and many operators don't support it yet"
+ )
+ CLI(main)
diff --git a/howto/customize_paths.md b/howto/customize_paths.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f9783fa9c8500eaa6e64140f600b2933064b3fb
--- /dev/null
+++ b/howto/customize_paths.md
@@ -0,0 +1,33 @@
+## Customize paths
+
+The project is set up to use specific default paths for reading the original weights, saving checkpoints, and so on.
+
+For all scripts, you can run
+
+```shell
+python script.py -h
+```
+
+to get a list of available options. For instance, here's how you would modify the checkpoint dir:
+
+```shell
+python scripts/convert_checkpoint.py --checkpoint_dir "data/checkpoints/foo"
+```
+
+Note that this change will need to be passed along to subsequent steps, for example:
+
+```shell
+python scripts/generate.py \
+ --checkpoint_path "data/checkpoints/foo/7B/lit-llama.pth" \
+ --tokenizer_path "data/checkpoints/foo/tokenizer.model"
+```
+
+and
+
+```shell
+python scripts/quantize.py \
+ --checkpoint_path "data/checkpoints/foo/7B/lit-llama.pth" \
+ --tokenizer_path "data/checkpoints/foo/tokenizer.model"
+```
+
+To avoid repeating these arguments, you can create symbolic links from the default locations to your custom paths.
diff --git a/howto/download_weights.md b/howto/download_weights.md
new file mode 100644
index 0000000000000000000000000000000000000000..a91f5fa2e6a42a6320a0e0cdaada70511b79a659
--- /dev/null
+++ b/howto/download_weights.md
@@ -0,0 +1,131 @@
+## Downloading pretrained weights
+
+Unless you are training from scratch, you will need the pretrained weights from Meta.
+
+### Original Meta weights
+
+Download the model weights following the instructions on the official [LLaMA repository](https://github.com/facebookresearch/llama).
+
+Once downloaded, you should have a folder like this:
+
+```text
+checkpoints/llama
+├── 7B
+│ ├── ...
+│ └── consolidated.00.pth
+├── 13B
+│ ...
+└── tokenizer.model
+```
+
+Convert the weights to the Lit-LLaMA format:
+
+```bash
+python scripts/convert_checkpoint.py --model_size 7B
+```
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+### OpenLLaMA
+
+OpenLM Research has released **Apache 2.0 licensed** weights obtained by training LLaMA on the 1.2 trillion token open-source [RedPajama](https://github.com/togethercomputer/RedPajama-Data) dataset.
+
+Preview weights trained on an intermediate number of tokens (200B and 300B at the time of writing) have been released. To download them, run:
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com): git lfs install
+git clone https://huggingface.co/openlm-research/open_llama_7b_preview_300bt checkpoints/open-llama/7B
+```
+
+Or if you don't have `git-lfs` installed:
+
+```bash
+python scripts/download.py --repo_id openlm-research/open_llama_7b_preview_300bt --local_dir checkpoints/open-llama/7B
+```
+
+Once downloaded, you should have a folder like this:
+
+```text
+checkpoints/open-llama/
+└── 7B
+ └── open_llama_7b_preview_300bt_transformers_weights
+ ├── ...
+ ├── pytorch_model-00001-of-00002.bin
+ ├── pytorch_model-00002-of-00002.bin
+ ├── pytorch_model.bin.index.json
+ └── tokenizer.model
+```
+
+Convert the weights to the Lit-LLaMA format:
+
+```bash
+python scripts/convert_hf_checkpoint.py --checkpoint_dir checkpoints/open-llama/7B/open_llama_7b_preview_300bt_transformers_weights --model_size 7B
+```
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+Once converted, you should have a folder like this:
+
+```text
+checkpoints/lit-llama/
+├── 7B
+│ └── lit-llama.pth
+└── tokenizer.model
+```
+
+You are all set. Now you can continue with inference or finetuning.
+
+Try running [`generate.py` to test the imported weights](inference.md).
+
+
+### Alternative sources
+
+You might find LLaMA weights hosted online on the Hugging Face Hub. Beware that this infringes the original weights' license.
+You could try downloading them by running the following command with a specific repo id:
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com): git lfs install
+git clone REPO_ID checkpoints/hf-llama/7B
+```
+
+Or if you don't have `git-lfs` installed:
+
+```bash
+python scripts/download.py --repo_id REPO_ID --local_dir checkpoints/hf-llama/7B
+```
+
+Once downloaded, you should have a folder like this:
+
+```text
+checkpoints/hf-llama/
+└── 7B
+ ├── ...
+ ├── pytorch_model-00001-of-00002.bin
+ ├── pytorch_model-00002-of-00002.bin
+ ├── pytorch_model.bin.index.json
+ └── tokenizer.model
+```
+
+Convert the weights to the Lit-LLaMA format:
+
+```bash
+python scripts/convert_hf_checkpoint.py --model_size 7B
+```
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+Once converted, you should have a folder like this:
+
+```text
+checkpoints/lit-llama/
+├── 7B
+│ └── lit-llama.pth
+└── tokenizer.model
+```
+
+You are all set. Now you can continue with inference or finetuning.
+
+Try running [`generate.py` to test the imported weights](inference.md).
diff --git a/howto/finetune_adapter.md b/howto/finetune_adapter.md
new file mode 100644
index 0000000000000000000000000000000000000000..706aa41959e24f58550de51a6a5a25e0209f8475
--- /dev/null
+++ b/howto/finetune_adapter.md
@@ -0,0 +1,102 @@
+# Finetuning with Adapter
+
+[LLaMA-Adapter](https://arxiv.org/abs/2303.16199) is a form of prefix-tuning that prepends a learnable adaption-prompt to the inputs of the attention blocks in LLaMA. In total, there are only 1.2M parameters to update during finetuning, which significantly reduces the memory footprint and speeds up training.
+
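+To make the mechanism concrete, here is a simplified sketch of the gated prefix attention (the function is illustrative; see `lit_llama/adapter.py` in this repo for the actual implementation):
+
+```python
+# Simplified sketch: the adaption prompt contributes a second attention term,
+# scaled by a gating factor that is initialized to zero so training starts from
+# the unchanged pretrained model (see lit_llama/adapter.py).
+import torch.nn.functional as F
+
+def adapted_attention(q, k, v, adapter_k, adapter_v, gating_factor):
+    y = F.scaled_dot_product_attention(q, k, v, is_causal=True)  # regular causal attention
+    ay = F.scaled_dot_product_attention(q, adapter_k, adapter_v, is_causal=False)  # attention over the prefix
+    return y + gating_factor * ay
+```
+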
+We are able to demonstrate instruction-finetuning Lit-LLaMA 7B on the [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset on a **single RTX 3090 (24GB) GPU**. If using 8 GPUs, finetuning can be completed in under 1 hour.
+
+If you are new to LLaMA-Adapter and want to learn more about how it works before proceeding with the finetuning guide below, you might find our article [Understanding Parameter-Efficient Finetuning of Large Language Models: From Prefix Tuning to LLaMA-Adapters](https://lightning.ai/pages/community/article/understanding-llama-adapters/) helpful.
+
+## Preparation
+
+The steps here only need to be done once:
+
+1. Follow the instructions in the [README](README.md) to install the dependencies.
+2. Download and convert the weights and save them in the `./checkpoints` folder as described [here](download_weights.md).
+3. If you want to utilize more than one GPU, you should `pip install deepspeed`.
+4. Download the data and generate the Alpaca instruction tuning dataset:
+
+ ```bash
+ python scripts/prepare_alpaca.py
+ ```
+
+ or [prepare your own dataset](#tune-on-your-own-dataset).
+
+## Running the finetuning
+
+```bash
+python finetune_adapter.py
+```
+
+The finetuning requires at least one GPU with ~24 GB memory (RTX 3090).
+You can speed up training by setting the `devices` variable in the script to utilize more GPUs if available.
+Depending on the available GPU memory, you can also tune the `micro_batch_size` parameter to utilize the GPU efficiently.
+
+For example, the following settings will let you finetune the model in under 1 hour using DeepSpeed Zero-2:
+```python
+devices = 8
+micro_batch_size = 8
+```
+
+This script will save checkpoints periodically to the folder `out/`.
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+## Test the model
+
+You can test the finetuned model with your own instructions by running:
+
+```bash
+python generate_adapter.py \
+ --prompt "Recommend a movie to watch on the weekend." \
+ --quantize llm.int8
+```
+Output:
+```
+A good movie to watch on the weekend would be The Lion King, since it's a classic family film that everyone can enjoy...
+```
+If your GPU supports `bfloat16`, the script will automatically use it. Together with `--quantize llm.int8`, this brings the memory consumption down to ~8 GB.
+
+## Tune on your own dataset
+
+With only a few modifications, you can prepare and train on your own instruction dataset.
+
+1. Create a json file in which each row holds one instruction-response pair.
+   A row has an entry for 'instruction', 'input', and 'output', where 'input' is optional and can be
+ the empty string if the instruction doesn't require a context. Below is an example json file:
+
+ ```
+ [
+ {
+ "instruction": "Arrange the given numbers in ascending order.",
+ "input": "2, 4, 0, 8, 3",
+ "output": "0, 2, 3, 4, 8"
+ },
+ ...
+ ]
+ ```
+
+2. Make a copy of `scripts/prepare_alpaca.py` and name it what you want:
+
+ ```bash
+ cp scripts/prepare_alpaca.py scripts/prepare_mydata.py
+ ```
+
+3. Modify `scripts/prepare_mydata.py` to read the json data file (a minimal loading sketch follows this list).
+4. Run the script to generate the preprocessed, tokenized train-val split:
+
+ ```bash
+ python scripts/prepare_mydata.py --destination_path data/mydata/
+ ```
+
+5. Run `finetune_adapter.py` by passing in the location of your data (and optionally other parameters):
+
+ ```bash
+ python finetune_adapter.py --data_dir data/mydata/ --out_dir out/myexperiment
+ ```
+
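+As a minimal sketch for step 3 (the path `data/mydata.json` is only an example), the custom script essentially needs to load your file into a list of records with those three keys:
+
+```python
+# Minimal sketch for step 3: load a custom instruction dataset.
+# The path "data/mydata.json" is an example, not a fixed convention.
+import json
+
+with open("data/mydata.json", encoding="utf-8") as f:
+    data = json.load(f)  # a list of {"instruction": ..., "input": ..., "output": ...}
+
+assert all({"instruction", "input", "output"} <= set(row) for row in data)
+```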
+
+## Troubleshooting
+
+If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the finetuning script (see https://github.com/Lightning-AI/lit-llama/issues/101).
diff --git a/howto/finetune_full.md b/howto/finetune_full.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef0a74746c721fb376f1ee9c2b4307a4517eff33
--- /dev/null
+++ b/howto/finetune_full.md
@@ -0,0 +1,104 @@
+# Full Finetuning
+
+Full finetuning updates all layers in the pretrained LLaMA model. This *regular* finetuning procedure is typically considered the baseline for parameter-efficient alternatives such as Low-Rank Adaptation (LoRA) or LLaMA-Adapter.
+
+The provided [finetune_full.py](../scripts/finetune_full.py) script uses 4 A100 GPUs with a fully-sharded data parallel strategy to finetune Lit-LLaMA 7B on the [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset. The A100 GPUs have 40 GB each, although finetuning this model may fit in less memory.
+
+
+
+## Preparation
+
+The steps here only need to be done once:
+
+1. Follow the instructions in the [README](README.md) to install the dependencies.
+
+2. Download and convert the weights and save them in the `./checkpoints` folder as described [here](download_weights.md).
+
+3. Download the data and generate the Alpaca instruction tuning dataset:
+
+ ```bash
+ python scripts/prepare_alpaca.py
+ ```
+
+ or [prepare your own dataset](#tune-on-your-own-dataset).
+
+## Running the finetuning
+
+```bash
+python finetune_full.py
+```
+
+
+You can speed up training by setting the `devices` variable in the script to utilize more GPUs if available, or by increasing the `batch_size`.
+Depending on the available GPU memory, you can also tune the `micro_batch_size` parameter to utilize the GPU efficiently.
+
+For example, the following settings will let you finetune the model in 32 hours using a fully-sharded data parallel strategy:
+```python
+devices = 4
+batch_size = 128 // devices
+micro_batch_size = 4
+```
+
+This script will save checkpoints periodically to the folder `out/`.
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+## Test the model
+
+You can test the finetuned model with your own instructions by running:
+
+```bash
+python generate_full.py \
+ --prompt "Recommend a movie to watch on the weekend." \
+ --quantize llm.int8
+```
+Output:
+```
+A good movie to watch on the weekend would be The Lion King, since it's a classic family film that everyone can enjoy...
+```
+If your GPU supports `bfloat16`, the script will automatically use it. Together with `--quantize llm.int8`, this brings the memory consumption down to ~8 GB.
+
+## Tune on your own dataset
+
+With only a few modifications, you can prepare and train on your own instruction dataset.
+
+1. Create a json file in which each row holds one instruction-response pair.
+   A row has an entry for 'instruction', 'input', and 'output', where 'input' is optional and can be
+ the empty string if the instruction doesn't require a context. Below is an example json file:
+
+ ```
+ [
+ {
+ "instruction": "Arrange the given numbers in ascending order.",
+ "input": "2, 4, 0, 8, 3",
+ "output": "0, 2, 3, 4, 8"
+ },
+ ...
+ ]
+ ```
+
+2. Make a copy of `scripts/prepare_alpaca.py` and name it what you want:
+
+ ```bash
+ cp scripts/prepare_alpaca.py scripts/prepare_mydata.py
+ ```
+
+3. Modify `scripts/prepare_mydata.py` to read the json data file.
+4. Run the script to generate the preprocessed, tokenized train-val split:
+
+ ```bash
+ python scripts/prepare_mydata.py --destination_path data/mydata/
+ ```
+
+5. Run `finetune_full.py` by passing in the location of your data (and optionally other parameters):
+
+ ```bash
+ python finetune_full.py --data_dir data/mydata/ --out_dir out/myexperiment
+ ```
+
+
+## Troubleshooting
+
+If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the finetuning script (see https://github.com/Lightning-AI/lit-llama/issues/101).
diff --git a/howto/finetune_lora.md b/howto/finetune_lora.md
new file mode 100644
index 0000000000000000000000000000000000000000..251d08d5e8122c13a106754f13ca4126d7764d2f
--- /dev/null
+++ b/howto/finetune_lora.md
@@ -0,0 +1,88 @@
+# Finetuning with LoRA
+
+[Low-rank adaptation (LoRA)](https://arxiv.org/abs/2106.09685) is a technique to approximate the update to the linear layers in an LLM with a low-rank matrix factorization. This significantly reduces the number of trainable parameters and speeds up training with little impact on the final performance of the model.
+We demonstrate this method by instruction-finetuning LLaMA 7B on the [Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset on a **single RTX 3090 (24GB) GPU**.
+
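+To illustrate the idea, here is a rough sketch of the effective forward pass of a LoRA-augmented linear layer (the names are illustrative; see `lit_llama/lora.py` in this repo for the actual `MergedLinear` implementation):
+
+```python
+# Rough sketch of the LoRA idea: the frozen weight W gets a low-rank update B @ A,
+# scaled by alpha / r. Only A and B are trained (see lit_llama/lora.py).
+import torch
+
+def lora_linear(x, W, A, B, alpha, r):
+    base = x @ W.T                # frozen pretrained projection, W: (out, in)
+    update = (x @ A.T) @ B.T      # rank-r update, A: (r, in), B: (out, r)
+    return base + (alpha / r) * update
+
+x = torch.randn(1, 4096)
+W = torch.randn(4096, 4096)
+A = torch.randn(8, 4096)
+B = torch.zeros(4096, 8)          # B starts at zero, so the update starts at zero
+out = lora_linear(x, W, A, B, alpha=16, r=8)
+```
+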
+## Preparation
+
+The steps here only need to be done once:
+
+1. Follow the instructions in the [README](README.md) to install the dependencies.
+2. Download and convert the weights and save them in the `./checkpoints` folder as described [here](download_weights.md).
+3. Download the data and generate the instruction tuning dataset:
+
+ ```bash
+ python scripts/prepare_alpaca.py
+ ```
+
+## Running the finetuning
+
+```bash
+python finetune_lora.py
+```
+
+The finetuning requires at least one GPU with ~24 GB memory (RTX 3090).
+
+This script will save checkpoints periodically to the folder `out/`.
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+
+## Test the model
+
+You can test the finetuned model with your own instructions by running:
+
+```bash
+python generate_lora.py --prompt "Recommend a movie to watch on the weekend."
+```
+Output:
+```
+I would recommend the movie The Martian (2015). It is a sci-fi movie starring Matt Damon that follows the story of...
+```
+
+If your GPU supports `bfloat16`, you can additionally pass `--dtype bfloat16` to bring the memory consumption down to ~14 GB.
+
+## Tune on your own dataset
+
+With only a few modifications, you can prepare and train on your own instruction dataset.
+
+1. Create a json file in which each row holds one instruction-response pair.
+   A row has an entry for 'instruction', 'input', and 'output', where 'input' is optional and can be
+ the empty string if the instruction doesn't require a context. Below is an example json file:
+
+ ```
+ [
+ {
+ "instruction": "Arrange the given numbers in ascending order.",
+ "input": "2, 4, 0, 8, 3",
+ "output": "0, 2, 3, 4, 8"
+ },
+ ...
+ ]
+ ```
+
+2. Make a copy of `scripts/prepare_alpaca.py` and name it what you want:
+
+ ```bash
+ cp scripts/prepare_alpaca.py scripts/prepare_mydata.py
+ ```
+
+3. Modify `scripts/prepare_mydata.py` to read the json data file.
+4. Run the script to generate the preprocessed, tokenized train-val split:
+
+ ```bash
+ python scripts/prepare_mydata.py --destination_path data/mydata/
+ ```
+
+5. Run `finetune_lora.py` by passing in the location of your data (and optionally other parameters):
+
+ ```bash
+ python finetune_lora.py --data_dir data/mydata/ --out_dir out/myexperiment
+ ```
+
+
+## Troubleshooting
+
+If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
+`torch.backends.cuda.enable_flash_sdp(False)` in the finetuning script (see https://github.com/Lightning-AI/lit-llama/issues/101).
diff --git a/howto/inference.md b/howto/inference.md
new file mode 100644
index 0000000000000000000000000000000000000000..df11a6db271cd5293d17c2e4cecd33edcfa9d43b
--- /dev/null
+++ b/howto/inference.md
@@ -0,0 +1,37 @@
+# Inference
+
+We demonstrate how to run inference (next token prediction) with the LLaMA base model in the [`generate.py`](generate.py) script:
+
+```bash
+python generate.py --prompt "Hello, my name is"
+```
+Output:
+```
+Hello my name is TJ. I have a passion for the outdoors, love hiking and exploring. I also enjoy traveling and learning new things. I especially enjoy long walks, good conversation and a friendly smile.
+```
+
+The script assumes you have downloaded and converted the weights and saved them in the `./checkpoints` folder as described [here](download_weights.md).
+
+> **Note**
+> All scripts support argument [customization](customize_paths.md)
+
+With the default settings, this will run the 7B model and require ~26 GB of GPU memory (A100 GPU).
+
+## Run Lit-LLaMA on consumer devices
+
+On GPUs with `bfloat16` support, the `generate.py` script will automatically convert the weights and consume about ~14 GB.
+For GPUs with less memory, or ones that don't support `bfloat16`, enable quantization (`--quantize llm.int8`):
+
+```bash
+python generate.py --quantize llm.int8 --prompt "Hello, my name is"
+```
+This will consume about 10 GB of GPU memory, or about 8 GB if also using `bfloat16`.
+See `python generate.py --help` for more options.
+
+You can also use GPTQ-style int4 quantization, but this requires converting the weights first:
+
+```bash
+python quantize.py --checkpoint_path lit-llama.pth --tokenizer_path tokenizer.model --output_path llama-7b-gptq.4bit.pt --dtype bfloat16 --quantize gptq.int4
+```
+
+With the generated quantized checkpoint, generation works as usual with `--quantize gptq.int4`, bringing GPU usage down to about 5 GB. Since only the weights of the Linear layers are quantized, it is useful to also pass `--dtype bfloat16` even with quantization enabled.
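+
+For intuition, the int4 path packs two 4-bit weights per byte and keeps a per-output-channel scale and zero point. A rough sketch of the dequantization that the Triton kernel in `lit_llama/quantization.py` performs on the fly (shapes and names are illustrative):
+
+```python
+# Rough sketch of 4-bit weight dequantization: two 4-bit values per uint8,
+# with a per-output-channel scale and zero point (mirrors the kernel logic
+# in lit_llama/quantization.py, here in plain PyTorch for readability).
+import torch
+
+def dequantize_4bit(packed, scale, zero):
+    # packed: (n_out, n_in // 2) uint8, scale / zero: (n_out, 1)
+    low = (packed & 0xF).float()
+    high = ((packed >> 4) & 0xF).float()
+    values = torch.stack([low, high], dim=-1).flatten(-2)  # interleave the two nibbles
+    return (values - zero) * scale
+```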
diff --git a/howto/tpus.md b/howto/tpus.md
new file mode 100644
index 0000000000000000000000000000000000000000..5629ba8b4d095febd5cf54a17ad2d1ed9066bdb6
--- /dev/null
+++ b/howto/tpus.md
@@ -0,0 +1,51 @@
+# TPU support
+
+Lit-LLaMA uses `lightning.Fabric` under the hood, which itself supports TPUs (via [PyTorch XLA](https://github.com/pytorch/xla)).
+
+The following commands will allow you to set up a `Google Cloud` instance with a [TPU v4](https://cloud.google.com/tpu/docs/system-architecture-tpu-vm) VM:
+
+```shell
+gcloud compute tpus tpu-vm create lit-llama --version=tpu-vm-v4-pt-2.0 --accelerator-type=v4-8 --zone=us-central2-b
+gcloud compute tpus tpu-vm ssh lit-llama --zone=us-central2-b
+```
+
+Now that you are on the machine, let's clone the repository and install the dependencies:
+
+```shell
+git clone https://github.com/Lightning-AI/lit-llama
+cd lit-llama
+pip install -r requirements.txt
+```
+
+By default, computations will run using the new (and experimental) PjRT runtime. Still, it's recommended that you set the following environment variables:
+
+```shell
+export PJRT_DEVICE=TPU
+export ALLOW_MULTIPLE_LIBTPU_LOAD=1
+```
+
+> **Note**
+> You can find an extensive guide on how to get set up and all the available options [here](https://cloud.google.com/tpu/docs/v4-users-guide).
+
+Since you created a new machine, you'll probably need to download the weights. You could scp them into the machine with `gcloud compute tpus tpu-vm scp` or you can follow the steps described in our [downloading guide](download_weights.md).
+
+## Inference
+
+Generation works out-of-the-box with TPUs:
+
+```shell
+python3 generate.py --prompt "Hello, my name is" --num_samples 2
+```
+
+This command will take a long time as XLA needs to compile the graph (~13 min) before running the model.
+In fact, you'll notice that the second sample takes considerably less time (~12 sec).
+
+## Finetuning
+
+Coming soon.
+
+> **Warning**
+> When you are done, remember to delete your instance
+> ```shell
+> gcloud compute tpus tpu-vm delete lit-llama --zone=us-central2-b
+> ```
\ No newline at end of file
diff --git a/howto/train_redpajama.md b/howto/train_redpajama.md
new file mode 100644
index 0000000000000000000000000000000000000000..76b3065bc99af2fb7e8bd27cf825250cde0eb8b5
--- /dev/null
+++ b/howto/train_redpajama.md
@@ -0,0 +1,133 @@
+# Pre-train LLaMA on RedPajama
+
+This howto will walk you through setting up the RedPajama dataset and launching the pre-training script.
+
+## What's RedPajama
+
+[RedPajama](https://github.com/togethercomputer/RedPajama-Data) is an open-source reproduction of the original LLaMA training dataset.
+
+It contains a total of 1.2 trillion tokens, divided into
+
+```text
+Commoncrawl 878B
+C4 175B
+GitHub 59B
+Books 26B
+ArXiv 28B
+Wikipedia 24B
+StackExchange 20B
+```
+
+The [RedPajama repo](https://github.com/togethercomputer/RedPajama-Data) contains the source code for collecting and preparing
+the dataset, and it is Apache 2.0 licensed.
+
+The data itself is licensed according to the original licenses under which its individual parts were released.
+The GitHub datasets are limited to MIT, BSD, or Apache 2.0 repositories.
+
+Along with the full [RedPajama-1T dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T),
+the [RedPajama-1T-Sample](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample) 1B sample dataset
+is also available for development.
+
+You can download the data using git lfs:
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com): git lfs install
+git clone https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T data/RedPajama-Data-1T
+```
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com): git lfs install
+git clone https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample data/RedPajama-Data-1T-Sample
+```
+
+## Prepare RedPajama for training
+
+The dataset consists of 2084 `jsonl` files (the sample dataset contains 11). In order to start pre-training lit-llama
+on it, you need to read, tokenize, and write the data in binary chunks. This will leverage the `PackedDataset`
+streaming dataset that comes with lit-llama.
+
+To do so, run
+
+```bash
+python scripts/prepare_redpajama.py --source_path data/RedPajama-Data-1T --tokenizer_path checkpoints/lit-llama/tokenizer.model --destination_path data/lit-redpajama
+```
+
+or
+
+```bash
+python scripts/prepare_redpajama.py --source_path data/RedPajama-Data-1T-Sample --tokenizer_path checkpoints/lit-llama/tokenizer.model --destination_path data/lit-redpajama-sample --sample True
+```
+
+for the sample dataset.
+
+In the above, we assume that you will be using the same tokenizer as LLaMA, but any trained [SentencePiece](https://github.com/google/sentencepiece) tokenizer with a 32000 vocabulary size will do here.
+
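+Roughly speaking, the preparation step tokenizes each document and appends the token ids to a `PackedDatasetBuilder`, which writes fixed-size binary chunks to disk. A simplified sketch (paths and the chunk size are illustrative, not the script's exact values):
+
+```python
+# Simplified sketch of what the preparation script does with lit_llama.packed_dataset.
+import numpy as np
+from lit_llama import Tokenizer
+from lit_llama.packed_dataset import PackedDatasetBuilder
+
+tokenizer = Tokenizer("checkpoints/lit-llama/tokenizer.model")
+builder = PackedDatasetBuilder(
+    outdir="data/lit-redpajama",
+    prefix="arxiv",                # one prefix per data source
+    chunk_size=2049 * 1024,        # illustrative value
+    sep_token=tokenizer.bos_id,
+    dtype="auto",
+    vocab_size=32000,
+)
+for text in ["example document one", "example document two"]:
+    ids = tokenizer.encode(text)
+    builder.add_array(np.array(ids, dtype=builder.dtype))
+builder.write_reminder()
+```
+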
+The script will take a while to run, so time for :tea:
+
+## Pre-training
+
+Running the pre-training script requires at least 4 GPUs with 40GB+ each (A100).
+
+```bash
+python train_redpajama.py --devices 4 --train_data_dir data/lit-redpajama
+```
+
+For running on the sample dataset:
+
+```bash
+python train_redpajama.py --devices 4 --train_data_dir data/lit-redpajama-sample
+```
+
+The script will save checkpoints periodically to the folder `out/`.
+
+The `train_redpajama.py` script will pre-train the LLaMA 7B model with FSDP in
+`bfloat16` precision and gradient accumulation.
+
+You can easily change the size of the model by passing a different string to
+
+```python
+config = LLaMAConfig.from_name("7B")
+```
+
+in the `main` function.
+
+Keep in mind that the original LLaMA training for the 7B model required 83k A100 80GB
+hours, so you'll need access to a cluster.
+
+Once you're in a cluster, you can follow [these instructions](https://lightning.ai/docs/fabric/stable/guide/multi_node/other.html)
+to launch the script across machines:
+
+- [SLURM cluster](https://lightning.ai/docs/fabric/stable/guide/multi_node/slurm.html)
+- [Barebones cluster](https://lightning.ai/docs/fabric/stable/guide/multi_node/barebones.html)
+- [MPI](https://lightning.ai/docs/fabric/stable/guide/multi_node/other.html)
+
+The script contains several configurations and hyperparameters you can tweak:
+
+```python
+out_dir = "out/training"
+save_interval = 1000
+eval_interval = 1000
+eval_iters = 100
+log_interval = 1
+
+# Hyperparameters
+learning_rate = 6e-4
+batch_size = 125
+micro_batch_size = 5
+max_iters = 600000 # num_epochs * epoch_size // devices
+weight_decay = 1e-1
+beta1 = 0.9
+beta2 = 0.95
+grad_clip = 1.0
+decay_lr = True
+warmup_iters = 2000
+lr_decay_iters = max_iters
+min_lr = 6e-5
+```
+
+In particular, `micro_batch_size` should be adjusted so the process makes full use of the available
+GPU memory.
+
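+For reference, the script bridges the gap between `micro_batch_size` (what fits in memory per forward/backward pass) and the effective `batch_size` with the gradient accumulation mentioned above. Assuming the usual relationship:
+
+```python
+# Assumed relationship (a sketch): each optimizer step accumulates gradients over
+# enough micro batches to reach the effective batch size.
+batch_size = 125
+micro_batch_size = 5
+gradient_accumulation_steps = batch_size // micro_batch_size  # 25 micro batches per optimizer step
+```
+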
+Last, logging is kept minimal in the script. In order to use a particular logger,
+please refer to the Fabric loggers documentation or
+call a logging client library like `wandb` directly.
diff --git a/lit_llama/__init__.py b/lit_llama/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c169d4c6c201d9085d5b0b95f9a1a5ca85eab4c0
--- /dev/null
+++ b/lit_llama/__init__.py
@@ -0,0 +1,2 @@
+from lit_llama.model import LLaMAConfig, LLaMA, RMSNorm, build_rope_cache, apply_rope
+from lit_llama.tokenizer import Tokenizer
diff --git a/lit_llama/adapter.py b/lit_llama/adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..f743c1945beb85bef610b34add8e0703388f3da6
--- /dev/null
+++ b/lit_llama/adapter.py
@@ -0,0 +1,151 @@
+"""Implementation of the paper:
+
+LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention
+https://arxiv.org/abs/2303.16199
+"""
+# mypy: ignore-errors
+import math
+from dataclasses import dataclass
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+import lit_llama.model as llama
+from lit_llama.model import build_rope_cache, apply_rope, RMSNorm, MLP
+
+
+@dataclass
+class LLaMAConfig(llama.LLaMAConfig):
+ adapter_prompt_length: int = 10
+ adapter_start_layer: int = 2
+
+
+class CausalSelfAttention(nn.Module):
+ """A modification of `lit_llama.model.CausalSelfAttention` that adds the attention
+ over the adaption prompt."""
+
+ def __init__(self, config: LLaMAConfig, block_idx: int) -> None:
+ super().__init__()
+ assert config.n_embd % config.n_head == 0
+
+ # key, query, value projections for all heads, but in a batch
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
+ # output projection
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
+
+ if block_idx >= config.adapter_start_layer:
+ # adapter embedding layer
+ self.adapter_wte = nn.Embedding(config.adapter_prompt_length, config.n_embd)
+ # gate for adaption
+ self.gating_factor = torch.nn.Parameter(torch.zeros(1))
+
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.block_size = config.block_size
+ self.block_idx = block_idx
+ self.adapter_prompt_length = config.adapter_prompt_length
+ self.adapter_start_layer = config.adapter_start_layer
+ self.rope_cache = None
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+
+ head_size = C // self.n_head
+ k = k.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+ q = q.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+ v = v.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+
+ if self.rope_cache is None:
+ # cache for future forward calls
+ self.rope_cache = build_rope_cache(
+ seq_len=self.block_size,
+ n_elem=self.n_embd // self.n_head,
+ dtype=x.dtype,
+ device=x.device,
+ )
+
+ q = apply_rope(q, self.rope_cache)
+ k = apply_rope(k, self.rope_cache)
+
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+ # att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+ # att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+ # att = F.softmax(att, dim=-1)
+ # y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+
+ # efficient attention using Flash Attention CUDA kernels
+ y = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
+
+ if self.block_idx >= self.adapter_start_layer:
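+            # Attend over the learnable adaption prompt and add the result, scaled by the
+            # zero-initialized gating factor, on top of the regular causal attention output.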
+ prefix = self.adapter_wte.weight.reshape(1, self.adapter_prompt_length, self.n_embd)
+
+ aT = prefix.size(1)
+ _, ak, av = self.c_attn(prefix).split(self.n_embd, dim=2)
+ ak = ak.view(1, aT, self.n_head, head_size).repeat(B, 1, 1, 1).transpose(1, 2)
+ av = av.view(1, aT, self.n_head, head_size).repeat(B, 1, 1, 1).transpose(1, 2)
+
+ amask = torch.ones(q.shape[-2], ak.shape[-2], dtype=torch.bool, device=x.device)
+ ay = F.scaled_dot_product_attention(q, ak, av, attn_mask=amask, dropout_p=0.0, is_causal=False)
+ y = y + self.gating_factor * ay
+
+ y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+ # output projection
+ y = self.c_proj(y)
+
+ return y
+
+
+class Block(nn.Module):
+ """The implementation is identical to `lit_llama.model.Block` with the exception that
+ we replace the attention layer where adaption is implemented."""
+
+ def __init__(self, config: LLaMAConfig, block_idx: int) -> None:
+ super().__init__()
+ self.rms_1 = RMSNorm(config.n_embd)
+ self.attn = CausalSelfAttention(config, block_idx)
+ self.rms_2 = RMSNorm(config.n_embd)
+ self.mlp = MLP(config)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = x + self.attn(self.rms_1(x))
+ x = x + self.mlp(self.rms_2(x))
+ return x
+
+
+class LLaMA(llama.LLaMA):
+ """The implementation is identical to `lit_llama.model.LLaMA` with the exception that
+ the `Block` saves the layer index and passes it down to the attention layer."""
+
+ def __init__(self, config: LLaMAConfig) -> None:
+ nn.Module.__init__(self)
+ assert config.vocab_size is not None
+ assert config.block_size is not None
+ self.config = config
+
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+ self.transformer = nn.ModuleDict(
+ dict(
+ wte=nn.Embedding(config.vocab_size, config.n_embd),
+ h=nn.ModuleList([Block(config, i) for i in range(config.n_layer)]),
+ ln_f=RMSNorm(config.n_embd),
+ )
+ )
+
+ @classmethod
+ def from_name(cls, name: str):
+ return cls(LLaMAConfig.from_name(name))
+
+
+def mark_only_adapter_as_trainable(model: LLaMA) -> None:
+ """Sets `requires_grad=False` for all non-adapter weights."""
+ for name, param in model.named_parameters():
+ param.requires_grad = "adapter_wte" in name or "gating_factor" in name
+
+
+def adapter_state_from_state_dict(state_dict: dict) -> dict:
+ """Returns the model state dict with only the adapter weights for saving."""
+ return {name: param for name, param in state_dict.items() if "adapter_wte" in name or "gating_factor" in name}
diff --git a/lit_llama/lora.py b/lit_llama/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..393c9eff69bc4b480262acb00719446151c6997b
--- /dev/null
+++ b/lit_llama/lora.py
@@ -0,0 +1,224 @@
+# Derived from https://github.com/microsoft/LoRA
+# ------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
+# ------------------------------------------------------------------------------------------
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import math
+from typing import Dict, List
+
+import lit_llama.model as llama
+
+from contextlib import contextmanager
+from dataclasses import dataclass
+
+
+class LoRALayer():
+ def __init__(
+ self,
+ r: int,
+ lora_alpha: int,
+ lora_dropout: float,
+ merge_weights: bool,
+ ):
+ self.r = r
+ self.lora_alpha = lora_alpha
+ # Optional dropout
+ if lora_dropout > 0.:
+ self.lora_dropout = nn.Dropout(p=lora_dropout)
+ else:
+ self.lora_dropout = lambda x: x
+ # Mark the weight as unmerged
+ self.merged = False
+ self.merge_weights = merge_weights
+
+
+class MergedLinear(nn.Linear, LoRALayer):
+ # LoRA implemented in a dense layer
+ def __init__(
+ self,
+ in_features: int,
+ out_features: int,
+ r: int = 0,
+ lora_alpha: int = 1,
+ lora_dropout: float = 0.,
+ enable_lora: List[bool] = [False],
+ fan_in_fan_out: bool = False,
+ merge_weights: bool = True,
+ **kwargs
+ ):
+ nn.Linear.__init__(self, in_features, out_features, **kwargs)
+ LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout,
+ merge_weights=merge_weights)
+ assert out_features % len(enable_lora) == 0, \
+ 'The length of enable_lora must divide out_features'
+ self.enable_lora = enable_lora
+ self.fan_in_fan_out = fan_in_fan_out
+ # Actual trainable parameters
+ if r > 0 and any(enable_lora):
+ self.lora_A = nn.Parameter(
+ self.weight.new_zeros((r * sum(enable_lora), in_features)))
+ self.lora_B = nn.Parameter(
+ self.weight.new_zeros((out_features // len(enable_lora) * sum(enable_lora), r))
+ ) # weights for Conv1D with groups=sum(enable_lora)
+ self.scaling = self.lora_alpha / self.r
+ # Freezing the pre-trained weight matrix
+ self.weight.requires_grad = False
+ # Compute the indices
+ self.lora_ind = self.weight.new_zeros(
+ (out_features, ), dtype=torch.bool
+ ).view(len(enable_lora), -1)
+ self.lora_ind[enable_lora, :] = True
+ self.lora_ind = self.lora_ind.view(-1)
+ self.reset_parameters()
+ if fan_in_fan_out:
+ self.weight.data = self.weight.data.T
+
+ def reset_parameters(self):
+ nn.Linear.reset_parameters(self)
+ if hasattr(self, 'lora_A'):
+ # initialize A the same way as the default for nn.Linear and B to zero
+ nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
+ nn.init.zeros_(self.lora_B)
+
+ def zero_pad(self, x):
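+        # Scatter the low-rank update back into a tensor of width `out_features`,
+        # leaving zeros at the positions of the projections that have LoRA disabled.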
+ x = x.transpose(0, 1)
+ result = x.new_zeros((*x.shape[:-1], self.out_features))
+ result = result.view(-1, self.out_features)
+ result[:, self.lora_ind] = x.reshape(
+ -1, self.out_features // len(self.enable_lora) * sum(self.enable_lora)
+ )
+ return result.view((*x.shape[:-1], self.out_features)).transpose(0, 1)
+
+ def train(self, mode: bool = True):
+ def T(w):
+ return w.T if self.fan_in_fan_out else w
+ nn.Linear.train(self, mode)
+
+ # if train(True) -> unmerge unless we already have them unmerged
+ # if train(False) -> merge unless we already have them merged
+ should = self.merged if mode else not self.merged
+
+ if self.merge_weights and should:
+ if self.r > 0 and any(self.enable_lora):
+ delta_w = F.conv1d(
+ self.lora_A.data.unsqueeze(0),
+ self.lora_B.data.unsqueeze(-1),
+ groups=sum(self.enable_lora)
+ ).squeeze(0)
+ # -1: W = W - delta_W (unmerge), +1: W = W + delta_W (merge)
+ sign = -1 if mode else 1
+ self.weight.data += sign * self.zero_pad(T(delta_w * self.scaling))
+ self.merged = not mode
+
+ def forward(self, x: torch.Tensor):
+ def T(w):
+ return w.T if self.fan_in_fan_out else w
+ if self.merged:
+ return F.linear(x, T(self.weight), bias=self.bias)
+ else:
+ result = F.linear(x, T(self.weight), bias=self.bias)
+ if self.r > 0:
+ after_A = F.linear(self.lora_dropout(x), self.lora_A)
+ after_B = F.conv1d(
+ after_A.transpose(-2, -1),
+ self.lora_B.unsqueeze(-1),
+ groups=sum(self.enable_lora)
+ ).transpose(-2, -1)
+ result += self.zero_pad(after_B) * self.scaling
+ return result
+
+
+def mark_only_lora_as_trainable(model: nn.Module, bias: str = 'none') -> None:
+ for n, p in model.named_parameters():
+ if 'lora_' not in n:
+ p.requires_grad = False
+ if bias == 'none':
+ return
+ elif bias == 'all':
+ for n, p in model.named_parameters():
+ if 'bias' in n:
+ p.requires_grad = True
+ elif bias == 'lora_only':
+ for m in model.modules():
+ if isinstance(m, LoRALayer) and \
+ hasattr(m, 'bias') and \
+ m.bias is not None:
+ m.bias.requires_grad = True
+ else:
+ raise NotImplementedError
+
+
+def lora_state_dict(model: nn.Module, bias: str = 'none') -> Dict[str, torch.Tensor]:
+ my_state_dict = model.state_dict()
+ if bias == 'none':
+ return {k: my_state_dict[k] for k in my_state_dict if 'lora_' in k}
+ elif bias == 'all':
+ return {k: my_state_dict[k] for k in my_state_dict if 'lora_' in k or 'bias' in k}
+ elif bias == 'lora_only':
+ to_return = {}
+ for k in my_state_dict:
+ if 'lora_' in k:
+ to_return[k] = my_state_dict[k]
+ bias_name = k.split('lora_')[0]+'bias'
+ if bias_name in my_state_dict:
+ to_return[bias_name] = my_state_dict[bias_name]
+ return to_return
+ else:
+ raise NotImplementedError
+
+
+@dataclass
+class LoRAConfig:
+ r: float = 0.0
+ alpha: float = 1.0
+ dropout: float = 0.0
+
+
+class CausalSelfAttention(llama.CausalSelfAttention):
+ lora_config = None
+
+ def __init__(self, config: llama.LLaMAConfig) -> None:
+ # Skip the parent class __init__ altogether and replace it to avoid
+ # useless allocations
+ nn.Module.__init__(self)
+ assert config.n_embd % config.n_head == 0
+
+ # key, query, value projections for all heads, but in a batch
+ self.c_attn = MergedLinear(
+ in_features=config.n_embd,
+ out_features=3 * config.n_embd,
+ r=self.lora_config.r,
+ lora_alpha=self.lora_config.alpha,
+ lora_dropout=self.lora_config.dropout,
+ enable_lora=[True, False, True],
+ fan_in_fan_out = False,
+ merge_weights=True,
+ bias=False)
+ # output projection
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
+ # regularization
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.block_size = config.block_size
+ self.rope_cache = None
+
+
+@contextmanager
+def lora(r, alpha, dropout, enabled: bool = True):
+ """A context manager under which you can instantiate the model with LoRA."""
+ if not enabled:
+ yield
+ return
+
+ CausalSelfAttention.lora_config = LoRAConfig(r=r, alpha=alpha, dropout=dropout)
+
+ causal_self_attention = llama.CausalSelfAttention
+ llama.CausalSelfAttention = CausalSelfAttention
+ yield
+ llama.CausalSelfAttention = causal_self_attention
+
+ CausalSelfAttention.lora_config = None
diff --git a/lit_llama/model.py b/lit_llama/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ec2433bb3588591401aae4880c19f640a868e31
--- /dev/null
+++ b/lit_llama/model.py
@@ -0,0 +1,236 @@
+"""Full definition of a LLaMA Language Model, all of it in this single file.
+
+Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+"""
+# mypy: ignore-errors
+import math
+from dataclasses import dataclass
+from typing import Optional
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from typing_extensions import Self
+
+from lit_llama.utils import find_multiple
+
+
+@dataclass
+class LLaMAConfig:
+ block_size: int = 2048
+ vocab_size: int = 32000
+ padded_vocab_size: Optional[int] = None
+ n_layer: int = 32
+ n_head: int = 32
+ n_embd: int = 4096
+
+ def __post_init__(self):
+ if self.padded_vocab_size is None:
+ self.padded_vocab_size = find_multiple(self.vocab_size, 64)
+
+ @classmethod
+ def from_name(cls, name: str) -> Self:
+ return cls(**llama_configs[name])
+
+
+llama_configs = {
+ "7B": dict(n_layer=32, n_head=32, n_embd=4096),
+ "13B": dict(n_layer=40, n_head=40, n_embd=5120),
+ "30B": dict(n_layer=60, n_head=52, n_embd=6656),
+ "65B": dict(n_layer=80, n_head=64, n_embd=8192),
+}
+
+
+class LLaMA(nn.Module):
+ def __init__(self, config: LLaMAConfig) -> None:
+ super().__init__()
+ assert config.padded_vocab_size is not None
+ self.config = config
+
+ self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=False)
+ self.transformer = nn.ModuleDict(
+ dict(
+ wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
+ h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+ ln_f=RMSNorm(config.n_embd),
+ )
+ )
+
+ def _init_weights(self, module: nn.Module) -> None:
+ if isinstance(module, nn.Linear):
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
+ elif isinstance(module, nn.Embedding):
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
+
+ def forward(self, idx: torch.Tensor) -> torch.Tensor:
+ _, t = idx.size()
+ assert (
+ t <= self.config.block_size
+ ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+
+ # forward the LLaMA model itself
+ x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+
+ for block in self.transformer.h:
+ x = block(x)
+ x = self.transformer.ln_f(x)
+
+ logits = self.lm_head(x) # (b, t, vocab_size)
+
+ return logits
+
+ @classmethod
+ def from_name(cls, name: str) -> Self:
+ return cls(LLaMAConfig.from_name(name))
+
+
+class Block(nn.Module):
+ def __init__(self, config: LLaMAConfig) -> None:
+ super().__init__()
+ self.rms_1 = RMSNorm(config.n_embd)
+ self.attn = CausalSelfAttention(config)
+ self.rms_2 = RMSNorm(config.n_embd)
+ self.mlp = MLP(config)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = x + self.attn(self.rms_1(x))
+ x = x + self.mlp(self.rms_2(x))
+ return x
+
+
+class CausalSelfAttention(nn.Module):
+ def __init__(self, config: LLaMAConfig) -> None:
+ super().__init__()
+ assert config.n_embd % config.n_head == 0
+
+ # key, query, value projections for all heads, but in a batch
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
+ # output projection
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
+
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.block_size = config.block_size
+ self.rope_cache: Optional[torch.Tensor] = None
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+
+ head_size = C // self.n_head
+ k = k.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+ q = q.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+ v = v.view(B, T, self.n_head, head_size).transpose(1, 2) # (B, nh, T, hs)
+
+ if self.rope_cache is None:
+ # cache for future forward calls
+ self.rope_cache = build_rope_cache(
+ seq_len=self.block_size,
+ n_elem=self.n_embd // self.n_head,
+ dtype=x.dtype,
+ device=x.device,
+ )
+
+ q = apply_rope(q, self.rope_cache)
+ k = apply_rope(k, self.rope_cache)
+
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+ # att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+ # att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+ # att = F.softmax(att, dim=-1)
+ # y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+
+ # efficient attention using Flash Attention CUDA kernels
+ y = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
+
+ y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+ # output projection
+ y = self.c_proj(y)
+
+ return y
+
+
+class MLP(nn.Module):
+ def __init__(self, config: LLaMAConfig) -> None:
+ super().__init__()
+ hidden_dim = 4 * config.n_embd
+ n_hidden = int(2 * hidden_dim / 3)
+ n_hidden = find_multiple(n_hidden, 256)
+
+ self.c_fc1 = nn.Linear(config.n_embd, n_hidden, bias=False)
+ self.c_fc2 = nn.Linear(config.n_embd, n_hidden, bias=False)
+ self.c_proj = nn.Linear(n_hidden, config.n_embd, bias=False)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = F.silu(self.c_fc1(x)) * self.c_fc2(x)
+ x = self.c_proj(x)
+ return x
+
+
+class RMSNorm(nn.Module):
+ """Root Mean Square Layer Normalization.
+
+ Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License:
+ https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE.
+ """
+
+ def __init__(self, size: int, dim: int = -1, eps: float = 1e-5) -> None:
+ super().__init__()
+ self.scale = nn.Parameter(torch.ones(size))
+ self.eps = eps
+ self.dim = dim
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ # NOTE: the original RMSNorm paper implementation is not equivalent
+ # norm_x = x.norm(2, dim=self.dim, keepdim=True)
+ # rms_x = norm_x * d_x ** (-1. / 2)
+ # x_normed = x / (rms_x + self.eps)
+ norm_x = torch.mean(x * x, dim=self.dim, keepdim=True)
+ x_normed = x * torch.rsqrt(norm_x + self.eps)
+ return self.scale * x_normed
+
+
+def build_rope_cache(seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000) -> torch.Tensor:
+ """Enhanced Transformer with Rotary Position Embedding.
+
+ Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
+ transformers/rope/__init__.py. MIT License:
+ https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
+ """
+ # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
+ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))
+
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
+ seq_idx = torch.arange(seq_len, dtype=dtype, device=device)
+
+ # Calculate the product of position index and $\theta_i$
+ idx_theta = torch.outer(seq_idx, theta).float()
+
+ cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
+
+ # this is to mimic the behaviour of complex32, else we will get different results
+ if dtype in (torch.float16, torch.bfloat16, torch.int8):
+ cache = cache.half()
+ return cache
+
+
+def apply_rope(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
+ x = x.transpose(1, 2)
+
+ # truncate to support variable sizes
+ T = x.size(1)
+ rope_cache = rope_cache[:T]
+
+ # cast because the reference does
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
+ rope_cache = rope_cache.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
+ x_out2 = torch.stack(
+ [xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
+ xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
+ ], -1)
+
+ x_out2 = x_out2.flatten(3)
+ return x_out2.transpose(1, 2).type_as(x)
diff --git a/lit_llama/packed_dataset.py b/lit_llama/packed_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..909df1d0d4d80a8a6a3bff8dd0c8988d9cb96a73
--- /dev/null
+++ b/lit_llama/packed_dataset.py
@@ -0,0 +1,258 @@
+# Very loosely inspired by indexed_dataset in Fairseq, Megatron
+# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py
+
+
+import os
+import struct
+import random
+
+import numpy as np
+import torch
+from torch.utils.data import IterableDataset, get_worker_info
+
+
+dtypes = {
+ 1: np.uint8,
+ 2: np.int8,
+ 3: np.int16,
+ 4: np.int32,
+ 5: np.int64,
+ 6: np.float32,
+ 7: np.float64,
+ 8: np.uint16,
+}
+
+
+def code(dtype):
+ for k in dtypes.keys():
+ if dtypes[k] == dtype:
+ return k
+ raise ValueError(dtype)
+
+
+HDR_MAGIC = b"LITPKDS"
+HDR_SIZE = 24 # bytes
+
+
+class PackedDataset(IterableDataset):
+ def __init__(self, filenames, n_chunks, block_size, seed=12345, shuffle=True, wrap=False, num_processes=1, process_rank=0):
+ self._filenames = filenames
+ self._n_chunks = n_chunks
+ self._block_size = block_size
+ self._seed = seed
+ self._shuffle = shuffle
+ self._wrap = wrap
+ self._num_processes = num_processes
+ self._process_rank = process_rank
+
+ def __iter__(self):
+ worker_info = get_worker_info()
+ num_workers = worker_info.num_workers if worker_info is not None else 1
+ worker_id = worker_info.id if worker_info is not None else 0
+ num_shards = num_workers * self._num_processes
+ shard_id = self._process_rank * num_workers + worker_id
+
+ max_num_files = len(self._filenames) // num_shards * num_shards
+ filenames = self._filenames[shard_id : max_num_files : num_shards]
+
+ return PackedDatasetIterator(
+ filenames=filenames,
+ n_chunks=self._n_chunks,
+ block_size=self._block_size,
+ seed=self._seed,
+ shuffle=self._shuffle,
+ wrap=self._wrap,
+ )
+
+
+class PackedDatasetBuilder(object):
+ def __init__(
+ self,
+ outdir,
+ prefix,
+ chunk_size,
+ sep_token,
+ dtype="auto",
+ vocab_size=None,
+ ):
+ if dtype == "auto":
+ if vocab_size is None:
+ raise ValueError("vocab_size cannot be None when dtype='auto'")
+ if vocab_size is not None and vocab_size < 65500:
+ self._dtype = np.uint16
+ else:
+ self._dtype = np.int32
+ else:
+ self._dtype = dtype
+ self._counter = 0
+ self._chunk_size = chunk_size
+ self._outdir = outdir
+ self._prefix = prefix
+ self._sep_token = sep_token
+ self._arr = np.zeros(self._chunk_size, dtype=self._dtype)
+ self._arr.fill(self._sep_token)
+ self._idx = 0
+ self._version = 1
+ self._filenames = []
+
+ def _write_chunk(self):
+ filename = f"{self._prefix}_{self._counter:010d}.bin"
+ filename = os.path.join(self._outdir, filename)
+
+ with open(filename, "wb") as f:
+ f.write(HDR_MAGIC)
+            f.write(struct.pack("<Q", self._version))
+            f.write(struct.pack("<B", code(self._dtype)))
+            f.write(struct.pack("<Q", self._chunk_size))
+            f.write(self._arr.tobytes(order="C"))
+
+        self._filenames.append(filename)
+        self._counter += 1
+        self._arr.fill(self._sep_token)
+        self._idx = 0
+
+    @property
+    def dtype(self):
+        return self._dtype
+
+    @property
+    def filenames(self):
+        return self._filenames.copy()
+
+    def add_array(self, arr):
+        while self._idx + arr.shape[0] > self._chunk_size:
+ part_len = self._chunk_size - self._idx
+ self._arr[self._idx : self._idx + part_len] = arr[:part_len]
+ self._write_chunk()
+ arr = arr[part_len:]
+
+ arr_len = arr.shape[0]
+ self._arr[self._idx : self._idx + arr_len] = arr
+ self._idx += arr_len
+
+ def write_reminder(self):
+ self._write_chunk()
+
+
+class PackedDatasetIterator:
+ def __init__(self, filenames, n_chunks, block_size, seed, shuffle, wrap):
+ self._seed = seed
+ self._shuffle = shuffle
+ self._rng = np.random.default_rng(seed) if shuffle else None
+ self._block_idxs = None
+
+ self._wrap = wrap
+
+ # TODO: instead of filenames, we could have a single text stream
+ # (or text file) with the sequence of all files to be
+ # fetched/loaded.
+ self._filenames = filenames
+ self._file_idx = 0
+
+ self._n_chunks = n_chunks
+
+ self._dtype = None
+ self._block_size = block_size
+ self._n_blocks = None
+
+ self._mmaps = []
+ self._buffers = []
+
+ self._block_idxs = []
+ self._curr_idx = 0
+
+ self._load_n_chunks()
+
+ def _read_header(self, path):
+ with open(path, "rb") as f:
+ magic = f.read(len(HDR_MAGIC))
+ assert magic == HDR_MAGIC, "File doesn't match expected format."
+            version = struct.unpack("<Q", f.read(8))
+            assert (1,) == version
+            (dtype_code,) = struct.unpack("<B", f.read(1))
+            dtype = dtypes[dtype_code]
+            (chunk_size,) = struct.unpack("<Q", f.read(8))
+            return dtype, chunk_size
+
+    def _close_mmaps(self):
+        for mmap in self._mmaps:
+            mmap._mmap.close()
+
+    def _load_n_chunks(self):
+        self._close_mmaps()
+        self._mmaps = []
+        self._buffers = []
+
+        if self._n_chunks > len(self._filenames[self._file_idx :]):
+ if not self._wrap:
+ raise StopIteration
+ else:
+ self._file_idx = 0
+
+ for i in range(self._n_chunks):
+ filename = self._filenames[self._file_idx + i]
+ if self._dtype is None:
+ self._dtype, self._chunk_size = self._read_header(
+ filename
+ )
+ self._n_blocks = self._chunk_size // self._block_size
+ # TODO: check header matches with previous files
+ mmap = np.memmap(filename, mode="r", order="C", offset=HDR_SIZE)
+ self._mmaps.append(mmap)
+ self._buffers.append(memoryview(mmap))
+
+ self._file_idx += self._n_chunks
+ n_all_blocks = self._n_chunks * self._n_blocks
+
+ self._block_idxs = (
+ self._rng.permutation(n_all_blocks)
+ if self._shuffle
+ else range(n_all_blocks)
+ )
+
+ self._curr_idx = 0
+
+ def __del__(self):
+ self._close_mmaps()
+ del self._mmaps
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._curr_idx >= len(self._block_idxs):
+ self._load_n_chunks()
+ # TODO: trigger fetching next next n_chunks if remote
+ block_idx = self._block_idxs[self._curr_idx]
+ chunk_id = block_idx // self._n_blocks
+ buffer = self._buffers[chunk_id]
+ elem_id = (block_idx % self._n_blocks) * self._block_size
+ offset = np.dtype(self._dtype).itemsize * elem_id
+ arr = np.frombuffer(
+ buffer, dtype=self._dtype, count=self._block_size, offset=offset
+ )
+ self._curr_idx += 1
+ return torch.from_numpy(arr.astype(np.int64))
+
+
+class CombinedDataset(IterableDataset):
+ def __init__(self, datasets, seed, weights=None):
+ self._seed = seed
+ self._datasets = datasets
+ self._weights = weights
+ n_datasets = len(datasets)
+ if weights is None:
+ self._weights = [1 / n_datasets] * n_datasets
+
+ def __iter__(self):
+ return CombinedDatasetIterator(self._datasets, self._seed, self._weights)
+
+
+class CombinedDatasetIterator:
+ def __init__(self, datasets, seed, weights):
+ self._datasets = [iter(el) for el in datasets]
+ self._weights = weights
+ self._rng = random.Random(seed)
+
+ def __next__(self):
+ dataset, = self._rng.choices(self._datasets, weights=self._weights, k=1)
+ return next(dataset)
+
diff --git a/lit_llama/quantization.py b/lit_llama/quantization.py
new file mode 100644
index 0000000000000000000000000000000000000000..668a39f47083537bd254fcb08d79e5199ae318bb
--- /dev/null
+++ b/lit_llama/quantization.py
@@ -0,0 +1,614 @@
+import os
+from contextlib import contextmanager
+import warnings
+import math
+
+import torch
+
+# configuration for bitsandbytes before import
+os.environ["BITSANDBYTES_NOWELCOME"] = "1"
+warnings.filterwarnings(
+ "ignore",
+ message="MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization",
+)
+warnings.filterwarnings(
+ "ignore",
+ message="MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization",
+)
+warnings.filterwarnings(
+ "ignore",
+ message="The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers and GPU quantization are unavailable.",
+)
+
+try:
+ import bitsandbytes as bnb # noqa: E402
+except:
+ bnb = None
+
+try:
+ import triton # noqa: E402
+ import triton.language as tl # noqa: E402
+except:
+ triton = None
+
+if bnb is not None:
+
+ class Linear8bitLt(bnb.nn.Linear8bitLt):
+ """Wraps `bnb.nn.Linear8bitLt` and enables instantiation directly on the device and
+        re-quantization when loading the state dict.
+
+ This should only be used for inference. For training, use `bnb.nn.Linear8bitLt` directly.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs, has_fp16_weights=False, threshold=6.0)
+ # We quantize the initial weight here so we don't end up filling the device
+ # memory with float32 weights which could lead to OOM.
+ self._quantize_weight(self.weight.data)
+
+ def _load_from_state_dict(self, local_state_dict, *args, **kwargs):
+ # There is only one key that ends with `*.weight`, the other one is the bias
+ weight_key = next(
+ (name for name in local_state_dict.keys() if name.endswith("weight")),
+ None,
+ )
+ if weight_key is None:
+ return
+
+ # Load the weight from the state dict and re-quantize it
+ weight = local_state_dict.pop(weight_key)
+ self._quantize_weight(weight)
+
+ # If there is a bias, let nn.Module load it
+ if local_state_dict:
+ super()._load_from_state_dict(local_state_dict, *args, **kwargs)
+
+ def _quantize_weight(self, weight: torch.Tensor) -> None:
+ # This code is taken and adapted from `bnb.nn.Int8Params.cuda()`
+ B = weight.contiguous().half().cuda()
+ CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
+ del CBt
+ del SCBt
+ self.weight.data = CB
+ setattr(self.weight, "CB", CB)
+ setattr(self.weight, "SCB", SCB)
+
+
+if triton is not None:
+ # This is adapted from the OpenAI Triton matmul example.
+ @triton.autotune(
+ configs=[
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 128,
+ "BLOCK_SIZE_N": 256,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=3,
+ num_warps=8,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 256,
+ "BLOCK_SIZE_N": 128,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=3,
+ num_warps=8,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 256,
+ "BLOCK_SIZE_N": 64,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 64,
+ "BLOCK_SIZE_N": 256,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 128,
+ "BLOCK_SIZE_N": 128,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 128,
+ "BLOCK_SIZE_N": 64,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 64,
+ "BLOCK_SIZE_N": 128,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 128,
+ "BLOCK_SIZE_N": 32,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=4,
+ num_warps=4,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 64,
+ "BLOCK_SIZE_N": 32,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=5,
+ num_warps=2,
+ ),
+ triton.Config(
+ {
+ "BLOCK_SIZE_M": 32,
+ "BLOCK_SIZE_N": 64,
+ "BLOCK_SIZE_K": 32,
+ "GROUP_SIZE_M": 8,
+ },
+ num_stages=5,
+ num_warps=2,
+ ),
+ ],
+ key=["M", "N", "K"],
+ )
+ @triton.jit
+ def linear_kernel_4bit_weight(
+ # Pointers to matrices
+ a_ptr,
+ b_ptr,
+ c_ptr,
+ bscales_ptr,
+ bzeros_ptr,
+ # bdequant,
+ # Matrix dimensions
+ M,
+ N,
+ K,
+ # The stride variables represent how much to increase the ptr by when moving by 1
+ # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
+ # by to get the element one row down (A has M rows)
+ stride_am,
+ stride_ak,
+ stride_bk,
+ stride_bn,
+ stride_cm,
+ stride_cn,
+ # Meta-parameters
+ BLOCK_SIZE_M: tl.constexpr,
+ BLOCK_SIZE_N: tl.constexpr,
+ BLOCK_SIZE_K: tl.constexpr,
+ GROUP_SIZE_M: tl.constexpr,
+ ):
+ """Kernel for computing the matmul C = A x B.T.
+ A has shape (M, K), B has shape (N, K) and C has shape (M, N)
+ """
+ # -----------------------------------------------------------
+ # Map program ids `pid` to the block of C it should compute.
+ # This is done in a grouped ordering to promote L2 data reuse
+        # See the `L2 Cache Optimizations` section of the Triton matmul tutorial for details
+ pid = tl.program_id(axis=0)
+ num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
+ num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
+ num_pid_in_group = GROUP_SIZE_M * num_pid_n
+ group_id = pid // num_pid_in_group
+ first_pid_m = group_id * GROUP_SIZE_M
+ group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
+ pid_m = first_pid_m + (pid % group_size_m)
+ pid_n = (pid % num_pid_in_group) // group_size_m
+
+ # ----------------------------------------------------------
+ # Create pointers for the first blocks of A and B.
+ # We will advance this pointer as we move in the K direction
+ # and accumulate
+ # a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
+        # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
+        # see the `Pointer Arithmetics` section of the Triton matmul tutorial for details
+ offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+ offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
+ a_mask = offs_am[:, None] < M
+ b_mask = offs_bn[None, :] < N
+ offs_k = tl.arange(0, BLOCK_SIZE_K)
+ a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
+ b_ptrs = b_ptr + (
+ (offs_k[:, None] // 2) * stride_bk + offs_bn[None, :] * stride_bn
+ )
+
+ bscales_ptrs = bscales_ptr + offs_bn[None, :]
+ bzeros_ptrs = bzeros_ptr + offs_bn[None, :]
+
+ scale = tl.load(bscales_ptrs)
+ zero = tl.load(bzeros_ptrs)
+ # -----------------------------------------------------------
+ # Iterate to compute a block of the C matrix
+ # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
+ # of fp32 values for higher accuracy.
+ # `accumulator` will be converted back to fp16 after the loop
+ accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+ for k in range(0, K, BLOCK_SIZE_K):
+            # wasteful as it is to load everything twice, my attempts at avoiding it led to slower code
+ b12 = tl.load(b_ptrs, mask=b_mask)
+ # Note that for simplicity, we don't apply a mask in K here.
+ a = tl.load(a_ptrs, mask=a_mask).to(tl.float32)
+ b = (
+ ((b12.to(tl.uint8) >> ((offs_k[:, None] % 2) * 4)) & 0xF).to(tl.float32)
+ - zero
+ ) * scale
+ accumulator += tl.dot(a, b)
+
+ # Advance the ptrs to the next K block
+ a_ptrs += BLOCK_SIZE_K * stride_ak
+ b_ptrs += (BLOCK_SIZE_K // 2) * stride_bk
+ c = accumulator
+
+ # -----------------------------------------------------------
+ # Write back the block of the output matrix C
+ offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+ offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
+ c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
+ c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
+ tl.store(c_ptrs, c, mask=c_mask)
+
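+    # Packing convention assumed by `linear_kernel_4bit_weight` above and produced by
+    # `ColBlockQuantizedLinear.pack_weight` below: each uint8 of the weight holds two
+    # 4-bit codes along K, with the even K index in the low nibble and the odd K index
+    # in the high nibble. A tiny illustrative check (not executed here):
+    #
+    #   packed = 0xB4
+    #   assert (packed >> 0) & 0xF == 0x4   # code for the even K entry
+    #   assert (packed >> 4) & 0xF == 0xB   # code for the odd K entry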
+ def qlinear_4bit_weight(inp, weight, scales, zeros):
+ weight = weight.t().contiguous()
+ c_shape = inp.shape[:-1] + weight.shape[-1:]
+ inp = inp.reshape(-1, inp.shape[-1]).contiguous()
+ # we pad the input to amortize triton compilation cost better
+ PAD_TO = 256
+ if inp.shape[0] % PAD_TO != 0:
+ c_crop = inp.shape[0]
+ new_inp_shape0 = inp.shape[0] + PAD_TO - inp.shape[0] % PAD_TO
+ inp2 = inp.new_empty((new_inp_shape0, inp.shape[1]))
+ inp2[: inp.shape[0]] = inp
+ inp2[inp.shape[0] :].zero_()
+ inp = inp2
+ else:
+ c_crop = None
+
+ assert inp.shape[1] == weight.shape[0] * 2, "incompatible dimensions"
+
+ assert scales.shape == (weight.shape[1], 1)
+ assert zeros.shape == (weight.shape[1], 1)
+ scales = scales.contiguous()
+ zeros = zeros.contiguous()
+ K, N = weight.shape
+ M, K = inp.shape
+ assert (
+ K % 32 == 0
+ ), "We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K"
+ # allocates output
+ c = torch.empty((M, N), device=inp.device, dtype=inp.dtype)
+ # 1D launch kernel where each block gets its own program.
+ grid = lambda META: (
+ triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
+ )
+ linear_kernel_4bit_weight[grid](
+ inp,
+ weight,
+ c,
+ scales,
+ zeros,
+ M,
+ N,
+ K,
+ inp.stride(0),
+ inp.stride(1),
+ weight.stride(0),
+ weight.stride(1),
+ c.stride(0),
+ c.stride(1),
+ )
+ return c[:c_crop].reshape(c_shape)
+
+else:
+ qlinear_4bit_weight = None
+
+
+# for correctness but with terrible perf
+class ColBlockQuantizedLinear(torch.nn.Module):
+ def __init__(self, in_features, out_features, bias: bool, *, bits, tile_cols):
+ super().__init__()
+ self.in_features = in_features
+ self.out_features = out_features
+ self.tile_cols = tile_cols if tile_cols != -1 else self.in_features
+ self.bits = bits
+ self.entries_per_byte = 8 // bits
+ assert self.entries_per_byte > 0 and self.entries_per_byte * self.bits == 8
+ assert in_features % self.entries_per_byte == 0
+ self.register_buffer(
+ "quant_weight",
+ torch.empty(
+ (self.out_features, self.in_features // self.entries_per_byte),
+ dtype=torch.uint8,
+ )
+ .t()
+ .contiguous()
+ .t(),
+ )
+ self.register_buffer(
+ "scales",
+ torch.empty(
+ (
+ self.out_features,
+ (self.in_features + self.tile_cols - 1) // self.tile_cols,
+ )
+ ),
+ )
+ self.register_buffer("zeros", torch.empty_like(self.scales))
+ assert isinstance(bias, bool)
+ if bias:
+ self.register_buffer("bias", torch.empty((self.out_features,)))
+ else:
+ self.register_buffer("bias", None)
+
+ def pack_weight(self, weight):
+ weight = weight.to(device=self.quant_weight.device, copy=True)
+ for j in range(self.scales.size(1)):
+ weight[:, j * self.tile_cols : (j + 1) * self.tile_cols] /= self.scales[
+ :, j : j + 1
+ ]
+ weight[:, j * self.tile_cols : (j + 1) * self.tile_cols] += self.zeros[
+ :, j : j + 1
+ ]
+ weight = weight.clamp_(min=0, max=2**self.bits - 1).to(dtype=torch.uint8)
+ self.quant_weight.zero_()
+ for nr in range(self.entries_per_byte):
+ self.quant_weight += weight[:, nr :: self.entries_per_byte] << (
+ nr * self.bits
+ )
+
+ def get_weight(self, dtype=torch.float):
+ weight = torch.empty(
+ (self.out_features, self.in_features),
+ device=self.quant_weight.device,
+ dtype=dtype,
+ )
+ mask = (1 << self.bits) - 1
+ for nr in range(self.entries_per_byte):
+ weight[:, nr :: self.entries_per_byte] = (
+ (self.quant_weight >> (nr * self.bits)) & mask
+ ).float()
+ self.quant_weight.to(dtype)
+ for j in range(self.scales.size(1)):
+ weight[:, j * self.tile_cols : (j + 1) * self.tile_cols] -= self.zeros[
+ :, j : j + 1
+ ]
+ weight[:, j * self.tile_cols : (j + 1) * self.tile_cols] *= self.scales[
+ :, j : j + 1
+ ]
+ return weight
+
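+    # The dequantization in `get_weight` above, written out as a formula for one tile of
+    # columns (a sketch; `j` indexes the column tile of width `tile_cols`):
+    #
+    #   W_float[:, tile_j] = (Q[:, tile_j].float() - zeros[:, j:j+1]) * scales[:, j:j+1]
+    #
+    # where `Q` holds the unpacked integer codes in [0, 2**bits - 1].
+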
+ def forward(self, inp):
+ if (
+ triton is not None
+ and self.bits == 4
+ and self.quant_weight.device.type == "cuda"
+ and self.zeros.shape[1] == 1
+ and self.quant_weight.shape[1] % 32 == 0
+ ):
+ return qlinear_4bit_weight(inp, self.quant_weight, self.scales, self.zeros)
+ weight = self.get_weight(dtype=inp.dtype)
+ return torch.nn.functional.linear(inp, weight, self.bias)
+
+
+class GPTQQuantizer:
+ # The algorithm and code has been taken from https://github.com/IST-DASLab/gptq/
+ # E. Frantar et al GPTQ: Accurate Post-training Compression for GPT, arXiv:2210.17323
+ # portions copyright by the authors licensed under the Apache License 2.0
+ # All errors are our own.
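+    # Typical use (as in quantize.py): register `collect_input_stats` as a forward hook
+    # on the wrapped linear, run the calibration batches through the parent module,
+    # remove the hook, then call `quantize()` to get a `ColBlockQuantizedLinear`
+    # replacement module together with the accumulated quantization error.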
+
+ def __init__(
+ self,
+ linear_module,
+ *,
+ bits,
+ perchannel=True,
+ sym=False,
+ blocksize=128,
+ percdamp=0.01,
+ groupsize=-1,
+ actorder=False
+ ):
+ assert isinstance(linear_module, torch.nn.Linear)
+
+ self.linear_module = linear_module
+ self.dev = self.linear_module.weight.device
+ self.rows = linear_module.weight.shape[0]
+ self.columns = linear_module.weight.shape[1]
+ self.H = torch.zeros((self.columns, self.columns), device=self.dev)
+ self.nsamples = 0
+ self.bits = bits
+ self.maxq = 2**bits - 1
+ self.perchannel = perchannel
+ self.sym = sym
+ self.blocksize = blocksize
+ self.percdamp = percdamp
+ self.groupsize = groupsize
+ self.actorder = actorder
+ self.tile_cols = self.columns if groupsize == -1 else groupsize
+ self.scales = torch.zeros(
+ (self.rows, (self.columns + self.tile_cols - 1) // self.tile_cols),
+ dtype=self.linear_module.weight.dtype,
+ device=self.dev,
+ )
+ self.zeros = torch.zeros_like(self.scales)
+ assert not (
+ self.actorder and self.groupsize != -1
+ ), "The permutation trick does not work for grouped quantization"
+
+ @staticmethod
+ def quantize_weight(x, scale, zero, maxq):
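+        # Affine quantize/dequantize round trip. Worked example (a sketch): with
+        # scale=0.5, zero=8 and maxq=15 (4 bits), x=1.3 gives
+        # q = clamp(round(1.3 / 0.5) + 8, 0, 15) = 11 and x_rec = 0.5 * (11 - 8) = 1.5;
+        # the 0.2 residual is the error that GPTQ spreads over the remaining columns.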
+ q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
+ x_rec = scale * (q - zero)
+ return x_rec
+
+ def find_params_weight(self, x):
+ dev = x.device
+
+ shape = x.shape
+ if self.perchannel:
+ x = x.flatten(1)
+ else:
+ x = x.flatten().unsqueeze(0)
+
+ tmp = torch.zeros(x.shape[0], device=dev)
+ xmin = torch.minimum(x.min(1)[0], tmp)
+ xmax = torch.maximum(x.max(1)[0], tmp)
+
+ if self.sym:
+ xmax = torch.maximum(torch.abs(xmin), xmax)
+ tmp = xmin < 0
+ if torch.any(tmp):
+ xmin[tmp] = -xmax[tmp]
+ tmp = (xmin == 0) & (xmax == 0)
+ xmin[tmp] = -1
+ xmax[tmp] = +1
+
+ scale = (xmax - xmin) / self.maxq
+ if self.sym:
+ zero = torch.full_like(scale, (self.maxq + 1) / 2)
+ else:
+ zero = torch.round(-xmin / scale)
+
+ if not self.perchannel:
+ tmp = shape[0]
+ scale = scale.repeat(tmp)
+ zero = zero.repeat(tmp)
+
+ shape = [-1] + [1] * (len(shape) - 1)
+ scale = scale.reshape(shape)
+ zero = zero.reshape(shape)
+ return scale, zero
+
+ def collect_input_stats(self, _1, inp, _2):
+ inp = inp[0].detach()
+ self.last_inp = inp
+ if len(inp.shape) == 2:
+ inp = inp.unsqueeze(0)
+ tmp = inp.shape[0]
+ if len(inp.shape) == 3:
+ inp = inp.reshape((-1, inp.shape[-1]))
+ inp = inp.t()
+ self.H *= self.nsamples / (self.nsamples + tmp)
+ self.nsamples += tmp
+ # inp = inp.float()
+ inp = math.sqrt(2 / self.nsamples) * inp.float()
+ # self.H += 2 / self.nsamples * inp.matmul(inp.t())
+ self.H += inp.matmul(inp.t())
+
+ def quantize(self):
+ W = self.linear_module.weight.detach().to(dtype=torch.float, copy=True)
+
+ scale, zero = self.find_params_weight(W)
+ self.scales[:] = scale
+ self.zeros[:] = zero
+
+ H = self.H
+ del self.H
+ dead = torch.diag(H) == 0
+ H[dead, dead] = 1
+ W[:, dead] = 0
+ if self.actorder:
+ perm = torch.argsort(torch.diag(H), descending=True)
+ W = W[:, perm]
+ H = H[perm][:, perm]
+
+ Losses = torch.zeros_like(W)
+ Q = torch.zeros_like(W)
+
+ damp = self.percdamp * torch.mean(torch.diag(H))
+ diag = torch.arange(self.columns, device=self.dev)
+ H[diag, diag] += damp
+ H = torch.linalg.cholesky(H)
+ H = torch.cholesky_inverse(H)
+ H = torch.linalg.cholesky(H, upper=True)
+ Hinv = H
+
+ for i1 in range(0, self.columns, self.blocksize):
+ i2 = min(i1 + self.blocksize, self.columns)
+ count = i2 - i1
+
+ W1 = W[:, i1:i2].clone()
+ Q1 = torch.zeros_like(W1)
+ Err1 = torch.zeros_like(W1)
+ Losses1 = torch.zeros_like(W1)
+ Hinv1 = Hinv[i1:i2, i1:i2]
+
+ for i in range(count):
+ w = W1[:, i]
+ d = Hinv1[i, i]
+
+ if self.groupsize != -1:
+ if (i1 + i) % self.groupsize == 0:
+ scale, zero = self.find_params_weight(
+ W[:, (i1 + i) : (i1 + i + self.groupsize)]
+ )
+ self.scales[:, (i1 + i) // self.groupsize] = scale
+                        self.zeros[:, (i1 + i) // self.groupsize] = zero
+
+ q = self.quantize_weight(w.unsqueeze(1), scale, zero, self.maxq)
+ q = q.squeeze(1)
+ assert q.dim() == 1
+ Q1[:, i] = q
+ Losses1[:, i] = (w - q) ** 2 / d**2
+
+ err1 = (w - q) / d
+ W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
+ Err1[:, i] = err1
+
+ Q[:, i1:i2] = Q1
+ Losses[:, i1:i2] = Losses1 / 2
+
+ W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
+
+ if self.actorder:
+ invperm = torch.argsort(perm)
+ Q = Q[:, invperm]
+
+ weight = Q.reshape(self.linear_module.weight.shape).to(
+ self.linear_module.weight.data.dtype
+ )
+ error = torch.sum(Losses).item()
+
+ q_module = ColBlockQuantizedLinear(
+ self.linear_module.in_features,
+ self.linear_module.out_features,
+ self.linear_module.bias is not None,
+ bits=self.bits,
+ tile_cols=self.groupsize,
+ ).to(self.dev)
+ q_module.scales = self.scales
+ q_module.zeros = self.zeros
+ q_module.pack_weight(weight)
+ q_module.bias = self.linear_module.bias
+ return q_module, error
diff --git a/lit_llama/tokenizer.py b/lit_llama/tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb681e3f51e697902cd3cb0bdcadee4ac3306f2d
--- /dev/null
+++ b/lit_llama/tokenizer.py
@@ -0,0 +1,49 @@
+import os
+from pathlib import Path
+from typing import Optional
+
+import torch
+from sentencepiece import SentencePieceProcessor, SentencePieceTrainer
+
+
+class Tokenizer:
+ """Tokenizer for LLaMA."""
+
+ def __init__(self, model_path: Path) -> None:
+ self.processor = SentencePieceProcessor(model_file=str(model_path))
+ self.bos_id = self.processor.bos_id()
+ self.eos_id = self.processor.eos_id()
+ self.pad_id = self.processor.pad_id()
+
+ @property
+ def vocab_size(self) -> int:
+ return self.processor.vocab_size()
+
+ def encode(
+ self,
+ string: str,
+ bos: bool = True,
+ eos: bool = False,
+ max_length: int = -1,
+ pad: bool = False,
+ device: Optional[torch.device] = None
+ ) -> torch.Tensor:
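+        # Tokenize with SentencePiece, optionally add BOS/EOS ids, truncate to
+        # `max_length` (when positive) and, if `pad` is set, right-pad with `pad_id`
+        # up to `max_length`. For example (a sketch), encode("Hello", eos=True,
+        # max_length=8, pad=True) yields an int tensor of length 8.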
+ tokens = self.processor.encode(string)
+ if bos:
+ tokens = [self.bos_id] + tokens
+ if eos:
+ tokens = tokens + [self.eos_id]
+ if max_length > 0:
+ tokens = tokens[:max_length]
+ if pad and len(tokens) < max_length:
+ tokens += [self.pad_id] * (max_length - len(tokens))
+
+ return torch.tensor(tokens, dtype=torch.int, device=device)
+
+ def decode(self, tokens: torch.Tensor) -> str:
+ return self.processor.decode(tokens.tolist())
+
+ @staticmethod
+ def train(input: str, destination: str, vocab_size=32000) -> None:
+ model_prefix = os.path.join(destination, "tokenizer")
+ SentencePieceTrainer.Train(input=input, model_prefix=model_prefix, vocab_size=vocab_size)
diff --git a/lit_llama/utils.py b/lit_llama/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c78a9bc59a385cacb8ed46e525a355117467493
--- /dev/null
+++ b/lit_llama/utils.py
@@ -0,0 +1,316 @@
+"""Utility functions for training and inference."""
+
+import functools
+from pathlib import Path
+import pickle
+import warnings
+from io import BytesIO
+
+import torch
+import torch.utils._device
+from lightning.fabric.strategies import DeepSpeedStrategy, FSDPStrategy
+from torch.distributed.fsdp import FullStateDictConfig
+from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+from torch.distributed.fsdp import StateDictType
+
+
+llama_model_sizes = {
+ 4096: "7B", # 7B n_embd=4096
+ 5120: "13B", # 13B n_embd=5120
+ 6656: "30B", # 30B n_embd=6656
+ 8192: "65B", # 65B n_embd=8192
+}
+
+
+def llama_model_lookup(checkpoint: dict) -> str:
+ """Returns the LLaMA model name from the checkpoint.
+
+    Checks the width of the lm_head.weight matrix, as this uniquely identifies the model size.
+ """
+ embedding_size = checkpoint["lm_head.weight"].shape[1]
+ return llama_model_sizes[embedding_size]
+
+
+def find_multiple(n: int, k: int) -> int:
+ if n % k == 0:
+ return n
+ return n + k - (n % k)
+
+
+def save_model_checkpoint(fabric, model, file_path):
+ """Handles boilerplate logic for retrieving and saving the state_dict.
+
+ This will be upstreamed to Fabric soon.
+ """
+ file_path = Path(file_path)
+
+ if isinstance(fabric.strategy, DeepSpeedStrategy):
+ from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+
+ fabric.save(file_path, {"model": model})
+ fabric.barrier()
+ if fabric.global_rank == 0:
+ # Create a consolidated checkpoint with the same name next to the deepspeed checkpoint
+ convert_zero_checkpoint_to_fp32_state_dict(file_path, file_path.with_suffix(".pth"))
+ return
+
+ if isinstance(fabric.strategy, FSDPStrategy):
+ save_policy = FullStateDictConfig(offload_to_cpu=(fabric.world_size > 1), rank0_only=True)
+ with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, save_policy):
+ state_dict = model._forward_module.state_dict()
+ else:
+ state_dict = model.state_dict()
+
+ if fabric.global_rank == 0:
+ torch.save(state_dict, file_path)
+ fabric.barrier()
+
+
+class EmptyInitOnDevice(torch.overrides.TorchFunctionMode):
+ def __init__(self, device=None, dtype=None, quantization_mode=None):
+ """
+ Create tensors with given device and dtype and don't run initialization
+ (but instead use "empty tensors", i.e. uninitialized memory).
+
+ device: `torch.device` to work with
+ dtype: `torch.dtype` to work with
+ quantization_mode: optional string, quantization mode to work with, default `None`.
+            Available modes: `llm.int8` bitsandbytes LLM.int8 quantization (only on GPU)
+                             `gptq.int4`, `gptq.int8`: GPTQ pre-quantized models
+
+ Example::
+ with EmptyInitOnDevice("cuda", dtype=torch.bfloat16):
+ model = LLaMA.from_name('7B')
+            model.load_state_dict(torch.load('llama-lit/7B/lit-llama.pth'))
+        """
+
+ self.quantization_mode = quantization_mode
+ self.quantized_linear_cls = None
+ if self.quantization_mode == 'llm.int8':
+ if device.type != "cuda":
+ raise ValueError("Quantization is only supported on the GPU.")
+ from .quantization import Linear8bitLt
+ self.quantized_linear_cls = Linear8bitLt
+ elif self.quantization_mode == 'gptq.int4':
+ from .quantization import ColBlockQuantizedLinear
+ self.quantized_linear_cls = functools.partial(ColBlockQuantizedLinear, bits=4, tile_cols=-1)
+ elif self.quantization_mode == 'gptq.int8':
+ from .quantization import ColBlockQuantizedLinear
+ self.quantized_linear_cls = functools.partial(ColBlockQuantizedLinear, bits=8, tile_cols=-1)
+ elif self.quantization_mode is not None:
+ raise RuntimeError(f"unknown quantization mode {self.quantization_mode}")
+ self.device = device
+ self.dtype = dtype
+
+ def __enter__(self):
+        if self.quantized_linear_cls is not None:
+ self.torch_linear_cls = torch.nn.Linear
+ torch.nn.Linear = self.quantized_linear_cls
+ return super().__enter__()
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.quantized_linear_cls is not None:
+ torch.nn.Linear = self.torch_linear_cls
+ return super().__exit__(exc_type, exc_val, exc_tb)
+
+ def __torch_function__(self, func, types, args=(), kwargs=None):
+ kwargs = kwargs or {}
+ if getattr(func, "__module__", None) == "torch.nn.init":
+ if "tensor" in kwargs:
+ return kwargs["tensor"]
+ else:
+ return args[0]
+ if (
+ self.device is not None
+ and func in torch.utils._device._device_constructors()
+ and kwargs.get("device") is None
+ ):
+ kwargs["device"] = self.device
+ if (
+ self.dtype is not None
+ and func in torch.utils._device._device_constructors()
+ and kwargs.get("dtype") is None
+ ):
+ kwargs["dtype"] = self.dtype
+ return func(*args, **kwargs)
+
+
+# this is taken from torchhacks https://github.com/lernapparat/torchhacks
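+# The idea, roughly: the checkpoint's pickle is read with the storages replaced by
+# meta-device placeholders, so tensors are rebuilt as `NotYetLoadedTensor`s that only
+# pull their bytes out of the checkpoint zip archive the first time they are actually
+# used (for example when `load_state_dict` copies them into real parameters).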
+
+
+class NotYetLoadedTensor:
+ def __init__(self, metatensor, archiveinfo, storageinfo, rebuild_args):
+ self.metatensor = metatensor
+ self.archiveinfo = archiveinfo
+ self.storageinfo = storageinfo
+ self.rebuild_args = rebuild_args
+
+ @classmethod
+ def rebuild_from_type_v2(cls, func, new_type, args, state, *, archiveinfo=None):
+ ret = func(*args)
+ if isinstance(ret, NotYetLoadedTensor):
+ old_lt = ret._load_tensor
+
+ def _load_tensor():
+ t = old_lt()
+ return torch._tensor._rebuild_from_type_v2(
+ lambda: t, new_type, (), state
+ )
+
+ ret._load_tensor = _load_tensor
+ return ret
+ return torch._tensor._rebuild_from_type_v2(func, new_type, args, state)
+
+ @classmethod
+ def rebuild_parameter(
+ cls, data, requires_grad, backward_hooks, *, archiveinfo=None
+ ):
+ if isinstance(data, NotYetLoadedTensor):
+ old_lt = data._load_tensor
+
+ def _load_tensor():
+ t = old_lt()
+ return torch._utils._rebuild_parameter(t, requires_grad, backward_hooks)
+
+ data._load_tensor = _load_tensor
+ return data
+ return torch._utils._rebuild_parameter(data, requires_grad, backward_hooks)
+
+ @classmethod
+ def rebuild_tensor_v2(
+ cls,
+ storage,
+ storage_offset,
+ size,
+ stride,
+ requires_grad,
+ backward_hooks,
+ metadata=None,
+ *,
+ archiveinfo=None,
+ ):
+ rebuild_args = (
+ storage_offset,
+ size,
+ stride,
+ requires_grad,
+ backward_hooks,
+ metadata,
+ )
+ metatensor = torch._utils._rebuild_tensor_v2(
+ storage,
+ storage_offset,
+ size,
+ stride,
+ requires_grad,
+ backward_hooks,
+ metadata,
+ )
+ storageinfo = storage.archiveinfo
+ return NotYetLoadedTensor(metatensor, archiveinfo, storageinfo, rebuild_args)
+
+ def _load_tensor(self):
+ name, storage_cls, fn, device, size = self.storageinfo
+ dtype = self.metatensor.dtype
+
+ uts = (
+ self.archiveinfo.zipfile_context.zf.get_storage_from_record(
+ f"data/{fn}",
+ size * torch._utils._element_size(dtype),
+ torch.UntypedStorage,
+ )
+ ._typed_storage()
+ ._untyped_storage
+ )
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ storage = torch.storage.TypedStorage(
+ wrap_storage=uts, dtype=self.metatensor.dtype, _internal=True
+ )
+ tensor = torch._utils._rebuild_tensor_v2(storage, *self.rebuild_args)
+ return tensor
+
+ @classmethod
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
+ if kwargs is None:
+ kwargs = {}
+ loaded_args = [
+ (a._load_tensor() if isinstance(a, NotYetLoadedTensor) else a) for a in args
+ ]
+ res = func(*loaded_args, **kwargs)
+ # gc.collect would be costly here, maybe do it optionally
+ return res
+
+ def __getattr__(self, name):
+ # properties
+ ## TODO: device, is_...??
+ ## TODO: mH, mT, H, T, data, imag, real
+ ## name ???
+ if name in {
+ "dtype",
+ "grad",
+ "grad_fn",
+ "layout",
+ "names",
+ "ndim",
+ "output_nr",
+ "requires_grad",
+ "retains_grad",
+ "shape",
+ "volatile",
+ }:
+ return getattr(self.metatensor, name)
+ if name in {"size"}:
+ return getattr(self.metatensor, name)
+ # materializing with contiguous is needed for quantization
+ if name in {"contiguous"}:
+ return getattr(self._load_tensor(), name)
+
+ raise AttributeError(f"{type(self)} does not have {name}")
+
+ def __repr__(self):
+ return f"NotYetLoadedTensor({repr(self.metatensor)})"
+
+
+class LazyLoadingUnpickler(pickle.Unpickler):
+ def __init__(self, file, zipfile_context):
+ super().__init__(file)
+ self.zipfile_context = zipfile_context
+
+ def find_class(self, module, name):
+ res = super().find_class(module, name)
+ if module == "torch._utils" and name == "_rebuild_tensor_v2":
+ return functools.partial(
+ NotYetLoadedTensor.rebuild_tensor_v2, archiveinfo=self
+ )
+ elif module == "torch._tensor" and name == "_rebuild_from_type_v2":
+ return functools.partial(
+ NotYetLoadedTensor.rebuild_from_type_v2, archiveinfo=self
+ )
+ elif module == "torch._utils" and name == "_rebuild_parameter":
+ return functools.partial(
+ NotYetLoadedTensor.rebuild_parameter, archiveinfo=self
+ )
+ return res
+
+ def persistent_load(self, pid):
+ name, cls, fn, device, size = pid
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ s = torch.storage.TypedStorage(dtype=cls().dtype, device="meta")
+ s.archiveinfo = pid
+ return s
+
+
+class lazy_load:
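+    """Context manager that loads a checkpoint lazily.
+
+    Tensors stay as `NotYetLoadedTensor` placeholders and are only read from the
+    checkpoint archive when first used. A sketch of the intended usage (path shown
+    for illustration):
+
+        with lazy_load("checkpoints/lit-llama/7B/lit-llama.pth") as checkpoint:
+            model.load_state_dict(checkpoint)
+    """
+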
+ def __init__(self, fn):
+ self.zf = torch._C.PyTorchFileReader(str(fn))
+ with BytesIO(self.zf.get_record("data.pkl")) as pkl:
+ mup = LazyLoadingUnpickler(pkl, self)
+ self.sd = mup.load()
+
+ def __enter__(self):
+ return self.sd
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ del self.zf # I don't think there is a way to force closing...
+ self.zf = None
diff --git a/prepare.sh b/prepare.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6b43a17924d5cdeda0ed3018ea1783fcaa2ad12b
--- /dev/null
+++ b/prepare.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+git clone https://github.com/Lightning-AI/lit-llama
+cd lit-llama
+pip install -r requirements.txt
+sudo apt-get install linux-headers-$(uname -r)
+# sudo apt-key del 7fa2af80
+# wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin
+# sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600
+# wget https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda-repo-ubuntu2004-12-1-local_12.1.1-530.30.02-1_amd64.deb
+# sudo dpkg -i cuda-repo-ubuntu2004-12-1-local_12.1.1-530.30.02-1_amd64.deb
+# sudo cp /var/cuda-repo-ubuntu2004-12-1-local/cuda-*-keyring.gpg /usr/share/keyrings/
+# sudo apt-get update
+# sudo apt-get -y install cuda
+# sudo apt-get install zlib1g
+
+wget 'https://www.nvidia.com/content/DriverDownloads/confirmation.php?url=/XFree86/Linux-x86_64/470.182.03/NVIDIA-Linux-x86_64-470.182.03.run&lang=us&type=TITAN'
+sudo sh ./NVIDIA-Linux-x86_64-470.182.03.run
+wget 'http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda_11.0.2_450.51.05_linux.run'
+sudo sh cuda_11.0.2_450.51.05_linux.run
+
+wget 'https://developer.download.nvidia.com/compute/machine-learning/cudnn/secure/8.0.2.39/11.0_20200724/cudnn-11.0-linux-x64-v8.0.2.39.tgz?EPRBj3t55swQVnJiFnH4gQ3dNCqs3Xb3cJ4SBngE0OSMK4OGqBZdYVocwne3qqgP5H-GA62hpATuiLL-6AI-cKQjBobbYiUCs3uZqGux62j4EwE8AsxdTjP0yCLxfnasliLnUBMQ76ojrYkvUBNYccmKS5maJp4W3UjGCa19EJV9ibVJZXedTZ_Yfgp9Mv95_mZoL3rD5q9O2Qy0GvI=&t=eyJscyI6ImdzZW8iLCJsc2QiOiJodHRwczovL3d3dy5nb29nbGUuY29tLyJ9'
+tar -xzvf cudnn-11.0-linux-x64-v8.0.2.39.tgz
+sudo cp cuda/include/cudnn*.h /usr/local/cuda/include
+sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
+sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn*
+
+#sudo reboot
\ No newline at end of file
diff --git a/quantize.py b/quantize.py
new file mode 100644
index 0000000000000000000000000000000000000000..b98048cb2caab9611344123ba9e7b508acae49e1
--- /dev/null
+++ b/quantize.py
@@ -0,0 +1,229 @@
+# This adapts GPTQ's quantization process: https://github.com/IST-DASLab/gptq/
+# E. Frantar et al GPTQ: Accurate Post-training Compression for GPT, arXiv:2210.17323
+# portions copyright by the authors licensed under the Apache License 2.0
+import gc
+import sys
+import time
+from pathlib import Path
+from typing import Optional
+
+import torch
+from datasets import load_dataset
+
+from lit_llama import LLaMA, Tokenizer
+from lit_llama.quantization import GPTQQuantizer
+from lit_llama.utils import EmptyInitOnDevice, llama_model_lookup
+
+
+def get_sample_data():
+ traindata = load_dataset(
+ "allenai/c4",
+ "allenai--c4",
+ data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
+ split="train",
+ )
+ # heuristic for the data size?
+ txt = "\n".join(
+ traindata[i]["text"] for i in torch.randperm(len(traindata))[:1000].tolist()
+ )
+ return txt
+
+
+@torch.no_grad()
+def llama_blockwise_quantization(
+ model, sample_inputs, working_device, *, bits=4, groupsize=-1
+):
+ # This is the classic post-training quantization
+ # of all linear layers. We quantize in order, i.e.
+ # when observing the inputs, we use the outputs
+ # of the previously quantized layers rather than
+ # doing them all at once.
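+    # Concretely, for each linear submodule of a block (in order): register a forward
+    # hook, run the calibration samples through the block to collect that layer's input
+    # statistics (H accumulates 2/n * sum_i x_i x_i^T), quantize the layer with GPTQ and
+    # swap it into the block. Once every submodule is quantized, the block's outputs are
+    # recomputed with the quantized weights and become the next block's inputs.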
+
+ print("Getting inputs for first block")
+ print(model)
+ print(model.config)
+
+ model.transformer.wte.to(working_device)
+ inps = []
+ for batch in sample_inputs:
+ inps.append(model.transformer.wte(batch[None].to(working_device)))
+ inps = torch.cat(inps, dim=0)
+ model.transformer.wte.to("cpu")
+ torch.cuda.empty_cache()
+
+ print("Starting to quantize blocks")
+ outs = torch.zeros_like(inps)
+
+    # We list the linear submodules explicitly rather than relying on enumeration;
+    # the original GPTQ code bundled the two MLP fc layers together.
+    # This could be automated with hooks and another iteration over the modules.
+ submodules_to_process = [
+ "attn.c_attn",
+ "attn.c_proj",
+ "mlp.c_fc1",
+ "mlp.c_fc2",
+ "mlp.c_proj",
+ ]
+
+ for i, block in enumerate(model.transformer.h):
+ block.to(working_device)
+
+ for name in submodules_to_process:
+ print(i, name, end=" ")
+ t0 = time.perf_counter()
+ print("collecting stats", end=" ")
+ sys.stdout.flush()
+ module = block.get_submodule(name)
+
+ gptq = GPTQQuantizer(
+ module,
+ bits=bits,
+ groupsize=groupsize,
+ actorder=(groupsize == -1),
+ )
+ handle = module.register_forward_hook(gptq.collect_input_stats)
+ for j in range(inps.size(0)):
+ outs[j : j + 1] = block(inps[j : j + 1])
+
+ handle.remove()
+
+ print("quantizing", end=" ")
+ sys.stdout.flush()
+ q_module, error = gptq.quantize()
+
+ # replace the linear module with the quantized module
+ pname, dname = name.rsplit(".", 1)
+ setattr(block.get_submodule(pname), dname, q_module)
+
+ # cleanup in an attempt to not run out of memory
+ del gptq
+ gc.collect()
+ torch.cuda.empty_cache()
+ t1 = time.perf_counter()
+ print(f"time {int(t1 - t0 + 0.5)}s quantization error {error:.1f}")
+
+ for j in range(inps.size(0)):
+ outs[j : j + 1] = block(inps[j : j + 1])
+
+ block.cpu()
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ # the outputs are the next block's inputs and we'll reuse the old inputs
+ inps, outs = outs, inps
+
+ model.transformer.ln_f.to(working_device)
+ for j in range(inps.size(0)):
+ outs[j : j + 1] = model.transformer.ln_f(inps[j : j + 1])
+ model.transformer.ln_f.to("cpu")
+ inps, outs = outs, inps
+
+ model.lm_head.to(working_device)
+ gptq = GPTQQuantizer(
+ model.lm_head,
+ bits=bits,
+ groupsize=groupsize,
+ actorder=(groupsize == -1),
+ )
+ handle = model.lm_head.register_forward_hook(gptq.collect_input_stats)
+ for j in range(inps.size(0)):
+ model.lm_head(inps[j : j + 1])
+ handle.remove()
+ q_module, error = gptq.quantize()
+ model.lm_head = q_module
+ model.lm_head.to("cpu")
+
+
+def main(
+ *,
+ checkpoint_path: Optional[Path] = None,
+ output_path: Optional[Path] = None,
+ tokenizer_path: Optional[Path] = None,
+ n_samples: int = 128,
+ dtype: str = "float32",
+ quantize: Optional[str] = None,
+) -> None:
+    """Quantizes a pre-trained LLaMA model with GPTQ and saves the resulting state dict.
+
+    Args:
+        checkpoint_path: The checkpoint path to load.
+        output_path: Path to write the quantized model's state dict to.
+        tokenizer_path: The tokenizer path to load.
+        n_samples: Number of example inputs to use for statistics (default: 128)
+        dtype: The dtype to use to load the model.
+        quantize: Mode to quantize the model to:
+            ``"gptq.int4"``: GPTQ 4-bit mode.
+            ``"gptq.int8"``: GPTQ 8-bit mode.
+            Note that ``"llm.int8"`` does not need a quantization step.
+    """
+ if not checkpoint_path:
+        checkpoint_path = Path("./checkpoints/lit-llama/7B/lit-llama.pth")
+ if not tokenizer_path:
+ tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+ assert checkpoint_path.is_file()
+ assert tokenizer_path.is_file()
+ assert output_path.parent.is_dir() and (
+ not output_path.exists() or output_path.is_file()
+ )
+
+ device = "cuda"
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ if quantize == "gptq.int4":
+ bits = 4
+ elif quantize == "gptq.int8":
+ bits = 8
+ else:
+ raise RuntimeError(f"unknown/unsupported quantization mode {quantize}")
+
+ # we avoid loading the entire model on the GPU and do this block by block
+ with EmptyInitOnDevice(
+ device="cpu",
+ dtype=dtype,
+ ):
+ print("Loading model ...", file=sys.stderr)
+ t0 = time.time()
+ checkpoint = torch.load(checkpoint_path)
+ name = llama_model_lookup(checkpoint)
+ model = LLaMA.from_name(name)
+ model.load_state_dict(checkpoint)
+ print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)
+
+ model.eval()
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ test_string = get_sample_data()
+ encoded_text = tokenizer.encode(
+ test_string,
+ bos=True,
+ eos=False,
+ )
+ block_size = 2048 # this is for compat with gptq, and indeed we get much worse beyond this (https://github.com/facebookresearch/llama/blob/57b0eb62de0636e75af471e49e2f1862d908d9d8/llama/model.py#L30)
+ encoded_text = encoded_text[: n_samples * block_size].reshape(n_samples, block_size)
+ t0 = time.perf_counter()
+
+ llama_blockwise_quantization(model, encoded_text, device, bits=bits)
+
+ torch.save(model.state_dict(), output_path)
+
+ t = time.perf_counter() - t0
+ print(
+ f"\n\nTime for quantization: {t:.02f} sec total",
+ file=sys.stderr,
+ )
+ print(
+ f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB",
+ file=sys.stderr,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ torch.set_float32_matmul_precision("high")
+ CLI(main)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e8706b998430472e53186da1d281d11e6e755589
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,9 @@
+torch>=2.0.0
+lightning @ git+https://github.com/Lightning-AI/lightning@master
+sentencepiece
+tqdm # convert_checkpoint.py
+numpy # train.py dataset memmap
+jsonargparse[signatures] # generate.py, convert_checkpoint.py CLI
+bitsandbytes # quantization.py
+datasets # evaluate.py
+zstandard # prepare_redpajama.py
diff --git a/scripts/__pycache__/prepare_alpaca.cpython-311.pyc b/scripts/__pycache__/prepare_alpaca.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4d8a4c67761c91214e4b47e6728ef2dad71691f
Binary files /dev/null and b/scripts/__pycache__/prepare_alpaca.cpython-311.pyc differ
diff --git a/scripts/convert_checkpoint.py b/scripts/convert_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..a10c9585425f319d5d1d8bf02ca9d177c9cfcb2d
--- /dev/null
+++ b/scripts/convert_checkpoint.py
@@ -0,0 +1,141 @@
+import gc
+import shutil
+from pathlib import Path
+from typing import Dict
+
+import torch
+from tqdm import tqdm
+
+"""
+Sample usage:
+
+```bash
+python -m scripts.convert_checkpoint -h
+
+python -m scripts.convert_checkpoint converted
+```
+"""
+
+
+def convert_state_dict(state_dict: Dict[str, torch.Tensor], dtype: torch.dtype = torch.float32) -> Dict[str, torch.Tensor]:
+ converted = {}
+ converted["transformer.wte.weight"] = state_dict["tok_embeddings.weight"].to(dtype)
+ converted["lm_head.weight"] = state_dict["output.weight"].to(dtype)
+ converted["transformer.ln_f.scale"] = state_dict["norm.weight"].to(dtype)
+
+ for layer_idx in sorted(set([k.split(".")[1] for k in state_dict if k.startswith("layers")])):
+ # attention
+ # the wq, wk, wv from the FB model are stacked in our model as c_attn
+ converted[f"transformer.h.{layer_idx}.attn.c_attn.weight"] = torch.cat(
+ (
+ state_dict[f"layers.{layer_idx}.attention.wq.weight"].to(dtype),
+ state_dict[f"layers.{layer_idx}.attention.wk.weight"].to(dtype),
+ state_dict[f"layers.{layer_idx}.attention.wv.weight"].to(dtype),
+ )
+ )
+ converted[f"transformer.h.{layer_idx}.attn.c_proj.weight"] = state_dict[
+ f"layers.{layer_idx}.attention.wo.weight"
+ ].to(dtype)
+ # mlp
+ converted[f"transformer.h.{layer_idx}.mlp.c_fc1.weight"] = state_dict[
+ f"layers.{layer_idx}.feed_forward.w1.weight"
+ ].to(dtype)
+ converted[f"transformer.h.{layer_idx}.mlp.c_proj.weight"] = state_dict[
+ f"layers.{layer_idx}.feed_forward.w2.weight"
+ ].to(dtype)
+ converted[f"transformer.h.{layer_idx}.mlp.c_fc2.weight"] = state_dict[
+ f"layers.{layer_idx}.feed_forward.w3.weight"
+ ].to(dtype)
+ # rms norm
+ converted[f"transformer.h.{layer_idx}.rms_1.scale"] = state_dict[f"layers.{layer_idx}.attention_norm.weight"].to(dtype)
+ converted[f"transformer.h.{layer_idx}.rms_2.scale"] = state_dict[f"layers.{layer_idx}.ffn_norm.weight"].to(dtype)
+ return converted
+
+
+shard_dims = {
+ "lm_head.weight": 0,
+ "wte.weight": 1,
+ "attn.c_attn.weight": 0,
+ "attn.c_proj.weight": 1,
+ "mlp.c_fc1.weight": 0,
+ "mlp.c_fc2.weight": 0,
+ "mlp.c_proj.weight": 1
+}
+
+
+def meta_weights_for_nano_model(
+ *,
+ output_dir: Path = Path("checkpoints/lit-llama"),
+ ckpt_dir: Path = Path("checkpoints/llama/"),
+ model_size: str = "7B",
+ dtype: str = "float32",
+) -> None:
+ output_dir = output_dir / model_size
+ ckpt_dir = ckpt_dir / model_size
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # the tokenizer is the same for all model sizes, so we store it in the parent dir
+ shutil.copy(ckpt_dir.parent / "tokenizer.model", output_dir.parent)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+    checkpoint_files = sorted(ckpt_dir.glob("*.pth"))
+ n_checkpoints = len(checkpoint_files)
+
+ if n_checkpoints == 0:
+ raise RuntimeError(f"No checkpoints were found at ckpt_dir {ckpt_dir}. `consolidated.0*.pth` files expected at that location.")
+
+ # for the bigger models, there are multiple model-parallel checkpoints
+ # and we combine them into one single file
+ combined = None
+ for file in tqdm(checkpoint_files, total=n_checkpoints):
+ checkpoint = torch.load(file, map_location="cpu")
+ converted = convert_state_dict(checkpoint, dtype=dtype)
+ if combined is None:
+ combined = converted
+ continue
+ for name, param in converted.items():
+ dim = None
+ for k, d in shard_dims.items():
+ if k in name:
+ dim = d
+ break
+ if dim is None:
+ # Extra check: assert that tensors are the same if not sharded
+ # assert torch.allclose(combined[name], param)
+ continue
+ combined[name] = torch.cat((combined[name], param), dim=dim)
+
+ del checkpoint
+ del converted
+ gc.collect()
+
+ for name, param in combined.items():
+ if "c_attn" not in name:
+ continue
+
+ # Turn [Q1, K1, V1, Q2, K2, V2, ...] into [Q1, Q2, ..., K1, K2, .., V1, V2, ...]
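+        # For example, with n_checkpoints=2 shards and mat_len=1 row per matrix, the
+        # concatenated c_attn rows [Q1, K1, V1, Q2, K2, V2] are reordered in place to
+        # [Q1, Q2, K1, K2, V1, V2], so the Q, K and V projections each form one
+        # contiguous block again.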
+
+ src_chunk_len = param.shape[0] // n_checkpoints
+ mat_len = src_chunk_len // 3
+ dst_chunk_len = mat_len * n_checkpoints
+ attn = torch.clone(param)
+ for i in range(n_checkpoints):
+ for j in range(3):
+ param[j * dst_chunk_len + i * mat_len: j * dst_chunk_len + (i+1) * mat_len] = \
+ attn[i * src_chunk_len + j * mat_len: i * src_chunk_len + (j+1) * mat_len]
+
+ del attn
+ gc.collect()
+
+ torch.save(combined, output_dir / "lit-llama.pth")
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(meta_weights_for_nano_model)
diff --git a/scripts/convert_hf_checkpoint.py b/scripts/convert_hf_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ee26239a9ed8624757ed88877f0f60f167ec369
--- /dev/null
+++ b/scripts/convert_hf_checkpoint.py
@@ -0,0 +1,137 @@
+import gc
+import json
+import shutil
+import sys
+from pathlib import Path
+
+import torch
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+from lit_llama.model import LLaMA, LLaMAConfig
+from lit_llama.utils import EmptyInitOnDevice
+
+
+@torch.no_grad()
+def convert_hf_checkpoint(
+ *,
+ output_dir: Path = Path("checkpoints/lit-llama/7B"),
+ checkpoint_dir: Path = Path("checkpoints/hf-llama/7B"),
+ model_size: str = "7B",
+ dtype: str = "float32",
+ verify: bool = False,
+) -> None:
+ """
+ Perform the reverse operation of: https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py
+ """
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # the tokenizer is the same for all model sizes, so we store it in the parent dir
+ shutil.copy(checkpoint_dir / "tokenizer.model", output_dir.parent)
+
+ dt = getattr(torch, dtype, None)
+ if not isinstance(dt, torch.dtype):
+ raise ValueError(f"{dtype} is not a valid dtype.")
+ dtype = dt
+
+ print("Initializing lit-llama")
+ config = LLaMAConfig.from_name(model_size)
+
+ with EmptyInitOnDevice(device="cpu", dtype=dtype):
+ model = LLaMA(config)
+
+ qkv_size = model.transformer.h[0].attn.c_attn.weight.shape[0] // 3
+
+ # initialize a new empty state dict to hold our new weights
+ sd = model.state_dict()
+
+ # Load the json file containing weight mapping
+ pytorch_bin_map_json_path = checkpoint_dir / "pytorch_model.bin.index.json"
+ with open(pytorch_bin_map_json_path) as json_map:
+ bin_index = json.load(json_map)
+
+ bin_files = set(el for el in bin_index["weight_map"].values())
+
+ def permute(w):
+ dim = config.n_embd
+ return (
+ w.view(config.n_head, 2, dim // config.n_head // 2, dim)
+ .transpose(1, 2)
+ .reshape(dim, dim)
+ )
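+    # `permute` undoes the head-wise reordering that the HF conversion script applies to
+    # the q/k projection weights for its rotary-embedding layout, so that the copied
+    # weights match this repo's RoPE implementation.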
+
+ weight_map = {
+ "self_attn.o_proj.weight": "attn.c_proj.weight",
+ "self_attn.q_proj.weight": "attn.c_attn.weight",
+ "self_attn.k_proj.weight": "attn.c_attn.weight",
+ "self_attn.v_proj.weight": "attn.c_attn.weight",
+ "mlp.gate_proj.weight": "mlp.c_fc1.weight",
+ "mlp.up_proj.weight": "mlp.c_fc2.weight",
+ "mlp.down_proj.weight": "mlp.c_proj.weight",
+ "input_layernorm.weight": "rms_1.scale",
+ "post_attention_layernorm.weight": "rms_2.scale",
+ "model.embed_tokens.weight": "transformer.wte.weight",
+ "model.norm.weight": "transformer.ln_f.scale",
+ "lm_head.weight": "lm_head.weight"
+ }
+
+ for bin_file in bin_files:
+ print("Processing", bin_file)
+
+ hf_weights = torch.load(checkpoint_dir / bin_file, map_location="cpu")
+
+ for name, param in hf_weights.items():
+ param = param.to(dtype=dtype)
+ if "rotary_emb.inv_freq" in name:
+ continue
+ if "model.layers" in name:
+ block_id = int(name.split(".")[2])
+ from_name = ".".join(name.split(".")[3:])
+ to_name = weight_map[from_name]
+
+ if "q_proj" in name:
+ sd[f"transformer.h.{block_id}.{to_name}"][:qkv_size] = permute(param)
+ elif "k_proj" in name:
+ sd[f"transformer.h.{block_id}.{to_name}"][qkv_size:-qkv_size] = permute(param)
+ elif "v_proj" in name:
+ sd[f"transformer.h.{block_id}.{to_name}"][-qkv_size:] = param
+ else:
+ sd[f"transformer.h.{block_id}.{to_name}"].copy_(param)
+ else:
+ sd[weight_map[name]].copy_(param)
+
+ del hf_weights
+ gc.collect()
+
+ print(f"Saving to disk at {output_dir}")
+ torch.save(model.state_dict(), output_dir / "lit-llama.pth")
+
+ if verify:
+ try:
+ from transformers import LlamaForCausalLM
+ except ImportError:
+ raise ImportError("verify=True requires transformers to be installed, please `pip install transformers`")
+ print("Verifying...")
+
+ token_sample = torch.randint(0, config.vocab_size, size=(1, config.block_size), dtype=torch.int64)
+ out = model(token_sample)
+ del model
+ gc.collect()
+
+ print("Loading original model for comparison")
+ model_hf = LlamaForCausalLM.from_pretrained(checkpoint_dir)
+ out_hf = model_hf(token_sample)["logits"]
+
+ print("Comparing outputs")
+ assert out.device.type == out_hf.device.type
+ assert out.dtype == out_hf.dtype
+        torch.testing.assert_close(out, out_hf)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(convert_hf_checkpoint)
+
diff --git a/scripts/download.py b/scripts/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4dd39e56a82d16c7975215bd5ebe69a34e25617
--- /dev/null
+++ b/scripts/download.py
@@ -0,0 +1,34 @@
+import os
+from typing import Optional
+from urllib.request import urlretrieve
+
+files = {
+ "original_model.py": "https://gist.githubusercontent.com/lantiga/fd36849fb1c498da949a0af635318a7b/raw/7dd20f51c2a1ff2886387f0e25c1750a485a08e1/llama_model.py",
+ "original_adapter.py": "https://gist.githubusercontent.com/awaelchli/546f33fcdb84cc9f1b661ca1ca18418d/raw/e81d8f35fb1fec53af1099349b0c455fc8c9fb01/original_adapter.py",
+}
+
+
+def download_original(wd: str) -> None:
+ for file, url in files.items():
+ filepath = os.path.join(wd, file)
+ if not os.path.isfile(filepath):
+ print(f"Downloading original implementation to {filepath!r}")
+            urlretrieve(url=url, filename=filepath)
+ print("Done")
+ else:
+ print("Original implementation found. Skipping download.")
+
+
+def download_from_hub(repo_id: Optional[str] = None, local_dir: str = "checkpoints/hf-llama/7B") -> None:
+ if repo_id is None:
+ raise ValueError("Please pass `--repo_id=...`. You can try googling 'huggingface hub llama' for options.")
+
+ from huggingface_hub import snapshot_download
+
+ snapshot_download(repo_id, local_dir=local_dir)
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(download_from_hub)
diff --git a/scripts/prepare_alpaca.py b/scripts/prepare_alpaca.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a93227883eca14a0356be18e739861ec904d9f8
--- /dev/null
+++ b/scripts/prepare_alpaca.py
@@ -0,0 +1,136 @@
+"""Implementation derived from https://github.com/tloen/alpaca-lora"""
+import sys
+from pathlib import Path
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+import torch
+import requests
+import json
+from torch.utils.data import random_split
+from lit_llama.tokenizer import Tokenizer
+from tqdm import tqdm
+
+
+DATA_FILE = "https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_cleaned_archive.json"
+DATA_FILE_NAME = "alpaca_data_cleaned_archive.json"
+IGNORE_INDEX = -1
+
+
+def prepare(
+ destination_path: Path = Path("data/alpaca"),
+ tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
+ #test_split_size: int = 2000,
+ test_split_size: int = 2,
+ max_seq_length: int = 256,
+ seed: int = 42,
+ mask_inputs: bool = False, # as in alpaca-lora
+ data_file_name: str = DATA_FILE_NAME
+) -> None:
+ """Prepare the Alpaca dataset for instruction tuning.
+
+ The output is a training and validation dataset saved as `train.pt` and `val.pt`,
+ which stores the preprocessed and tokenized prompts and labels.
+ """
+
+ destination_path.mkdir(parents=True, exist_ok=True)
+ file_path = destination_path / data_file_name
+ download(file_path)
+
+ # TODO: If we don't have the Meta weights, where do we get the tokenizer from?
+ tokenizer = Tokenizer(tokenizer_path)
+    with open(file_path, 'r', encoding='utf-8') as file:
+        data = json.load(file)
+
+ # Partition the dataset into train and test
+ train_split_size = len(data) - test_split_size
+ train_set, test_set = random_split(
+ data,
+ lengths=(train_split_size, test_split_size),
+ generator=torch.Generator().manual_seed(seed),
+ )
+ train_set, test_set = list(train_set), list(test_set)
+
+ print(f"train has {len(train_set):,} samples")
+ print(f"val has {len(test_set):,} samples")
+
+ print("Processing train split ...")
+ train_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(train_set)]
+ torch.save(train_set, file_path.parent / "train.pt")
+
+ print("Processing test split ...")
+ test_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(test_set)]
+ torch.save(test_set, file_path.parent / "test.pt")
+
+
+def download(file_path: Path):
+ """Downloads the raw json data file and saves it in the given destination."""
+ if file_path.exists():
+ return
+ with open(file_path, "w") as f:
+ f.write(requests.get(DATA_FILE).text)
+
+
+def prepare_sample(example: dict, tokenizer: Tokenizer, max_length: int, mask_inputs: bool = True):
+ """Processes a single sample.
+
+ Each sample in the dataset consists of:
+ - instruction: A string describing the task
+ - input: A string holding a special input value for the instruction.
+ This only applies to some samples, and in others this is empty.
+ - output: The response string
+
+ This function processes this data to produce a prompt text and a label for
+ supervised training. The prompt text is formed as a single message including both
+ the instruction and the input. The label/target is the same message but with the
+ response attached.
+
+ Finally, both the prompt and the label get tokenized. If desired, all tokens
+ in the label that correspond to the original input prompt get masked out (default).
+ """
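+    # For instance, if the encoded prompt is 10 tokens and the full prompt + response is
+    # 25 tokens, then with `mask_inputs=True` positions 0..9 of `labels` are set to
+    # IGNORE_INDEX (-1), so the loss is computed only on the 15 response tokens.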
+ full_prompt = generate_prompt(example)
+ full_prompt_and_response = full_prompt + example["output"]
+ encoded_full_prompt = tokenize(tokenizer, full_prompt, max_length=max_length, eos=False)
+ encoded_full_prompt_and_response = tokenize(tokenizer, full_prompt_and_response, eos=True, max_length=max_length)
+
+ # The labels are the full prompt with response, but with the prompt masked out
+ labels = encoded_full_prompt_and_response.clone()
+ if mask_inputs:
+ labels[:len(encoded_full_prompt)] = IGNORE_INDEX
+
+ return {**example, "input_ids": encoded_full_prompt_and_response, "input_ids_no_response": encoded_full_prompt, "labels": labels}
+
+
+def tokenize(tokenizer: Tokenizer, string: str, max_length: int, eos=True) -> torch.Tensor:
+ return tokenizer.encode(string, bos=True, eos=eos, max_length=max_length)
+
+
+def generate_prompt(example):
+ """Generates a standardized message to prompt the model with an instruction, optional input and a
+ 'response' field."""
+
+ if example["input"]:
+ return (
+ "Below is an instruction that describes a task, paired with an input that provides further context. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Input:\n{example['input']}\n\n### Response:"
+ )
+ return (
+ "Below is an instruction that describes a task. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Response:"
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(prepare)
diff --git a/scripts/prepare_apaca2.py b/scripts/prepare_apaca2.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c76260d29f402e69fe31312719b0bdfc630d7f5
--- /dev/null
+++ b/scripts/prepare_apaca2.py
@@ -0,0 +1,132 @@
+"""Implementation derived from https://github.com/tloen/alpaca-lora"""
+import sys
+from pathlib import Path
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+import torch
+import requests
+import json
+from torch.utils.data import random_split
+from lit_llama.tokenizer import Tokenizer
+from tqdm import tqdm
+
+
+DATA_FILE = "https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_cleaned_archive.json"
+DATA_FILE_NAME = "alpaca_data_cleaned_archive.json"
+IGNORE_INDEX = -1
+
+
+def prepare(
+ destination_path: Path = Path("data/alpaca"),
+ tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
+ test_split_size: int = 2000,
+ max_seq_length: int = 256,
+ seed: int = 42,
+ mask_inputs: bool = False, # as in alpaca-lora
+ data_file_name: str = DATA_FILE_NAME
+) -> None:
+ """Prepare the Alpaca dataset for instruction tuning.
+
+ The output is a training and validation dataset saved as `train.pt` and `val.pt`,
+ which stores the preprocessed and tokenized prompts and labels.
+ """
+
+ destination_path.mkdir(parents=True, exist_ok=True)
+ file_path = destination_path / data_file_name
+ download(file_path)
+
+ # TODO: If we don't have the Meta weights, where do we get the tokenizer from?
+ tokenizer = Tokenizer(tokenizer_path)
+
+    with open(file_path, "r") as file:
+        data = json.load(file)
+
+ # Partition the dataset into train and test
+ train_split_size = len(data) - test_split_size
+ train_set, test_set = random_split(
+ data,
+ lengths=(train_split_size, test_split_size),
+ generator=torch.Generator().manual_seed(seed),
+ )
+ train_set, test_set = list(train_set), list(test_set)
+
+ print(f"train has {len(train_set):,} samples")
+ print(f"val has {len(test_set):,} samples")
+
+ print("Processing train split ...")
+ train_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(train_set)]
+ torch.save(train_set, file_path.parent / "train.pt")
+
+ print("Processing test split ...")
+ test_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(test_set)]
+ torch.save(test_set, file_path.parent / "test.pt")
+
+
+def download(file_path: Path):
+ """Downloads the raw json data file and saves it in the given destination."""
+ if file_path.exists():
+ return
+ with open(file_path, "w") as f:
+ f.write(requests.get(DATA_FILE).text)
+
+
+def prepare_sample(example: dict, tokenizer: Tokenizer, max_length: int, mask_inputs: bool = True):
+ """Processes a single sample.
+
+ Each sample in the dataset consists of:
+ - instruction: A string describing the task
+ - input: A string holding a special input value for the instruction.
+ This only applies to some samples, and in others this is empty.
+ - output: The response string
+
+ This function processes this data to produce a prompt text and a label for
+ supervised training. The prompt text is formed as a single message including both
+ the instruction and the input. The label/target is the same message but with the
+ response attached.
+
+ Finally, both the prompt and the label get tokenized. If desired, all tokens
+ in the label that correspond to the original input prompt get masked out (default).
+ """
+ full_prompt = generate_prompt(example)
+ full_prompt_and_response = full_prompt + example["output"]
+ encoded_full_prompt = tokenize(tokenizer, full_prompt, max_length=max_length, eos=False)
+ encoded_full_prompt_and_response = tokenize(tokenizer, full_prompt_and_response, eos=True, max_length=max_length)
+
+ # The labels are the full prompt with response, but with the prompt masked out
+ labels = encoded_full_prompt_and_response.clone()
+ if mask_inputs:
+ labels[:len(encoded_full_prompt)] = IGNORE_INDEX
+
+ return {**example, "input_ids": encoded_full_prompt_and_response, "input_ids_no_response": encoded_full_prompt, "labels": labels}
+
+
+def tokenize(tokenizer: Tokenizer, string: str, max_length: int, eos=True) -> torch.Tensor:
+ return tokenizer.encode(string, bos=True, eos=eos, max_length=max_length)
+
+
+def generate_prompt(example):
+ """Generates a standardized message to prompt the model with an instruction, optional input and a
+ 'response' field."""
+
+ if example["input"]:
+ return (
+ "Below is an instruction that describes a task, paired with an input that provides further context. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Input:\n{example['input']}\n\n### Response:"
+ )
+ return (
+ "Below is an instruction that describes a task. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Response:"
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(prepare)
\ No newline at end of file
diff --git a/scripts/prepare_dolly.py b/scripts/prepare_dolly.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb8451f45bb33f518a9b641d186bb3e6170c59ca
--- /dev/null
+++ b/scripts/prepare_dolly.py
@@ -0,0 +1,133 @@
+"""Implementation derived from https://github.com/tloen/alpaca-lora"""
+import sys
+from pathlib import Path
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+import torch
+import requests
+import json
+from torch.utils.data import random_split
+from lit_llama.tokenizer import Tokenizer
+from tqdm import tqdm
+
+
+DATA_FILE = "https://raw.githubusercontent.com/databrickslabs/dolly/master/data/databricks-dolly-15k.jsonl"
+DATA_FILE_NAME = "dolly_data_cleaned.json"
+IGNORE_INDEX = -1
+
+
+def prepare(
+ destination_path: Path = Path("data/dolly"),
+ tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
+ test_split_size: int = 2000,
+ max_seq_length: int = 1024,
+ seed: int = 42,
+ mask_inputs: bool = False, # as in alpaca-lora
+) -> None:
+ """Prepare the Dolly dataset for instruction tuning.
+
+ The output is a training and validation dataset saved as `train.pt` and `val.pt`,
+ which stores the preprocessed and tokenized prompts and labels.
+ """
+
+ destination_path.mkdir(parents=True, exist_ok=True)
+ file_path = destination_path / DATA_FILE_NAME
+ download(file_path)
+
+ # TODO: If we don't have the Meta weights, where do we get the tokenizer from?
+ tokenizer = Tokenizer(tokenizer_path)
+
+ with open(file_path, "r") as file:
+ data = file.readlines()
+ data = [json.loads(line) for line in data]
+ for item in data:
+ item["input"] = item.pop("context")
+ item["output"] = item.pop("response")
+
+ # Partition the dataset into train and test
+ train_split_size = len(data) - test_split_size
+ train_set, test_set = random_split(
+ data,
+ lengths=(train_split_size, test_split_size),
+ generator=torch.Generator().manual_seed(seed),
+ )
+ train_set, test_set = list(train_set), list(test_set)
+
+ print(f"train has {len(train_set):,} samples")
+ print(f"val has {len(test_set):,} samples")
+
+ print("Processing train split ...")
+ train_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(train_set)]
+ torch.save(train_set, file_path.parent / "train.pt")
+
+ print("Processing test split ...")
+ test_set = [prepare_sample(sample, tokenizer, max_seq_length, mask_inputs) for sample in tqdm(test_set)]
+ torch.save(test_set, file_path.parent / "test.pt")
+
+
+def download(file_path: Path):
+ """Downloads the raw json data file and saves it in the given destination."""
+ if file_path.exists():
+ return
+ with open(file_path, "w") as f:
+ f.write(requests.get(DATA_FILE).text)
+
+
+def prepare_sample(example: dict, tokenizer: Tokenizer, max_length: int, mask_inputs: bool = True):
+ """Processes a single sample.
+
+ Each sample in the dataset consists of:
+ - instruction: A string describing the task
+ - input: A string holding a special input value for the instruction.
+ This only applies to some samples, and in others this is empty.
+ - output: The response string
+
+ This function processes this data to produce a prompt text and a label for
+ supervised training. The prompt text is formed as a single message including both
+ the instruction and the input. The label/target is the same message but with the
+ response attached.
+
+ Finally, both the prompt and the label get tokenized. If desired, all tokens
+ in the label that correspond to the original input prompt get masked out (controlled by ``mask_inputs``).
+ """
+ full_prompt = generate_prompt(example)
+ full_prompt_and_response = full_prompt + example["output"]
+ encoded_full_prompt = tokenize(tokenizer, full_prompt, max_length=max_length, eos=False)
+ encoded_full_prompt_and_response = tokenize(tokenizer, full_prompt_and_response, eos=True, max_length=max_length)
+
+ # The labels are the full prompt with response, but with the prompt masked out
+ labels = encoded_full_prompt_and_response.clone()
+ if mask_inputs:
+ labels[:len(encoded_full_prompt)] = IGNORE_INDEX
+
+ return {**example, "input_ids": encoded_full_prompt_and_response, "input_ids_no_response": encoded_full_prompt, "labels": labels}
+
+
+def tokenize(tokenizer: Tokenizer, string: str, max_length: int, eos=True) -> torch.Tensor:
+ return tokenizer.encode(string, bos=True, eos=eos, max_length=max_length)
+
+
+def generate_prompt(example):
+ """Generates a standardized message to prompt the model with an instruction, optional input and a
+ 'response' field."""
+
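+ # For illustration (a sketch, not asserted by any test): a hypothetical sample
+ # {"instruction": "Name a colour.", "input": ""} is rendered as
+ # "Below is an instruction that describes a task. Write a response that
+ # appropriately completes the request.\n\n### Instruction:\nName a colour.\n\n### Response:"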
+ if example["input"]:
+ return (
+ f"Below is an instruction that describes a task, paired with an input that provides further context. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Input:\n{example['input']}\n\n### Response:"
+ )
+ return (
+ f"Below is an instruction that describes a task. "
+ "Write a response that appropriately completes the request.\n\n"
+ f"### Instruction:\n{example['instruction']}\n\n### Response:"
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(prepare)
diff --git a/scripts/prepare_redpajama.py b/scripts/prepare_redpajama.py
new file mode 100644
index 0000000000000000000000000000000000000000..8da1c1b4e5525e43268e2c2b0c59be99109894ed
--- /dev/null
+++ b/scripts/prepare_redpajama.py
@@ -0,0 +1,181 @@
+import json
+import glob
+import os
+from pathlib import Path
+import sys
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+import numpy as np
+from tqdm import tqdm
+
+from lit_llama import Tokenizer
+import lit_llama.packed_dataset as packed_dataset
+
+
+filenames_sample = [
+ "arxiv_sample.jsonl",
+ "book_sample.jsonl",
+ "c4_sample.jsonl",
+ "cc_2019-30_sample.jsonl",
+ "cc_2020-05_sample.jsonl",
+ "cc_2021-04_sample.jsonl",
+ "cc_2022-05_sample.jsonl",
+ "cc_2023-06_sample.jsonl",
+ "github_sample.jsonl",
+ "stackexchange_sample.jsonl",
+ "wikipedia_sample.jsonl",
+]
+
+filename_sets = {
+ "arxiv": "arxiv/arxiv*",
+ "book": "book/book*",
+ "c4": "c4/c4-train*",
+ "common_crawl": "common_crawl/*",
+ "github": "github/filtered*",
+ "stackexchange": "stackexchange/stackexchange*",
+ "wikipedia": "wikipedia/wiki*",
+}
+
+
+def prepare_sample(
+ source_path: Path,
+ tokenizer_path: Path,
+ destination_path: Path,
+ chunk_size: int,
+ match = ""
+) -> None:
+ """Prepare the "Red Pajama" dataset. We assume tokenizer has been trained (i.e. we reuse LLaMA's tokenizer model)."""
+ destination_path.mkdir(parents=True, exist_ok=True)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for name in filenames_sample:
+ if match and match not in name:
+ continue
+
+ filepath = source_path / name
+
+ if not filepath.is_file():
+ raise RuntimeError(
+ f"Input file not found at {filepath}. \n"
+ "Make sure you download the data, e.g. wget -i https://data.together.xyz/redpajama-data-1T/v1.0.0/urls.txt or through \n"
+ "https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T \n"
+ "https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample \n"
+ )
+
+ prefix, _ = os.path.splitext(name)
+
+ builder = packed_dataset.PackedDatasetBuilder(
+ outdir=destination_path,
+ prefix=prefix,
+ chunk_size=chunk_size,
+ sep_token=tokenizer.bos_id,
+ dtype="auto",
+ vocab_size=tokenizer.vocab_size,
+ )
+
+ print(f"Processing {name}")
+
+ with open(filepath, encoding="utf-8") as f:
+ for row in tqdm(f):
+ text = json.loads(row)["text"]
+ text_ids = tokenizer.encode(text)
+ builder.add_array(np.array(text_ids, dtype=builder.dtype))
+
+ builder.write_reminder()
+
+
+def prepare_full(
+ source_path: Path,
+ tokenizer_path: Path,
+ destination_path: Path,
+ chunk_size: int,
+ match: str = ""
+) -> None:
+ """Prepare the "Red Pajama" dataset. We assume tokenizer has been trained (i.e. we reuse LLaMA's tokenizer model)."""
+ import zstandard as zstd
+
+ destination_path.mkdir(parents=True, exist_ok=True)
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ for set_name, pattern in filename_sets.items():
+ if match and match not in set_name:
+ continue
+
+ is_cc = set_name == "common_crawl"
+
+ filenames = glob.glob(os.path.join(source_path, pattern), recursive=True)
+
+ if not filenames:
+ raise RuntimeError(
+ f"No files matching {pattern} found at {source_path}. \n"
+ "Make sure you download the data, e.g. wget -i https://data.together.xyz/redpajama-data-1T/v1.0.0/urls.txt or through \n"
+ "https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T \n"
+ "https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample \n"
+ )
+
+ builder = packed_dataset.PackedDatasetBuilder(
+ outdir=destination_path,
+ prefix=set_name,
+ chunk_size=chunk_size,
+ sep_token=tokenizer.bos_id,
+ dtype="auto",
+ vocab_size=tokenizer.vocab_size,
+ )
+
+ for name in filenames:
+ # glob() already returns paths that include source_path, so don't join it again
+ filepath = Path(name)
+
+ print(f"Processing {name}")
+
+ if is_cc:
+ with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+ for row in tqdm(f):
+ text = json.loads(row)["text"]
+ text_ids = tokenizer.encode(text)
+ builder.add_array(np.array(text_ids, dtype=builder.dtype))
+ else:
+ with open(filepath, encoding="utf-8") as f:
+ for row in tqdm(f):
+ text = json.loads(row)["text"]
+ text_ids = tokenizer.encode(text)
+ builder.add_array(np.array(text_ids, dtype=builder.dtype))
+
+ builder.write_reminder()
+
+
+def prepare(
+ source_path: Path = Path("data/RedPajama-Data-1T-Sample"),
+ tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
+ destination_path: Path = Path("data/red_pajama_sample"),
+ chunk_size: int = 2049 * 1024, # 2048 block size + 1 for causal (from LLaMA), 1024 blocks
+ sample: bool = False,
+ match: str = "",
+) -> None:
+ """Prepare the "Red Pajama" dataset. We assume tokenizer has been trained (i.e. we reuse LLaMA's tokenizer model)."""
+ if sample:
+ prepare_sample(
+ source_path=source_path,
+ tokenizer_path=tokenizer_path,
+ destination_path=destination_path,
+ chunk_size=chunk_size,
+ match=match,
+ )
+ else:
+ prepare_full(
+ source_path=source_path,
+ tokenizer_path=tokenizer_path,
+ destination_path=destination_path,
+ chunk_size=chunk_size,
+ match=match,
+ )
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(prepare)
diff --git a/scripts/prepare_shakespeare.py b/scripts/prepare_shakespeare.py
new file mode 100644
index 0000000000000000000000000000000000000000..01a4079e37019bed4221c1a245a764772d1bef4d
--- /dev/null
+++ b/scripts/prepare_shakespeare.py
@@ -0,0 +1,69 @@
+# MIT License
+
+# Copyright (c) 2022 Andrej Karpathy
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+import sys
+from pathlib import Path
+
+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
+import numpy as np
+import requests
+
+
+def prepare(destination_path: Path = Path("data/shakespeare")) -> None:
+ """Prepare the "Tiny Shakespeare" dataset."""
+ destination_path.mkdir(parents=True, exist_ok=True)
+
+ # download the tiny shakespeare dataset
+ input_file_path = destination_path / "input.txt"
+ if not input_file_path.exists():
+ data_url = "https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
+ with open(input_file_path, "w") as f:
+ f.write(requests.get(data_url).text)
+
+ with open(input_file_path) as f:
+ data = f.read()
+ n = len(data)
+ train_data = data[: int(n * 0.9)]
+ val_data = data[int(n * 0.9) :]
+
+ from lit_llama import Tokenizer
+
+ Tokenizer.train(input=input_file_path, destination=destination_path, vocab_size=100)
+ tokenizer = Tokenizer(destination_path / "tokenizer.model")
+ train_ids = tokenizer.encode(train_data)
+ val_ids = tokenizer.encode(val_data)
+ print(f"train has {len(train_ids):,} tokens")
+ print(f"val has {len(val_ids):,} tokens")
+
+ # export to bin files
+ train_ids = np.array(train_ids, dtype=np.uint16)
+ val_ids = np.array(val_ids, dtype=np.uint16)
+ train_ids.tofile(destination_path / "train.bin")
+ val_ids.tofile(destination_path / "val.bin")
+
+
+if __name__ == "__main__":
+ from jsonargparse import CLI
+
+ CLI(prepare)
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..94f723632e289a4de853f4e87f8d11841ee7f700
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,26 @@
+import os
+
+from setuptools import setup, find_packages
+
+
+_PATH_ROOT = os.path.dirname(__file__)
+
+with open(os.path.join(_PATH_ROOT, "README.md"), encoding="utf-8") as fo:
+ readme = fo.read()
+
+setup(
+ name='lit-llama',
+ version='0.1.0',
+ description='Implementation of the LLaMA language model',
+ author='Lightning AI',
+ url='https://github.com/lightning-AI/lit-llama',
+ install_requires=[
+ "torch>=2.0.0",
+ "lightning @ git+https://github.com/Lightning-AI/lightning@master",
+ "sentencepiece",
+ "bitsandbytes",
+ ],
+ packages=find_packages(),
+ long_description=readme,
+ long_description_content_type="text/markdown",
+)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab19c77e17e9e5836a114058453ec75dab203864
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,42 @@
+import sys
+from pathlib import Path
+
+import pytest
+
+wd = Path(__file__).parent.parent.absolute()
+
+
+@pytest.fixture()
+def orig_llama():
+ sys.path.append(str(wd))
+
+ from scripts.download import download_original
+
+ download_original(wd)
+
+ import original_model
+
+ return original_model
+
+
+@pytest.fixture()
+def orig_llama_adapter():
+ sys.path.append(str(wd))
+
+ from scripts.download import download_original
+
+ download_original(wd)
+
+ import original_adapter
+
+ return original_adapter
+
+
+@pytest.fixture()
+def lit_llama():
+ # this adds support for running tests without the package installed
+ sys.path.append(str(wd))
+
+ import lit_llama
+
+ return lit_llama
diff --git a/tests/test_adapter.py b/tests/test_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..427a35c87c889567e228b5b146a621ec63db0f60
--- /dev/null
+++ b/tests/test_adapter.py
@@ -0,0 +1,24 @@
+import torch
+from dataclasses import asdict
+import pytest
+import sys
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="EmptyInitOnDevice on CPU not working for Windows.")
+@pytest.mark.parametrize("model_size", ["7B", "13B", "30B", "65B"])
+def test_config_identical(model_size, lit_llama):
+ import lit_llama.adapter as llama_adapter
+ import lit_llama.model as llama
+ from lit_llama.utils import EmptyInitOnDevice
+
+ llama_config = asdict(llama.LLaMAConfig.from_name(model_size))
+ adapter_config = asdict(llama_adapter.LLaMAConfig.from_name(model_size))
+
+ del adapter_config["adapter_prompt_length"]
+ del adapter_config["adapter_start_layer"]
+ assert adapter_config == llama_config
+
+ with EmptyInitOnDevice():
+ llama_model = llama.LLaMA.from_name(model_size)
+ adapter_model = llama_adapter.LLaMA.from_name(model_size)
+ assert llama_model.lm_head.weight.shape == adapter_model.lm_head.weight.shape
diff --git a/tests/test_generate.py b/tests/test_generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..98bc7504ba3f77f281836074083951d743b4297a
--- /dev/null
+++ b/tests/test_generate.py
@@ -0,0 +1,113 @@
+import functools
+import subprocess
+import sys
+from contextlib import redirect_stdout
+from io import StringIO
+from pathlib import Path
+from unittest import mock
+from unittest.mock import Mock, call, ANY
+
+import torch
+
+wd = Path(__file__).parent.parent.absolute()
+
+
+@functools.lru_cache(maxsize=1)
+def load_generate_script():
+ sys.path.append(str(wd))
+
+ import generate
+
+ return generate
+
+
+def test_generate():
+ generate = load_generate_script()
+
+ from lit_llama.model import LLaMA, LLaMAConfig
+
+ T, C = 5, 3
+ logits = torch.randn(T, C)
+ input_idx = torch.randint(10, size=(T,))
+
+ config = LLaMAConfig(block_size=128, vocab_size=16, n_layer=1, n_head=4, n_embd=8)
+ model = LLaMA(config)
+ max_new_tokens = 20
+
+ multinomial_results = []
+ original_multinomial = torch.multinomial
+
+ def multinomial(*args, **kwargs):
+ out = original_multinomial(*args, **kwargs)
+ multinomial_results.append(out)
+ return out
+
+ with mock.patch("torch.multinomial", multinomial):
+ out = generate.generate(model, input_idx, max_new_tokens, max_seq_length=10, top_k=4)
+
+ assert out.size(0) == T + max_new_tokens
+ multinomial_results = torch.hstack(multinomial_results)
+ expected = torch.cat((input_idx, multinomial_results))
+ assert out.shape == expected.shape
+ torch.testing.assert_close(out, expected)
+
+
+@mock.patch("torch.cuda.is_bf16_supported", return_value=False)
+def test_main(_bf16_supported_mock, tmp_path, monkeypatch):
+ generate = load_generate_script()
+
+ checkpoint_path = tmp_path / "ckpt"
+ checkpoint_path.touch()
+ tokenizer_path = tmp_path / "tokenizer"
+ tokenizer_path.touch()
+
+ class FabricMock(Mock):
+ @property
+ def device(self):
+ return torch.device("cpu")
+
+ monkeypatch.setattr(generate.L, "Fabric", FabricMock)
+ model_mock = Mock()
+ monkeypatch.setattr(generate.LLaMA, "from_name", model_mock)
+ lookup_mock = Mock(return_value="1T")
+ monkeypatch.setattr(generate, "llama_model_lookup", lookup_mock)
+ load_mock = Mock()
+ load_mock.return_value = load_mock
+ load_mock.__enter__ = Mock()
+ load_mock.__exit__ = Mock()
+ monkeypatch.setattr(generate.torch, "load", load_mock)
+ monkeypatch.setattr(generate, "lazy_load", load_mock)
+ tokenizer_mock = Mock()
+ tokenizer_mock.return_value.encode.return_value = torch.tensor([[1, 2, 3]])
+ tokenizer_mock.return_value.decode.return_value = "foo bar baz"
+ monkeypatch.setattr(generate, "Tokenizer", tokenizer_mock)
+ generate_mock = Mock()
+ generate_mock.return_value = torch.tensor([[3, 2, 1]])
+ monkeypatch.setattr(generate, "generate", generate_mock)
+
+ num_samples = 2
+ out = StringIO()
+ with redirect_stdout(out):
+ generate.main(
+ checkpoint_path=checkpoint_path,
+ tokenizer_path=tokenizer_path,
+ temperature=2.0,
+ top_k=2,
+ num_samples=num_samples,
+ )
+
+ model_mock.assert_called_once_with("1T")
+ load_mock.assert_called_once_with(checkpoint_path)
+ tokenizer_mock.assert_called_once_with(tokenizer_path)
+ assert len(tokenizer_mock.return_value.decode.mock_calls) == num_samples
+ assert torch.allclose(tokenizer_mock.return_value.decode.call_args[0][0], generate_mock.return_value)
+ assert generate_mock.mock_calls == [call(ANY, ANY, 50, ANY, temperature=2.0, top_k=2)] * num_samples
+ # only the generated result is printed to stdout
+ assert out.getvalue() == "foo bar baz\n" * num_samples
+
+
+def test_cli():
+ cli_path = wd / "generate.py"
+ output = subprocess.check_output([sys.executable, cli_path, "-h"])
+ output = str(output.decode())
+ assert "Generates text samples" in output
diff --git a/tests/test_lora.py b/tests/test_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d9e3e8089defb6c1a19629b90c636f4dfa94f0a
--- /dev/null
+++ b/tests/test_lora.py
@@ -0,0 +1,64 @@
+import torch
+
+
+def test_lora_layer_replacement(lit_llama):
+ from lit_llama.lora import lora, CausalSelfAttention as LoRACausalSelfAttention
+ from lit_llama.model import LLaMA, LLaMAConfig
+
+ config = LLaMAConfig()
+ config.n_layer = 2
+ config.n_head = 4
+ config.n_embd = 8
+ config.block_size = 8
+ config.vocab_size = 8
+
+ with lora(r=8, alpha=8, dropout=0.1):
+ model = LLaMA(config)
+
+ assert isinstance(model.transformer.h[0].attn, LoRACausalSelfAttention)
+ assert isinstance(model.transformer.h[1].attn, LoRACausalSelfAttention)
+
+
+def test_lora_merge_unmerge(lit_llama):
+ from lit_llama.lora import lora, mark_only_lora_as_trainable
+ from lit_llama.model import LLaMA, LLaMAConfig
+
+ config = LLaMAConfig(n_layer=1, n_head=2, n_embd=8, block_size=8, vocab_size=8)
+
+ with lora(r=8, alpha=8, dropout=0.1):
+ model = LLaMA(config)
+
+ initial_weight = model.transformer.h[0].attn.c_attn.weight.clone()
+ model.train()
+ assert torch.equal(model.transformer.h[0].attn.c_attn.weight, initial_weight)
+
+ # perform an update to the LoRA weights
+ mark_only_lora_as_trainable(model)
+ optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
+ model(torch.randint(0, 8, size=(2, 4), dtype=torch.int64)).sum().backward()
+ optimizer.step()
+ optimizer.zero_grad()
+ # the weight remains unchanged (only lora A and B change)
+ assert torch.equal(model.transformer.h[0].attn.c_attn.weight, initial_weight)
+
+ # 'merge' and then 'unmerge' should neutralize themselves
+ weight_before = model.transformer.h[0].attn.c_attn.weight.clone()
+ model.eval()
+ assert not torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_before)
+ model.train()
+ # note: numerically, `W + (A * B) - (A * B) == W` does not hold exactly
+ assert torch.allclose(model.transformer.h[0].attn.c_attn.weight, weight_before)
+
+ # calling eval/train multiple times in a row should not merge/unmerge multiple times
+ model.eval()
+ assert model.transformer.h[0].attn.c_attn.merged
+ weight_after = model.transformer.h[0].attn.c_attn.weight.clone()
+ model.eval()
+ model.eval()
+ assert torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_after)
+ model.train()
+ assert not model.transformer.h[0].attn.c_attn.merged
+ weight_after = model.transformer.h[0].attn.c_attn.weight.clone()
+ model.train()
+ model.train()
+ assert torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_after)
diff --git a/tests/test_model.py b/tests/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d85e36859a7ae5e129b85d7264d5c4e23263315
--- /dev/null
+++ b/tests/test_model.py
@@ -0,0 +1,200 @@
+import torch
+import pytest
+import sys
+
+
+def copy_mlp(llama_mlp, orig_llama_mlp) -> None:
+ orig_llama_mlp.w1.weight.copy_(llama_mlp.c_fc1.weight)
+ orig_llama_mlp.w3.weight.copy_(llama_mlp.c_fc2.weight)
+ orig_llama_mlp.w2.weight.copy_(llama_mlp.c_proj.weight)
+
+
+def copy_attention(llama_attn, orig_llama_attn) -> None:
+ n_embd = llama_attn.c_attn.weight.shape[1]
+ orig_llama_attn.wq.weight.copy_(llama_attn.c_attn.weight[:n_embd])
+ orig_llama_attn.wk.weight.copy_(llama_attn.c_attn.weight[n_embd:-n_embd])
+ orig_llama_attn.wv.weight.copy_(llama_attn.c_attn.weight[-n_embd:])
+ orig_llama_attn.wo.weight.copy_(llama_attn.c_proj.weight)
+
+
+def copy_block(llama_block, orig_llama_block) -> None:
+ orig_llama_block.attention_norm.weight.copy_(llama_block.rms_1.scale)
+ copy_attention(llama_block.attn, orig_llama_block.attention)
+ orig_llama_block.ffn_norm.weight.copy_(llama_block.rms_2.scale)
+ copy_mlp(llama_block.mlp, orig_llama_block.feed_forward)
+
+
+def copy_weights(llama_model, orig_llama_model) -> None:
+ orig_llama_model.tok_embeddings.weight.copy_(llama_model.transformer.wte.weight)
+ for llama_block, orig_llama_block in zip(llama_model.transformer.h, orig_llama_model.layers):
+ copy_block(llama_block, orig_llama_block)
+ orig_llama_model.norm.weight.copy_(llama_model.transformer.ln_f.scale)
+ orig_llama_model.output.weight.copy_(llama_model.lm_head.weight)
+
+
+@torch.no_grad()
+def test_to_orig_llama(lit_llama, orig_llama) -> None:
+ block_size = 64
+ vocab_size = 32000
+ n_layer = 16
+ n_head = 16
+ n_embd = 32
+
+ llama_config = lit_llama.LLaMAConfig(
+ block_size=block_size, vocab_size=vocab_size, n_layer=n_layer, n_head=n_head, n_embd=n_embd
+ )
+ orig_llama_config = orig_llama.ModelArgs(
+ dim=n_embd, n_layers=n_layer, n_heads=n_head, vocab_size=vocab_size, norm_eps=1e-5, max_seq_len=block_size
+ )
+
+ batch_size = 3
+
+ token_sample = torch.randint(
+ 0, orig_llama_config.vocab_size, size=(batch_size, orig_llama_config.max_seq_len), dtype=torch.int64
+ )
+
+ llama_model = lit_llama.LLaMA(llama_config)
+ llama_model.apply(llama_model._init_weights)
+ orig_llama_model = orig_llama.Transformer(orig_llama_config)
+
+ copy_weights(llama_model, orig_llama_model)
+
+ orig_llama_embed = orig_llama_model.tok_embeddings(token_sample)
+ llama_embed = llama_model.transformer.wte(token_sample)
+ assert torch.allclose(orig_llama_embed, llama_embed)
+
+ seq_len = token_sample.shape[1]
+ mask = torch.full((1, 1, seq_len, seq_len), float("-inf"))
+ mask = torch.triu(mask, diagonal=1)
+ orig_llama_block_out = orig_llama_model.layers[0](orig_llama_embed, 0, orig_llama_model.freqs_cis[:seq_len], mask)
+ llama_block_out = llama_model.transformer.h[0](llama_embed)
+ assert torch.allclose(orig_llama_block_out, llama_block_out)
+
+ expected = orig_llama_model(token_sample, 0)
+ out = llama_model(token_sample)
+ assert torch.allclose(out, expected)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA")
+@torch.no_grad()
+def test_bfloat16_llama_init(lit_llama, orig_llama) -> None:
+ from lit_llama.utils import EmptyInitOnDevice
+ block_size = 64
+ vocab_size = 32000
+ n_layer = 16
+ n_head = 16
+ n_embd = 32
+
+ llama_config = lit_llama.LLaMAConfig(
+ block_size=block_size,
+ vocab_size=vocab_size,
+ n_layer=n_layer,
+ n_head=n_head,
+ n_embd=n_embd,
+ )
+ llama_model = lit_llama.LLaMA(llama_config)
+ llama_model.apply(llama_model._init_weights)
+
+ batch_size = 3
+
+ token_sample = torch.randint(
+ 0, vocab_size, size=(batch_size, block_size), dtype=torch.int64
+ )
+
+ expected = llama_model(token_sample)
+
+ with EmptyInitOnDevice(device="cuda", dtype=torch.bfloat16):
+ llama_model2 = lit_llama.LLaMA(llama_config)
+ llama_model2.load_state_dict(llama_model.state_dict(keep_vars=True))
+
+ out = llama_model2(token_sample.cuda()).float().cpu()
+ torch.testing.assert_close(out, expected, atol=5e-3, rtol=1e-3)
+
+
+def copy_adapter_weights(llama_model, orig_llama_model) -> None:
+ # copy the gating parameter
+ for llama_block, orig_llama_block in zip(llama_model.transformer.h, orig_llama_model.layers):
+ if hasattr(llama_block.attn, "gating_factor"):
+ llama_block.attn.gating_factor.copy_(orig_llama_block.attention.gate)
+
+ # In the original model, there is one embedding layer for all blocks combined
+ orig_adapter_wte = orig_llama_model.adapter_query.weight.reshape(
+ orig_llama_model.params.adapter_layer, orig_llama_model.params.adapter_len, orig_llama_model.params.dim
+ )
+
+ # In ours, the embedding layer is split across the individual attention layers
+ index = 0
+ for llama_block in llama_model.transformer.h:
+ if hasattr(llama_block.attn, "adapter_wte"):
+ llama_block.attn.adapter_wte.weight.copy_(orig_adapter_wte[index])
+ index += 1
+
+
+def enable_gate(model):
+ for name, param in model.named_parameters():
+ if "gating_factor" in name or "gate" in name:
+ param.fill_(1)
+
+
+@torch.no_grad()
+def test_adapter_parity(orig_llama_adapter):
+ """Test parity between our implementation of LLaMA-Adapter and the reference code."""
+ import lit_llama.adapter as lit_llama
+ orig_llama = orig_llama_adapter
+
+ block_size = 32
+ vocab_size = 100
+ n_layer = 2
+ n_head = 4
+ n_embd = 16
+ adapter_prompt_length: int = 10
+ adapter_start_layer: int = 0
+
+ llama_config = lit_llama.LLaMAConfig(
+ block_size=block_size, vocab_size=vocab_size, n_layer=n_layer, n_head=n_head, n_embd=n_embd,
+ adapter_prompt_length=adapter_prompt_length, adapter_start_layer=adapter_start_layer,
+ )
+ orig_llama_config = orig_llama.ModelArgs(
+ dim=n_embd, n_layers=n_layer, n_heads=n_head, vocab_size=vocab_size, norm_eps=1e-5, max_seq_len=block_size,
+ adapter_len=adapter_prompt_length, adapter_layer=(n_layer - adapter_start_layer),
+ )
+
+ batch_size = 3
+ token_sample = torch.randint(
+ 0, orig_llama_config.vocab_size, size=(batch_size, orig_llama_config.max_seq_len), dtype=torch.int64
+ )
+
+ llama_model = lit_llama.LLaMA(llama_config)
+ llama_model.apply(llama_model._init_weights)
+ orig_llama_model = orig_llama.Transformer(orig_llama_config)
+
+ copy_weights(llama_model, orig_llama_model)
+ copy_adapter_weights(llama_model, orig_llama_model)
+
+ # make the gate non-zero, otherwise the adapter is disabled and the model
+ # is identical to regular LLaMA
+ enable_gate(llama_model)
+ enable_gate(orig_llama_model)
+
+ expected = orig_llama_model(token_sample, 0)
+ out = llama_model(token_sample)
+ assert torch.allclose(out, expected)
+
+
+@pytest.mark.skipif(sys.platform in ("win32", "darwin"), reason="torch.compile not supported on this platform")
+def test_model_compile(lit_llama):
+ llama_config = lit_llama.LLaMAConfig(
+ block_size=8,
+ vocab_size=8,
+ n_layer=2,
+ n_head=2,
+ n_embd=4,
+ )
+ model = lit_llama.LLaMA(llama_config)
+ model.apply(model._init_weights)
+
+ model = torch.compile(model)
+
+ sample = torch.randint(model.config.vocab_size, size=(2, model.config.block_size), dtype=torch.int64)
+ for _ in range(3):
+ _ = model(sample)
diff --git a/tests/test_packed_dataset.py b/tests/test_packed_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b91fb4a7e8e48ed2af8e436a1c25df083a05154
--- /dev/null
+++ b/tests/test_packed_dataset.py
@@ -0,0 +1,203 @@
+import os
+from unittest.mock import MagicMock
+import requests
+
+from torch.utils.data import IterableDataset
+
+
+def train_tokenizer(destination_path):
+ destination_path.mkdir(parents=True, exist_ok=True)
+
+ # download the tiny shakespeare dataset
+ input_file_path = destination_path / "input.txt"
+ if not input_file_path.exists():
+ data_url = "https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
+ with open(input_file_path, "w") as f:
+ f.write(requests.get(data_url).text)
+
+ from lit_llama import Tokenizer
+ Tokenizer.train(
+ input=input_file_path,
+ destination=destination_path,
+ vocab_size=100,
+ )
+
+ return destination_path / "tokenizer.model"
+
+
+def test_packed_dataset(tmp_path):
+ tokenizer_path = train_tokenizer(tmp_path)
+
+ from lit_llama import Tokenizer
+ tokenizer = Tokenizer(tokenizer_path)
+
+ texts = [
+ "The moment of truth is upon us.",
+ "Time to open the fridge."
+ ]
+
+ from lit_llama.packed_dataset import PackedDatasetBuilder, PackedDataset, HDR_SIZE
+
+ block_size = 10
+ n_blocks = 2
+ chunk_size = block_size * n_blocks
+
+ builder = PackedDatasetBuilder(
+ outdir=tmp_path,
+ prefix="packed_dataset",
+ chunk_size=chunk_size,
+ sep_token=tokenizer.bos_id,
+ dtype="auto",
+ vocab_size=100,
+ )
+
+ text_ids = []
+
+ for text in texts:
+ text_ids = tokenizer.encode(text)
+ assert text_ids[0] == tokenizer.bos_id
+ builder.add_array(text_ids)
+
+ filenames = builder.filenames
+
+ assert len(filenames) == 2
+ assert os.path.basename(filenames[0]) == "packed_dataset_0000000000.bin"
+ assert os.path.basename(filenames[1]) == "packed_dataset_0000000001.bin"
+
+ import numpy as np
+
+ ex_tokenized = [
+ tokenizer.encode(text).numpy().astype(builder.dtype)
+ for text in texts
+ ]
+ ex_tokenized = np.concatenate(ex_tokenized)
+ ex_tokenized = ex_tokenized[:2 * chunk_size]
+
+ for filename, el in zip(filenames, np.array_split(ex_tokenized, 2)):
+ mmap = np.memmap(filename, mode="r", order="C", offset=HDR_SIZE)
+ count = len(mmap) // np.dtype(builder.dtype).itemsize
+ arr = np.frombuffer(
+ mmap, dtype=builder.dtype, count=count, offset=0
+ )
+ where_bos = np.where(arr == tokenizer.bos_id)[0]
+ # each chunk file should contain exactly one BOS token (two in total, one per file)
+ assert len(where_bos) == 1
+ assert np.array_equal(arr, el)
+
+ dataset = PackedDataset(filenames=filenames, n_chunks=2, block_size=block_size, shuffle=False)
+
+ ex_split = np.array_split(ex_tokenized, ex_tokenized.shape[0] // block_size)
+
+ for item, el in zip(dataset, ex_split):
+ assert np.array_equal(item, el)
+
+ dataset = PackedDataset(filenames=filenames, n_chunks=2, block_size=block_size, seed=12345)
+
+ for i, item in enumerate(dataset):
+ block_idxs = iter(dataset)._block_idxs
+ assert np.array_equal(item, ex_split[block_idxs[i]])
+
+ dataset = PackedDataset(filenames=filenames, n_chunks=2, block_size=block_size, seed=12345, wrap=True)
+
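+ # wrap=True lets the iterator cycle over the chunks again, so this only checks
+ # that more items than the dataset holds can be drawn without raising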
+ for i, item in enumerate(dataset):
+ if i > 24:
+ break
+
+ dataset = PackedDataset(filenames=filenames, n_chunks=1, block_size=block_size, seed=12345)
+
+ for i, item in enumerate(dataset):
+ block_idxs = iter(dataset)._block_idxs
+ chunk_idx = i // n_blocks * n_blocks
+ assert np.array_equal(item, ex_split[chunk_idx + block_idxs[i % n_blocks]])
+
+ block_size_ = block_size // 2
+ ex_split = np.array_split(ex_tokenized, ex_tokenized.shape[0] // block_size_)
+ dataset = PackedDataset(filenames=filenames, n_chunks=2, block_size=block_size_, seed=12345)
+
+ for i, item in enumerate(dataset):
+ block_idxs = iter(dataset)._block_idxs
+ assert np.array_equal(item, ex_split[block_idxs[i]])
+
+ block_size_ = block_size // 3
+ n_chunks = 2
+ ex_chunks = np.split(ex_tokenized, n_chunks)
+ n_splits = ex_tokenized.shape[0] // n_chunks // block_size_
+ ex_splits = [np.split(el[:n_splits * block_size_], n_splits) for el in ex_chunks]
+ ex_split = sum(ex_splits, [])
+
+ dataset = PackedDataset(filenames=filenames, n_chunks=n_chunks, block_size=block_size_, seed=12345)
+
+ for i, item in enumerate(dataset):
+ block_idxs = iter(dataset)._block_idxs
+ assert np.array_equal(item, ex_split[block_idxs[i]])
+
+
+class SimpleDataset(IterableDataset):
+ def __init__(self, start, end):
+ super().__init__()
+ self._start = start
+ self._end = end
+
+ def __iter__(self):
+ return iter(range(self._start, self._end))
+
+
+def test_combined_dataset(tmp_path):
+ from lit_llama.packed_dataset import CombinedDataset
+
+ dataset1 = SimpleDataset(0, 10)
+ dataset2 = SimpleDataset(10, 20)
+ dataset = CombinedDataset(datasets=[dataset1, dataset2], weights=[1.0, 0.0], seed=12345)
+
+ res = [el for el in dataset]
+ assert res == list(range(0, 10))
+
+ dataset1 = SimpleDataset(0, 10)
+ dataset2 = SimpleDataset(10, 20)
+ dataset = CombinedDataset(datasets=[dataset1, dataset2], weights=[0.0, 1.0], seed=12345)
+
+ res = [el for el in dataset]
+ assert res == list(range(10, 20))
+
+ dataset1 = SimpleDataset(0, 10)
+ dataset2 = SimpleDataset(10, 20)
+ dataset = CombinedDataset(datasets=[dataset1, dataset2], weights=[0.5, 0.5], seed=12345)
+
+ res = [el for el in dataset]
+ assert 9 in res or 19 in res
+ if len(res) > 10:
+ assert 0 in res and 10 in res
+
+
+def test_sharded_packed_dataset(monkeypatch):
+ import lit_llama.packed_dataset
+ from lit_llama.packed_dataset import PackedDataset
+
+ dataset_iterator_mock = MagicMock()
+ monkeypatch.setattr(lit_llama.packed_dataset, "PackedDatasetIterator", dataset_iterator_mock)
+ filenames = [str(i) for i in range(10)]
+
+ # world_size = 1, rank = 0
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == filenames
+ dataset_iterator_mock.reset_mock()
+ # world_size = 2, rank = 0
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2, num_processes=2, process_rank=0))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == ["0", "2", "4", "6", "8"]
+ dataset_iterator_mock.reset_mock()
+ # world_size = 2, rank = 1
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2, num_processes=2, process_rank=1))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == ["1", "3", "5", "7", "9"]
+ dataset_iterator_mock.reset_mock()
+
+ # world_size = 3, rank = 0 (dataset size not cleanly divisible by world size)
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2, num_processes=3, process_rank=0))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == ["0", "3", "6"]
+ dataset_iterator_mock.reset_mock()
+ # world_size = 3, rank = 1 (dataset size not cleanly divisible by world size)
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2, num_processes=3, process_rank=1))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == ["1", "4", "7"]
+ dataset_iterator_mock.reset_mock()
+ # world_size = 3, rank = 2 (dataset size not cleanly divisible by world size)
+ iter(PackedDataset(filenames=filenames, n_chunks=2, block_size=2, num_processes=3, process_rank=2))
+ assert dataset_iterator_mock.call_args[1]["filenames"] == ["2", "5", "8"]
diff --git a/tests/test_prepare_redpajama.py b/tests/test_prepare_redpajama.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3e68a15b354138d67bf8049c929ba3aab8536fd
--- /dev/null
+++ b/tests/test_prepare_redpajama.py
@@ -0,0 +1,142 @@
+import json
+import os
+import subprocess
+import sys
+from pathlib import Path
+from unittest import mock
+from unittest.mock import Mock, call, ANY
+
+wd = (Path(__file__).parent.parent / "scripts").absolute()
+
+import requests
+
+
+def train_tokenizer(destination_path):
+ destination_path.mkdir(parents=True, exist_ok=True)
+
+ # download the tiny shakespeare dataset
+ input_file_path = destination_path / "input.txt"
+ if not input_file_path.exists():
+ data_url = "https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
+ with open(input_file_path, "w") as f:
+ f.write(requests.get(data_url).text)
+
+ from lit_llama import Tokenizer
+ Tokenizer.train(input=input_file_path, destination=destination_path, vocab_size=100)
+
+ return destination_path / "tokenizer.model"
+
+
+def test_prepare_sample(tmp_path):
+ sys.path.append(str(wd))
+
+ tokenizer_path = train_tokenizer(tmp_path)
+
+ sample_path = tmp_path / "sample"
+ source_path = sample_path / "source"
+ dest_path = sample_path / "dest"
+
+ source_path.mkdir(parents=True, exist_ok=True)
+
+ sample = {
+ "meta": {"some": "info"},
+ "text": "some text"
+ }
+
+ jsonl_sample = "\n".join([json.dumps(el) for el in [sample] * 2])
+
+ import prepare_redpajama
+
+ for filename in prepare_redpajama.filenames_sample:
+ with open(source_path / filename, "w") as f:
+ f.write(jsonl_sample)
+
+ prepare_redpajama.prepare(source_path=source_path, tokenizer_path=tokenizer_path, destination_path=dest_path, sample=True)
+
+ bin_files = [el.replace(".jsonl", "_0000000000.bin") for el in prepare_redpajama.filenames_sample]
+
+ assert set(os.listdir(dest_path)) == set(bin_files)
+
+ from lit_llama import Tokenizer
+ from lit_llama.packed_dataset import PackedDataset
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ # artificially set block_size to fit the text
+ block_size = len(tokenizer.encode("some text"))
+
+ for filename in bin_files:
+ filenames = [os.path.join(dest_path, filename)]
+ dataset = PackedDataset(filenames=filenames, n_chunks=1, block_size=block_size, shuffle=False)
+ dataset_iter = iter(dataset)
+ assert tokenizer.decode(next(dataset_iter)) == "some text"
+ assert tokenizer.decode(next(dataset_iter)) == "some text"
+
+
+def test_prepare_full(tmp_path):
+ sys.path.append(str(wd))
+
+ tokenizer_path = train_tokenizer(tmp_path)
+
+ full_path = tmp_path / "full"
+ source_path = full_path / "source"
+ dest_path = full_path / "dest"
+
+ source_path.mkdir(parents=True, exist_ok=True)
+
+ sample = {
+ "meta": {"some": "info"},
+ "text": "some text"
+ }
+
+ jsonl_sample = "\n".join([json.dumps(el) for el in [sample] * 2])
+
+ import prepare_redpajama
+
+ arxiv_file = source_path / "arxiv" / "arxiv_0.jsonl"
+ arxiv_file.parent.mkdir(parents=True, exist_ok=True)
+ with open(arxiv_file, "w") as f:
+ f.write(jsonl_sample)
+
+ import zstandard as zstd
+
+ cc_file = source_path / "common_crawl" / "cc_0.jsonl"
+ cc_file.parent.mkdir(parents=True, exist_ok=True)
+ with zstd.open(cc_file, "wt", encoding="utf-8") as f:
+ f.write(jsonl_sample)
+
+ filename_sets = {
+ "arxiv": "arxiv/arxiv*",
+ "common_crawl": "common_crawl/*",
+ }
+
+ with mock.patch("prepare_redpajama.filename_sets", filename_sets):
+ prepare_redpajama.prepare(source_path=source_path, tokenizer_path=tokenizer_path, destination_path=dest_path, sample=False)
+
+ all_names = prepare_redpajama.filename_sets.keys()
+ bin_files = [el + "_0000000000.bin" for el in all_names]
+
+ assert set(os.listdir(dest_path)) == set(bin_files)
+
+ from lit_llama import Tokenizer
+ from lit_llama.packed_dataset import PackedDataset
+
+ tokenizer = Tokenizer(tokenizer_path)
+
+ # artificially set block_size to fit the text
+ block_size = len(tokenizer.encode("some text"))
+
+ filenames = [os.path.join(dest_path, el) for el in bin_files]
+
+ for filename in filenames:
+ dataset = PackedDataset(filenames=[filename], n_chunks=1, block_size=block_size, shuffle=False)
+ dataset_iter = iter(dataset)
+ assert tokenizer.decode(next(dataset_iter)) == "some text"
+ assert tokenizer.decode(next(dataset_iter)) == "some text"
+
+
+def test_cli():
+ cli_path = wd / "prepare_redpajama.py"
+ output = subprocess.check_output([sys.executable, cli_path, "-h"])
+ output = str(output.decode())
+ assert 'Prepare the "Red Pajama"' in output
diff --git a/tests/test_prepare_shakespeare.py b/tests/test_prepare_shakespeare.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef6a80aaaad1e0bef1bc6b2865861f630d28be29
--- /dev/null
+++ b/tests/test_prepare_shakespeare.py
@@ -0,0 +1,23 @@
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+wd = (Path(__file__).parent.parent / "scripts").absolute()
+
+
+def test_prepare(tmp_path):
+ sys.path.append(str(wd))
+
+ import prepare_shakespeare
+
+ prepare_shakespeare.prepare(tmp_path)
+
+ assert set(os.listdir(tmp_path)) == {"train.bin", "tokenizer.model", "tokenizer.vocab", "input.txt", "val.bin"}
+
+
+def test_cli():
+ cli_path = wd / "prepare_shakespeare.py"
+ output = subprocess.check_output([sys.executable, cli_path, "-h"])
+ output = str(output.decode())
+ assert 'Prepare the "Tiny Shakespeare"' in output
diff --git a/tests/test_rmsnorm.py b/tests/test_rmsnorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e88dc859a583262aac7b0900333da7d225f83187
--- /dev/null
+++ b/tests/test_rmsnorm.py
@@ -0,0 +1,15 @@
+import torch
+
+
+@torch.no_grad()
+def test_rmsnorm(lit_llama, orig_llama) -> None:
+ block_size = 16
+ vocab_size = 16
+
+ sample = torch.rand(size=(2, block_size, vocab_size), dtype=torch.float32)
+
+ eps = 1e-6
+ orig_llama_rmsnorm = orig_llama.RMSNorm(vocab_size, eps=eps)(sample)
+ llama_rmsnorm = lit_llama.RMSNorm(vocab_size, eps=eps)(sample)
+
+ assert torch.allclose(orig_llama_rmsnorm, llama_rmsnorm)
diff --git a/tests/test_rope.py b/tests/test_rope.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a42dcab96a47d5e01a1e06632f2b74f54f2c6c4
--- /dev/null
+++ b/tests/test_rope.py
@@ -0,0 +1,16 @@
+import torch
+
+@torch.no_grad()
+def test_rope(lit_llama, orig_llama) -> None:
+ torch.manual_seed(1)
+
+ bs, seq_len, n_head, n_embed = 1, 6, 2, 8
+ x = torch.randint(0, 10000, size=(bs, seq_len, n_head, n_embed // n_head)).float()
+
+ freqs_cis = orig_llama.precompute_freqs_cis(n_embed // n_head, seq_len)
+ llama_rope_cache = lit_llama.build_rope_cache(seq_len, n_embed // n_head, dtype=x.dtype, device=x.device)
+ torch.testing.assert_close(freqs_cis, torch.view_as_complex(llama_rope_cache))
+
+ llama_x_rope = lit_llama.apply_rope(x.transpose(1, 2), llama_rope_cache).transpose(1, 2)
+ orig_llama_x_rope, _ = orig_llama.apply_rotary_emb(x, x, freqs_cis)
+ torch.testing.assert_close(llama_x_rope, orig_llama_x_rope)
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6d7854406b153ba6411c8d00b9059207d52ea98
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,56 @@
+import tempfile
+import pathlib
+
+import torch
+
+
+class ATensor(torch.Tensor):
+ pass
+
+
+def test_lazy_load_basic(lit_llama):
+ import lit_llama.utils
+
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ m = torch.nn.Linear(5, 3)
+ path = pathlib.Path(tmpdirname)
+ fn = str(path / "test.pt")
+ torch.save(m.state_dict(), fn)
+ with lit_llama.utils.lazy_load(fn) as sd_lazy:
+ assert "NotYetLoadedTensor" in str(next(iter(sd_lazy.values())))
+ m2 = torch.nn.Linear(5, 3)
+ m2.load_state_dict(sd_lazy)
+
+ x = torch.randn(2, 5)
+ actual = m2(x)
+ expected = m(x)
+ torch.testing.assert_close(actual, expected)
+
+
+def test_lazy_load_subclass(lit_llama):
+ import lit_llama.utils
+
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ path = pathlib.Path(tmpdirname)
+ fn = str(path / "test.pt")
+ t = torch.randn(2, 3)[:, 1:]
+ sd = {
+ 1: t,
+ 2: torch.nn.Parameter(t),
+ 3: torch.Tensor._make_subclass(ATensor, t),
+ }
+ torch.save(sd, fn)
+ with lit_llama.utils.lazy_load(fn) as sd_lazy:
+ for k in sd.keys():
+ actual = sd_lazy[k]
+ expected = sd[k]
+ torch.testing.assert_close(actual._load_tensor(), expected)
+
+
+def test_find_multiple(lit_llama):
+ from lit_llama.utils import find_multiple
+
+ assert find_multiple(17, 5) == 20
+ assert find_multiple(30, 7) == 35
+ assert find_multiple(10, 2) == 10
+ assert find_multiple(5, 10) == 10
diff --git a/train_redpajama.py b/train_redpajama.py
new file mode 100644
index 0000000000000000000000000000000000000000..6313d7c85ad5a5146574362702af6cbacdab33af
--- /dev/null
+++ b/train_redpajama.py
@@ -0,0 +1,312 @@
+import os
+import math
+import glob
+import time
+from functools import partial
+from pathlib import Path
+from typing import Tuple, Optional
+
+import lightning as L
+from lightning.fabric.strategies import FSDPStrategy
+
+import torch
+from torch.utils.data import DataLoader
+from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
+
+import numpy as np
+
+from lit_llama.model import Block, LLaMA, LLaMAConfig
+from lit_llama.packed_dataset import PackedDataset, CombinedDataset
+from lit_llama.utils import save_model_checkpoint
+
+
+out_dir = "out/training"
+save_interval = 1000
+eval_interval = 1000
+eval_iters = 100
+log_interval = 1
+
+# compile = False
+
+# Hyperparameters
+learning_rate = 6e-4
+batch_size = 125
+micro_batch_size = 5
+max_iters = 600000 # num_epochs * epoch_size // devices
+weight_decay = 1e-1
+beta1 = 0.9
+beta2 = 0.95
+grad_clip = 1.0
+decay_lr = True
+warmup_iters = 2000
+lr_decay_iters = max_iters
+min_lr = 6e-5
+
+
+# Data proportions from https://arxiv.org/pdf/2302.13971.pdf Table 1
+data_config = [
+ ("arxiv", 2.5),
+ ("book", 4.5),
+ ("c4", 15.0),
+ ("cc", 67.0),
+ ("github", 4.5),
+ ("stackexchange", 2.0),
+ ("wikipedia", 4.5),
+]
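+# These weights sum to 100 and are normalized into sampling probabilities in
+# create_dataloader() below, e.g. (sketch) arxiv is sampled with probability
+# 2.5 / 100 = 0.025 and common crawl with 67.0 / 100 = 0.67.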
+
+
+def main(
+ devices: int = 4,
+ train_data_dir: Path = "data/lit-redpajama",
+ val_data_dir: Optional[Path] = None,
+) -> None:
+ auto_wrap_policy = partial(
+ transformer_auto_wrap_policy, transformer_layer_cls={Block}
+ )
+ strategy = FSDPStrategy(
+ auto_wrap_policy=auto_wrap_policy, activation_checkpointing=Block
+ )
+
+ fabric = L.Fabric(
+ accelerator="cuda", devices=devices, precision="bf16-mixed", strategy=strategy
+ )
+ fabric.launch()
+ fabric.seed_everything(1337)
+
+ if fabric.global_rank == 0:
+ os.makedirs(out_dir, exist_ok=True)
+
+ config = LLaMAConfig.from_name("7B")
+
+ train_dataloader, val_dataloader = create_dataloaders(
+ batch_size=micro_batch_size,
+ block_size=config.block_size,
+ fabric=fabric,
+ train_data_dir=train_data_dir,
+ val_data_dir=val_data_dir,
+ seed=1338,
+ )
+ if val_dataloader is None:
+ train_dataloader = fabric.setup_dataloaders(train_dataloader)
+ else:
+ train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+
+ with fabric.device:
+ torch.set_default_dtype(torch.bfloat16)
+ model = LLaMA(config)
+ model.apply(model._init_weights)
+ torch.set_default_dtype(torch.float32)
+
+ # if compile:
+ # model = torch.compile(model)
+
+ optimizer = torch.optim.AdamW(
+ model.parameters(),
+ lr=learning_rate,
+ weight_decay=weight_decay,
+ betas=(beta1, beta2),
+ )
+
+ model, optimizer = fabric.setup(model, optimizer)
+
+ process_batch_size = batch_size // devices
+ grad_accum_steps = process_batch_size // micro_batch_size
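+ # Sketch with the defaults above (batch_size=125, micro_batch_size=5) and devices=4:
+ # process_batch_size = 125 // 4 = 31 and grad_accum_steps = 31 // 5 = 6, i.e. about
+ # 6 * 5 * 4 = 120 sequences contribute to each optimizer step (the integer division
+ # makes this approximate).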
+
+ train(fabric, model, optimizer, train_dataloader, val_dataloader, grad_accum_steps, devices)
+
+
+def train(
+ fabric: L.Fabric,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ train_dataloader: DataLoader,
+ val_dataloader: Optional[DataLoader],
+ grad_accum_steps: int,
+ devices: int,
+) -> None:
+ """The training loop.
+
+ Loosely based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+ """
+
+ step_count = 0
+
+ step_time = 0.0
+ tokens = 0
+ tokens_sec = 0.0
+ prev_t1 = time.time()
+
+ for iter_num, train_data in enumerate(train_dataloader):
+ t0 = time.time()
+
+ # determine and set the learning rate for this iteration
+ lr = get_lr(iter_num) if decay_lr else learning_rate
+ for param_group in optimizer.param_groups:
+ param_group["lr"] = lr
+
+
+ input_ids = train_data[:, 0 : model.config.block_size].contiguous()
+ targets = train_data[:, 1 : model.config.block_size + 1].contiguous()
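+ # targets are the inputs shifted left by one token: targets[:, i] holds the token
+ # that follows input_ids[:, i], which is why create_dataloaders() below requests
+ # block_size + 1 tokens per sample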
+
+ is_accumulating = (iter_num + 1) % grad_accum_steps != 0
+
+ with fabric.no_backward_sync(model, enabled=is_accumulating):
+ logits = model(input_ids)
+ loss = torch.nn.functional.cross_entropy(
+ logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1
+ )
+ fabric.backward(loss / grad_accum_steps)
+
+ t1 = time.time()
+
+ if not is_accumulating:
+ fabric.clip_gradients(model, optimizer, max_norm=grad_clip)
+
+ optimizer.step()
+ optimizer.zero_grad()
+ step_count += 1
+
+ t1 = time.time()
+
+ if val_dataloader is not None and step_count % eval_interval == 0:
+ val_loss = validate(fabric, model, val_dataloader)
+ fabric.print(f"step {iter_num}: val loss {val_loss:.4f}")
+ fabric.barrier()
+ fabric.log_dict(
+ {"iter": iter_num, "val_loss": val_loss, "step": step_count, "lr": lr}
+ )
+
+ if step_count % save_interval == 0:
+ fabric.print(f"Saving checkpoint to {out_dir}")
+ save_model_checkpoint(
+ fabric, model, os.path.join(out_dir, f"iter-{iter_num:06d}-ckpt.pth")
+ )
+
+ dt = t1 - t0
+
+ tokens += micro_batch_size * model.config.block_size
+ step_time += t1 - prev_t1
+ prev_t1 = t1
+
+ if iter_num % log_interval == 0:
+ tokens_sec_str = f"{tokens / step_time:.0f}" if not is_accumulating else "-"
+
+ fabric.log_dict(
+ {"iter": iter_num, "train_loss": loss, "step": step_count, "lr": lr}
+ )
+ fabric.print(
+ f"iter {iter_num}: loss {loss.item():.4f}, time: {dt*1000:.2f}ms, speed: {tokens_sec_str} toks/s/device"
+ )
+
+ if not is_accumulating:
+ tokens = 0
+ step_time = 0.0
+
+ if iter_num > max_iters:
+ break
+
+
+@torch.no_grad()
+def validate(
+ fabric: L.Fabric, model: torch.nn.Module, val_dataloader: DataLoader
+) -> torch.Tensor:
+ fabric.print("Validating ...")
+ model.eval()
+ losses = torch.zeros(eval_iters)
+ for k, val_data in enumerate(val_dataloader):
+ # cap validation at eval_iters batches; `losses` above only has that many slots
+ if k >= eval_iters:
+ break
+ input_ids = val_data[:, 0 : model.config.block_size].contiguous()
+ targets = val_data[:, 1 : model.config.block_size + 1].contiguous()
+ logits = model(input_ids)
+ loss = torch.nn.functional.cross_entropy(
+ logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1
+ )
+ losses[k] = loss.item()
+ out = losses.mean()
+ model.train()
+ return out
+
+
+def create_dataloader(
+ batch_size: int,
+ block_size: int,
+ data_dir: str,
+ fabric,
+ shuffle: bool = True,
+ seed: int = 12345,
+) -> DataLoader:
+ datasets = []
+ for prefix, _ in data_config:
+ filenames = glob.glob(os.path.join(data_dir, prefix + "*"))
+ dataset = PackedDataset(
+ filenames, n_chunks=4, block_size=block_size, shuffle=shuffle, seed=seed,
+ num_processes=fabric.world_size, process_rank=fabric.global_rank,
+ )
+ datasets.append(dataset)
+
+ if not datasets:
+ raise RuntimeError(
+ f"No data found at {data_dir}. Make sure you ran prepare_redpajama.py to create the dataset."
+ )
+
+ weights = [weight for _, weight in data_config]
+ sum_weights = sum(weights)
+ weights = [el / sum_weights for el in weights]
+
+ combined_dataset = CombinedDataset(datasets=datasets, seed=seed, weights=weights)
+
+ return DataLoader(combined_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)
+
+
+def create_dataloaders(
+ batch_size: int,
+ block_size: int,
+ fabric,
+ train_data_dir: str = "data/lit-redpajama",
+ val_data_dir: Optional[str] = None,
+ seed: int = 12345,
+) -> Tuple[DataLoader, DataLoader]:
+ # Increase by one because we need the next word as well
+ effective_block_size = block_size + 1
+ train_dataloader = create_dataloader(
+ batch_size=batch_size,
+ block_size=effective_block_size,
+ fabric=fabric,
+ data_dir=train_data_dir,
+ shuffle=True,
+ seed=seed,
+ )
+ val_dataloader = (
+ create_dataloader(
+ batch_size=batch_size,
+ block_size=effective_block_size,
+ fabric=fabric,
+ data_dir=val_data_dir,
+ shuffle=False,
+ seed=seed,
+ )
+ if val_data_dir
+ else None
+ )
+ return train_dataloader, val_dataloader
+
+
+# learning rate decay scheduler (cosine with warmup)
+def get_lr(it):
+ # 1) linear warmup for warmup_iters steps
+ if it < warmup_iters:
+ return learning_rate * it / warmup_iters
+ # 2) if it > lr_decay_iters, return min learning rate
+ if it > lr_decay_iters:
+ return min_lr
+ # 3) in between, use cosine decay down to min learning rate
+ decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
+ assert 0 <= decay_ratio <= 1
+ coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
+ return min_lr + coeff * (learning_rate - min_lr)
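+# Sketch with the defaults above: at it=1000 (mid-warmup) lr = 6e-4 * 1000 / 2000 = 3e-4,
+# and at it=lr_decay_iters the cosine coefficient reaches 0, so lr = min_lr = 6e-5.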
+
+
+if __name__ == "__main__":
+ # Uncomment this line if you see an error: "Expected is_sm80 to be true, but got false"
+ # torch.backends.cuda.enable_flash_sdp(False)
+ torch.set_float32_matmul_precision("high")
+
+ from jsonargparse.cli import CLI
+
+ CLI(main)
diff --git a/train_shakespeare.py b/train_shakespeare.py
new file mode 100644
index 0000000000000000000000000000000000000000..f33cad7c0b1ed6f1054a99fdee188f66d657beb7
--- /dev/null
+++ b/train_shakespeare.py
@@ -0,0 +1,161 @@
+"""
+This script is a placeholder for training LLaMA from scratch.
+Currently, it just trains on the Shakespeare dataset.
+"""
+
+import os
+import time
+from functools import partial
+from typing import Tuple
+
+import lightning as L
+from lightning.fabric.strategies import FSDPStrategy
+
+import torch
+from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
+
+import numpy as np
+
+from lit_llama.model import Block, LLaMA, LLaMAConfig
+from lit_llama.utils import save_model_checkpoint
+
+
+out_dir = "out/training"
+eval_interval = 2000
+eval_iters = 200
+log_interval = 1
+# compilation fails as it does not support torch.complex64 for RoPE
+# compile = False
+
+# Hyperparameters
+learning_rate = 6e-4
+batch_size = 2
+max_iters = 600000
+weight_decay = 1e-1
+beta1 = 0.9
+beta2 = 0.95
+grad_clip = 1.0
+
+# For Shakespeare, choose a smaller block size than vanilla LLaMA
+block_size = 1024
+
+
+def main() -> None:
+ auto_wrap_policy = partial(transformer_auto_wrap_policy, transformer_layer_cls={Block})
+ strategy = FSDPStrategy(auto_wrap_policy=auto_wrap_policy, activation_checkpointing=Block)
+
+ fabric = L.Fabric(accelerator="cuda", devices=4, precision="bf16-mixed", strategy=strategy)
+ fabric.launch()
+ fabric.seed_everything(1337 + fabric.global_rank)
+
+ if fabric.global_rank == 0:
+ os.makedirs(out_dir, exist_ok=True)
+
+ train_data, val_data = load_datasets()
+
+ config = LLaMAConfig.from_name("7B")
+ config.block_size = block_size
+ config.vocab_size = 100 # from prepare_shakespeare.py
+
+ with fabric.device:
+ model = LLaMA(config)
+
+ # if compile:
+ # model = torch.compile(model)
+
+ model = fabric.setup_module(model)
+
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay, betas=(beta1, beta2))
+ optimizer = fabric.setup_optimizers(optimizer)
+
+ train(fabric, model, optimizer, train_data, val_data)
+
+
+def train(
+ fabric: L.Fabric,
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ train_data: np.ndarray,
+ val_data: np.ndarray,
+) -> None:
+ """The training loop.
+
+ Loosely based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
+ """
+
+ iter_num = 0
+
+ while True:
+ # TODO: add learning rate scheduling
+
+ # evaluate the loss on train/val sets and write checkpoints
+ if iter_num > 0 and iter_num % eval_interval == 0:
+ val_loss = validate(fabric, model, val_data)
+ fabric.print(f"step {iter_num}: val loss {val_loss:.4f}")
+ fabric.print(f"Saving checkpoint to {out_dir}")
+ save_model_checkpoint(fabric, model, os.path.join(out_dir, f"iter-{iter_num:06d}-ckpt.pth"))
+
+ t0 = time.time()
+
+ input_ids, targets = get_batch(
+ fabric,
+ train_data,
+ block_size=model.config.block_size, # type: ignore[union-attr,arg-type]
+ )
+ logits = model(input_ids)
+ loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+
+ fabric.backward(loss)
+
+ # TODO: Gradient clipping
+ # if grad_clip != 0.0:
+ # fabric.clip_gradients(model, optimizer, max_norm=grad_clip)
+
+ optimizer.step()
+ optimizer.zero_grad()
+
+ dt = time.time() - t0
+ if iter_num % log_interval == 0:
+ fabric.print(f"iter {iter_num}: loss {loss.item():.4f}, time: {dt*1000:.2f}ms")
+ iter_num += 1
+
+ if iter_num > max_iters:
+ break
+
+
+@torch.no_grad()
+def validate(fabric: L.Fabric, model: torch.nn.Module, val_data: np.ndarray) -> torch.Tensor:
+ fabric.print("Validating ...")
+ model.eval()
+ losses = torch.zeros(eval_iters)
+ for k in range(eval_iters):
+ input_ids, targets = get_batch(
+ fabric,
+ val_data,
+ block_size=model.config.block_size, # type: ignore[union-attr,arg-type]
+ )
+ logits = model(input_ids)
+ loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+ losses[k] = loss.item()
+ out = losses.mean()
+ model.train()
+ return out
+
+
+def get_batch(fabric: L.Fabric, data: np.ndarray, block_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ ix = torch.randint(len(data) - block_size, (batch_size,))
+ x = torch.stack([torch.from_numpy((data[i : i + block_size]).astype(np.int64)) for i in ix])
+ y = torch.stack([torch.from_numpy((data[i + 1 : i + 1 + block_size]).astype(np.int64)) for i in ix])
+ x, y = fabric.to_device((x.pin_memory(), y.pin_memory()))
+ return x, y
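+# Sketch of get_batch() with the defaults above: x and y both have shape
+# (batch_size=2, block_size=1024) and y[b, j] == data[ix[b] + j + 1], i.e. the
+# next-token target for x[b, j].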
+
+
+def load_datasets(data_dir: str = "data/shakespeare") -> Tuple[np.ndarray, np.ndarray]:
+ train_data = np.memmap(os.path.join(data_dir, "train.bin"), dtype=np.uint16, mode="r")
+ val_data = np.memmap(os.path.join(data_dir, "val.bin"), dtype=np.uint16, mode="r")
+ return train_data, val_data
+
+
+if __name__ == "__main__":
+ torch.set_float32_matmul_precision("high")
+ main()