diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..38790238fcd2123afcbf8c4dd41416f53b85b6ad --- /dev/null +++ b/.clang-format @@ -0,0 +1,155 @@ +--- +# Refer to the following link for the explanation of each param: +# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html +Language: Cpp +# BasedOnStyle: Google +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlines: Left +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: true +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: true +AllowShortLoopsOnASingleLine: true +# This is deprecated +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false + # disabling the below splits, else they'll just add to the vertical length of source files! + SplitEmptyFunction: false + SplitEmptyRecord: false + SplitEmptyNamespace: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: WebKit +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 100 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^' + Priority: 2 + - Regex: '^<.*\.h>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IncludeIsMainRegex: '([-_](test|unittest))?$' +IndentCaseLabels: true +IndentPPDirectives: None +IndentWidth: 4 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 4 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 4 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Left +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' + - Language: TextProto + Delimiters: + - pb + - PB + - proto + - PROTO + EnclosingFunctions: + - EqualsProto + - EquivToProto + - 
PARSE_PARTIAL_TEXT_PROTO + - PARSE_TEST_PROTO + - PARSE_TEXT_PROTO + - ParseTextOrDie + - ParseTextProtoOrDie + CanonicalDelimiter: '' + BasedOnStyle: google +# Enabling comment reflow causes doxygen comments to be messed up in their formats! +ReflowComments: false +SortIncludes: true +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +# Be consistent with indent-width, even for people who use tab for indentation! +TabWidth: 4 +UseTab: Never diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..70c72ec2a23bc1889be29f5e8aba957727fc1a00 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +20B_checkpoints/ diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..4f1a448d0fc15960d77f7901d971cb44a07d69bf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +images/memory_profiling.png filter=lfs diff=lfs merge=lfs -text diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..3cb082e801db942fea244b77a74aabed58a19815 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @Quentin-Anthony diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000000000000000000000000000000000..643d547c77f6c4a12dc2a511106eb7a5c30e9190 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,34 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Proposed solution** +If you have an idea for how we can fix this problem, describe it here. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Environment (please complete the following information):** + - GPUs: + - Configs: + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000000000000000000000000000000..e301d68ce745bee7fff0e32ff94d22e15adba266 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: feature request +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+ +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/workflows/.cpu_ci_on_pr.yml b/.github/workflows/.cpu_ci_on_pr.yml new file mode 100644 index 0000000000000000000000000000000000000000..43ce025c0d89923e2ee7181afe5da60e80033aa2 --- /dev/null +++ b/.github/workflows/.cpu_ci_on_pr.yml @@ -0,0 +1,19 @@ +# This file is hidden (.cpu_ci_on_pr.yml) to minimize the number of runner minutes consumed. + +name: "Pull Request CPU Tests" + +on: + pull_request: + paths: # job only triggers when the PR changes files under the megatron directory + - "megatron/**" + +jobs: + run-tests: + runs-on: ubuntu-22.04 # ubuntu-latest currently points to ubuntu-22.04 but 24.04 is in beta - recommend testing on 24.04 and then changing instead of using ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Run CPU tests + uses: ./tests/cpu_tests + with: + target_test_ref: ${{ github.event.pull_request.base.sha }} diff --git a/.github/workflows/coverity_scan.yml b/.github/workflows/coverity_scan.yml new file mode 100644 index 0000000000000000000000000000000000000000..128d279ccf691b53086d67c393454e0d9fff3c65 --- /dev/null +++ b/.github/workflows/coverity_scan.yml @@ -0,0 +1,61 @@ +name: Coverity +on: + workflow_dispatch: + inputs: + build_version: + description: "Version of GPT-NeoX being submitted for scan" + required: false + default: "GPT-NeoX build version" + build_description: + description: "Description of the current build" + required: false + default: "Current build of GPT-NeoX" + +jobs: + coverity: + + runs-on: ubuntu-latest + + env: + COV_USER: ${{ secrets.COV_USER }} # needs to be an email with access to the Coverity stream - add to secrets/actions + COVERITY_PROJECT: ${{ secrets.COVERITY_PROJECT }} + COVERITY_TOKEN: ${{ secrets.COVERITY_TOKEN }} # you can get this token from Coverity stream dashboard: + # https://scan.coverity.com/projects/?tab=project_settings + + steps: + - uses: actions/checkout@v2 + with: + path: gpt-neox + + - name: Install utils + run: | + sudo apt update -y && sudo apt upgrade -y + sudo apt install curl jq wget -y + + - name: Coverity Download + run: | + wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$COVERITY_PROJECT" -O coverity_tool.tgz --no-verbose + mkdir $GITHUB_WORKSPACE/coverity && tar xvf coverity_tool.tgz -C $GITHUB_WORKSPACE/coverity --strip-components=1 + $GITHUB_WORKSPACE/coverity/bin/cov-configure --python + $GITHUB_WORKSPACE/coverity/bin/cov-configure --gcc + + - name: Coverity Scan and Upload + run: | + set -x + pushd $GITHUB_WORKSPACE + cd $GITHUB_WORKSPACE/gpt-neox + $GITHUB_WORKSPACE/coverity/bin/cov-build --dir $GITHUB_WORKSPACE/cov-int --no-command --fs-capture-search ./ + popd + tar caf build-results.bz2 cov-int + curl --form token=$COVERITY_TOKEN \ + --form email=$COV_USER \ + --form file=@build-results.bz2 \ + --form version="${{ inputs.build_version }}" \ + --form description="${{ inputs.build_description }}" \ + https://scan.coverity.com/builds?project=$COVERITY_PROJECT + + - name: Upload Scan Build as Artifact + uses: actions/upload-artifact@v3 + with: + name: coverity-build-${{ github.sha }} + path: build-results.bz2 diff --git a/.github/workflows/cpu_ci.yml
b/.github/workflows/cpu_ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..6910b8a1c0d7fe536e02112eed139c0c35beacb7 --- /dev/null +++ b/.github/workflows/cpu_ci.yml @@ -0,0 +1,34 @@ +name: "Run CPU Tests" + +on: "push" + +jobs: + run-tests: + #runs-on: ubuntu-latest + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + cache: "pip" + cache-dependency-path: "**/requirements*.txt" + + - name: Upgrade Pip + run: python -m pip install --upgrade pip + + - name: Install Dependencies + run: | + sudo apt-get install libopenmpi-dev -y + pip install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu + pip install -r requirements/requirements.txt + pip install -r requirements/requirements-dev.txt + pip install -r requirements/requirements-wandb.txt + + - name: Prepare Data + run: python prepare_data.py + + - name: Run CPU Tests + run: PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python pytest tests -m cpu diff --git a/.github/workflows/cpu_ci_dispatch.yml b/.github/workflows/cpu_ci_dispatch.yml new file mode 100644 index 0000000000000000000000000000000000000000..38485d6a644ba23ea3dae492141441d8adb67999 --- /dev/null +++ b/.github/workflows/cpu_ci_dispatch.yml @@ -0,0 +1,20 @@ +name: "Workflow Dispatch CPU Tests" + +on: + workflow_dispatch: + inputs: + ref: + description: 'Target ref / SHA to run tests against' + required: true + default: 'main' + +jobs: + run-tests: + runs-on: ubuntu-22.04 + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Run CPU tests + uses: ./tests/cpu_tests + with: + target_test_ref: ${{ inputs.ref }} diff --git a/.github/workflows/docker_build.yml b/.github/workflows/docker_build.yml new file mode 100644 index 0000000000000000000000000000000000000000..abd0cdfa92b4e286639fa168113942139f5f5fc8 --- /dev/null +++ b/.github/workflows/docker_build.yml @@ -0,0 +1,50 @@ +name: docker_build + +on: + push: + branches: + - '**' + +jobs: + main: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v2 + + - + name: Docker meta + id: docker_meta + uses: crazy-max/ghaction-docker-meta@v1 + with: + images: leogao2/gpt-neox # list of Docker images to use as base name for tags + tag-sha: true # add git short SHA as Docker tag + + - + name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - + name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - + name: Build and push + id: docker_build + uses: docker/build-push-action@v2 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} + + - + name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 0000000000000000000000000000000000000000..7b06256bf47bdabe86ea359a169f38cdcc4bd0ff --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,60 @@ +name: Pull Request + +#on: [pull_request, workflow_dispatch] +on: workflow_dispatch + +jobs: + pre-commit: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v4 + with: + python-version: "3.10.14" + cache: "pip" + 
cache-dependency-path: "**/requirements*.txt" + # Need the right version of clang-format + - run: pip install -r requirements/requirements-dev.txt + - uses: pre-commit/action@v2.0.3 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - + name: Docker build + id: docker_build + uses: docker/build-push-action@v2 + + update-documentation: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.ref}} + - run: | + rm megatron/__init__.py + pip install shortuuid + rm megatron/neox_arguments/__init__.py + python configs/gen_docs.py + git config user.name github-actions + git config user.email github-actions@github.com + git add configs/neox_arguments.md + git commit -m "Update NeoXArgs docs automatically" + git push + run-tests: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v4 + with: + python-version: "3.10.13" + cache-dependency-path: "**/requirements*.txt" + - name: prepare data + run: python3 prepare_data.py + - name: install pytest + run: python3 -m pip install pytest pytest-forked pyyaml requests wandb + - name: install torch + run: python3 -m pip install torch + - name: install requirements + run: pip install -r requirements/requirements.txt + - name: Run Tests + run: pytest --forked tests diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dbc83e94949cae5f9bf5fb30f829473742f3f00b --- /dev/null +++ b/.gitignore @@ -0,0 +1,157 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# wandb logs +wandb/ + +# data files +data/**/*.idx +data/**/*.bin +data/**/*.json* +data/**/*.txt +data/**/*.gz +data/**/*.zip +data/**/*.np* +data/**/*.npy +checkpoints/ +.vscode/ +*.pt +*.ckpt + +#test logs +test_checkpoint/ +test_logs/ +logs/ +tensorboard/ +src/ + +# test data files +tests/data/*.bin +tests/data/*.idx diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2492553068017e8ab94836b42bc1e33903b7dbf5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,40 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-case-conflict + - id: check-json + - id: check-symlinks + - id: check-yaml + - id: destroyed-symlinks + - id: end-of-file-fixer + exclude: ^(docs/CNAME/|configs/neox_arguments.md) + - id: fix-byte-order-marker + - id: fix-encoding-pragma + args: [--remove] + - id: mixed-line-ending + args: [--fix=lf] + - id: requirements-txt-fixer + - id: trailing-whitespace + exclude: ^(docs/CNAME/|configs/neox_arguments.md) + - repo: https://gitlab.com/daverona/pre-commit/cpp + rev: 0.8.0 + hooks: + - id: clang-format # formatter of C/C++ code based on a style guide: LLVM, Google, Chromium, Mozilla, and WebKit available + args: [] + + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + language_version: python3 + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + args: [ + '--ignore-words-list=reord,dout,te', # Word used in error messages that need rewording. 
te --> transformerengine + --check-filenames, + --check-hidden, + ] + exclude: tests/data/hf_cache/tokenizer/gpt2.json diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..248d01c81ca0c11403f2094e22c35785fc77294f --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,79 @@ +# YAML 1.2 +--- +authors: + - affiliation: EleutherAI + family-names: Andonian + given-names: Alex + - affiliation: EleutherAI + family-names: Anthony + given-names: Quentin + - affiliation: EleutherAI + family-names: Biderman + given-names: Stella + - affiliation: EleutherAI + family-names: Black + given-names: Sid + - affiliation: EleutherAI + family-names: Gali + given-names: Preetham + - affiliation: EleutherAI + family-names: Gao + given-names: Leo + - affiliation: EleutherAI + family-names: Hallahan + given-names: Eric + - affiliation: EleutherAI + family-names: Levy-Kramer + given-names: Josh + - affiliation: EleutherAI + family-names: Leahy + given-names: Connor + - affiliation: EleutherAI + family-names: Nestler + given-names: Lucas + - affiliation: EleutherAI + family-names: Parker + given-names: Kip + - affiliation: EleutherAI + family-names: Pieler + given-names: Michael + - affiliation: EleutherAI + family-names: Phang + given-names: Jason + - affiliation: EleutherAI + family-names: Purohit + given-names: Shivanshu + - affiliation: EleutherAI + family-names: Schoelkopf + given-names: Hailey + - affiliation: EleutherAI + family-names: Stander + given-names: Dashiell + - affiliation: EleutherAI + family-names: Songz + given-names: Tri + - affiliation: EleutherAI + family-names: Tigges + given-names: Curt + - affiliation: EleutherAI + family-names: Thérien + given-names: Benjamin + - affiliation: EleutherAI + family-names: Wang + given-names: Phil + - affiliation: EleutherAI + family-names: Weinbach + given-names: Samuel +cff-version: "1.1.0" +keywords: + - "Transformers" + - "Massive language model" + - "Autoregressive language model" +license: "Apache-2.0" +message: "If you use this software, please cite it using these metadata." +repository-code: "https://www.github.com/eleutherai/gpt-neox" +title: "GPT-NeoX: Large Scale Autoregressive Language Modeling in PyTorch" +version: "2.0.0" +doi: "10.5281/zenodo.5879544" +date-released: 2021-08-23 +... diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..ee633e8c5b4ed5387e511f430adb910068ba6e24 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,86 @@ +# Contributing +GPT-NeoX welcomes your contributions! + +## Prerequisites +GPT-NeoX uses [pre-commit](https://pre-commit.com/) to ensure that formatting is +consistent across the codebase. First, ensure that `pre-commit` is installed with +`pip install pre-commit`. Next, the pre-commit hooks must be installed once +before commits can be made: +```bash +pre-commit install +``` +Please install `clang-format` from Conda: +```bash +conda install clang-format +``` + +Afterwards, our suite of formatting tests runs automatically before each `git commit`. You +can also run these manually: +```bash +pre-commit run --all-files +``` +If a formatting test fails, it will fix the modified code in place and abort +the `git commit`. After looking over the changes, you can `git add` the fixed files +and then repeat the previous `git commit` command. + + +## Testing +GPT-NeoX tracks two types of tests: unit tests and more costly model convergence tests. +Unit tests are found in `tests/unit/` and the model convergence tests are found in +`tests/model/`.
+ +### Unit Tests +[PyTest](https://docs.pytest.org/en/latest/) is used to execute tests. PyTest can be +installed from PyPI via `pip install pytest`. Simply invoke `pytest --forked` to run the +unit tests: +```bash +pytest --forked tests/unit/ +``` +You can also provide the `-v` flag to `pytest` to see additional information about the +tests. Note that [pytest-forked](https://github.com/pytest-dev/pytest-forked) and the +`--forked` flag are required to test CUDA functionality in distributed tests. + +### Model Tests +To execute model tests, first install GPT-NeoX. Next, execute the model test driver: +```bash +cd tests/model/ +pytest run_sanity_check.py +``` +Note that the `--forked` flag is not necessary for the model tests. + +## Contributor License Agreement +This project welcomes contributions and suggestions. Most contributions require you to +agree to a Contributor License Agreement (CLA) declaring that you have the right to, and +actually do, grant us the rights to use your contribution. For details, visit +https://cla-assistant.io/EleutherAI/gpt-neox. + +When you submit a pull request, a CLA bot will automatically determine whether you need +to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply +follow the instructions provided by the bot. You will only need to do this once across +all repos using our CLA. + +## New Feature Contribution Guidelines +Unlike bug fixes or improvements to existing features (where users usually submit a PR directly and we review it), adding a new feature to GPT-NeoX requires several steps: (1) proposal and discussion, (2) implementation and verification, (3) release and maintenance. This general guideline applies to all new feature contributions. Core GPT-NeoX team member contributions may complete step 1 internally. + +### Step 1: Proposal and Discussion +We ask users to first post their intended feature in an issue. This issue needs to include: + +* A description of the proposed feature. +* A motivation for why it will be useful to GPT-NeoX users. +* A rough design of how you plan to implement the feature inside GPT-NeoX. +* (Important) Results or planned experiments to demonstrate the effectiveness and correctness of the feature. + * If the feature only affects performance and does not affect training convergence, we require testing on a fraction of training to demonstrate that the training/validation losses are consistent with the baseline, and that the performance is better than the baseline. + * If the feature does affect training convergence, we require testing the whole training to demonstrate that the feature achieves better/on-par final model quality and training performance compared to the baseline. + +Based on the issue, we will discuss the merit of the new feature and decide whether to accept or decline the proposal. Once accepted and after we confirm the design and implementation plan, we are ready for step 2. + +### Step 2: Implementation and Verification +The contributor will proceed and implement the feature, and the GPT-NeoX team will provide guidance/help as needed. The required deliverables include: + +* A PR to [EleutherAI/GPT-NeoX](https://github.com/EleutherAI/gpt-neox) including (1) the feature implementation (2) unit tests (3) documentation (4) example usage. +* In the implementation (code, documentation, tutorial), we require the feature author to record their GitHub username as a contact method for future questions/maintenance. + +After receiving the PRs, we will review them and merge them after necessary tests/fixes.
+ +### Step 3: Release and Maintenance +After the PRs are merged, we will announce the feature on our website (with credit to the feature author). We ask the feature author to commit to the maintenance of the feature. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..c338baceceef59bfccfe76138f69a0e7323047f9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,90 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM nvcr.io/nvidia/pytorch:24.02-py3 + +ENV DEBIAN_FRONTEND=noninteractive + +# metainformation +LABEL org.opencontainers.image.version = "2.0" +LABEL org.opencontainers.image.authors = "contact@eleuther.ai" +LABEL org.opencontainers.image.source = "https://www.github.com/eleutherai/gpt-neox" +LABEL org.opencontainers.image.licenses = "Apache-2.0" +LABEL org.opencontainers.image.base.name="nvcr.io/nvidia/pytorch:24.02-py3" + +#### System package (uses the default Python 3 version in Ubuntu 22.04) +RUN apt-get update -y && \ + apt-get install -y \ + python3-pip sudo pdsh \ + htop tmux zstd software-properties-common \ + nfs-common pdsh cmake htop iftop iotop ssh \ + iputils-ping net-tools libcupti-dev libmlx4-1 infiniband-diags ibutils \ + rdmacm-utils perftest rdma-core && \ + update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && \ + update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \ + python -m pip install --upgrade pip && \ + python -m pip install gpustat + +### SSH +RUN mkdir /var/run/sshd && \ + # Prevent user being kicked off after login + sed -i 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' /etc/pam.d/sshd && \ + echo 'AuthorizedKeysFile .ssh/authorized_keys' >> /etc/ssh/sshd_config && \ + echo 'PasswordAuthentication yes' >> /etc/ssh/sshd_config && \ + # FIX SUDO BUG: https://github.com/sudo-project/sudo/issues/42 + echo "Set disable_coredump false" >> /etc/sudo.conf + +# Expose SSH port +EXPOSE 22 + +# Needs to be in docker PATH if compiling other items & bashrc PATH (later) +ENV PATH=/usr/local/mpi/bin:${PATH} \ + LD_LIBRARY_PATH=/usr/local/lib:/usr/local/mpi/lib:/usr/local/mpi/lib64:${LD_LIBRARY_PATH} + +# Create a wrapper for OpenMPI to allow running as root by default +RUN mv /usr/local/mpi/bin/mpirun /usr/local/mpi/bin/mpirun.real && \ + echo '#!/bin/bash' > /usr/local/mpi/bin/mpirun && \ + echo 'mpirun.real --allow-run-as-root --prefix /usr/local/mpi "$@"' >> /usr/local/mpi/bin/mpirun && \ + chmod a+x /usr/local/mpi/bin/mpirun + +#### User account +RUN useradd --create-home --uid 1000 --shell /bin/bash mchorse && \ + usermod -aG sudo mchorse && \ + echo "mchorse ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +## SSH config and bashrc +RUN mkdir -p /home/mchorse/.ssh /job && \ + echo 'Host *' > /home/mchorse/.ssh/config && \ + echo ' StrictHostKeyChecking no' >> /home/mchorse/.ssh/config && \ + echo 'export PDSH_RCMD_TYPE=ssh' >> /home/mchorse/.bashrc && \ + echo 'export PATH=/home/mchorse/.local/bin:$PATH' >> /home/mchorse/.bashrc
&& \ + echo 'export PATH=/usr/local/mpi/bin:$PATH' >> /home/mchorse/.bashrc && \ + echo 'export LD_LIBRARY_PATH=/usr/local/lib:/usr/local/mpi/lib:/usr/local/mpi/lib64:$LD_LIBRARY_PATH' >> /home/mchorse/.bashrc + +#### Python packages +COPY requirements/* ./ +RUN python -m pip install --no-cache-dir -r requirements.txt && pip install -r requirements-onebitadam.txt +RUN python -m pip install -r requirements-wandb.txt +RUN python -m pip install protobuf==3.20.* + +COPY megatron/fused_kernels/ /megatron/fused_kernels +WORKDIR /megatron/fused_kernels +RUN python setup.py install + +# Clear staging +RUN mkdir -p /tmp && chmod 0777 /tmp + +#### SWITCH TO mchorse USER +USER mchorse +WORKDIR /home/mchorse diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b7224a6144483944a22605dff0d6e90a471d21aa --- /dev/null +++ b/LICENSE @@ -0,0 +1,467 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-- + +This repository also contains code from Hugging Face Inc., Google Research, +and Facebook (from their Fairseq project). Files from these +organizations have notices at the top of each file. Below are licenses +used in those files, as indicated. + + +------------- LICENSE FOR NVIDIA code -------------- + + +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------- LICENSE FOR huggingface and Google Research code -------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------- LICENSE FOR Facebook Fairseq code -------------- + +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..f44791183e894ad5f6bb72cf4498db5c8566d498 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include megatron/data/Makefile +include megatron/data/helpers.cpp diff --git a/README-MUP.md b/README-MUP.md new file mode 100644 index 0000000000000000000000000000000000000000..b2cb01a5538b3820c9c46db87e6bb484a4d4379f --- /dev/null +++ b/README-MUP.md @@ -0,0 +1,49 @@ +# How to use Mup (https://github.com/microsoft/mup) + +## Add mup neox args to your config + +``` +# mup + +"use-mup": true, + +"save-base-shapes": false, # this only needs to be enabled once in order to generate the base-shapes-file on each rank + +"base-shapes-file": "base-shapes", # load base shapes from this file + +"coord-check": false, # generate coord check plots to verify mup's implementation in neox + +# mup hp search + +"mup-init-scale": 1.0, + +"mup-attn-temp": 1.0, + +"mup-output-temp": 1.0, + +"mup-embedding-mult": 1.0, + +"mup-rp-embedding-mult": 1.0, +``` + +## Generate base shapes + +1. Set use-mup to true +2. Set save-base-shapes to true +3. Run once. gpt-neox will instantiate a base model and a delta model, then save one file per rank named `<base-shapes-file>.<rank>`. gpt-neox will exit immediately. +4. Set save-base-shapes to false + +## Generate coord check plots (optional) + +1. Keep use-mup true +2. Set coord-check to true +3. Run once. gpt-neox will output jpg images similar to https://github.com/microsoft/mutransformers/blob/main/README.md#coord-check. gpt-neox will exit immediately. +4. Set coord-check to false + +## Tune mup hyperparameters and LR + +The values under `mup hp search` were added and correspond to appendix F.4 from https://arxiv.org/pdf/2203.03466.pdf. These and LR are tuned with a random search using the scaled-up config (tested with 6-7B.yml) but with hidden-size set to the value from the scaled-down config (125M.yml). + +## Transfer + +With the best LR set and the best mup HPs set, revert the value of hidden-size in the scaled-up config and run again.
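+ +For concreteness, the full sequence looks roughly like the sketch below. It assumes the standard `deepy.py` launcher and a scaled-up config at `configs/6-7B.yml` (both hypothetical placeholders for your setup); the toggles are the config keys listed at the top of this file: + +```bash +# 1. Base shapes: set "save-base-shapes": true and run once; one base-shapes file +#    is saved per rank, then gpt-neox exits immediately. +python ./deepy.py train.py configs/6-7B.yml +# 2. Optional coord check: set "coord-check": true and run once, then inspect the jpg plots. +python ./deepy.py train.py configs/6-7B.yml +# 3. HP search and transfer: with both toggles back to false, random-search LR and the +#    mup-* values using the scaled-down hidden-size, then restore hidden-size and train. +python ./deepy.py train.py configs/6-7B.yml +```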
diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0d4e2939fddb50d6ba89a7de8c966980aa66cb09 --- /dev/null +++ b/README.md @@ -0,0 +1,863 @@ +[![GitHub issues](https://img.shields.io/github/issues/EleutherAI/gpt-neox)](https://github.com/EleutherAI/gpt-neox/issues) +[Weights & Biases monitoring](https://wandb.ai/eleutherai/neox) + +# GPT-NeoX + +This repository records [EleutherAI](https://www.eleuther.ai)'s library for training large-scale language models on GPUs. Our current framework is based on NVIDIA's [Megatron Language Model](https://github.com/NVIDIA/Megatron-LM) and has been augmented with techniques from [DeepSpeed](https://www.deepspeed.ai) as well as some novel optimizations. We aim to make this repo a centralized and accessible place to gather techniques for training large-scale autoregressive language models, and accelerate research into large-scale training. This library is in widespread use in [academic, industry, and government labs](https://github.com/EleutherAI/gpt-neox#adoption-and-publications), including by researchers at Oak Ridge National Lab, CarperAI, Stability AI, Together.ai, Korea University, Carnegie Mellon University, and the University of Tokyo, among others. Uniquely among similar libraries, GPT-NeoX supports a wide variety of systems and hardware, including launching via Slurm, MPI, and the IBM Job Step Manager, and has been run at scale on [AWS](https://aws.amazon.com/), [CoreWeave](https://www.coreweave.com/), [ORNL Summit](https://www.olcf.ornl.gov/summit/), [ORNL Frontier](https://www.olcf.ornl.gov/frontier/), [LUMI](https://www.lumi-supercomputer.eu/), and others. + +**If you are not looking to train models with billions of parameters from scratch, this is likely the wrong library to use. For generic inference needs, we recommend you use the Hugging Face `transformers` library instead, which supports GPT-NeoX models.** + +## Why GPT-NeoX? + +GPT-NeoX leverages many of the same features and technologies as the popular Megatron-DeepSpeed library, but with substantially increased usability and novel optimizations. Major features include: +* Distributed training with ZeRO and 3D parallelism +* Support for a wide variety of systems and hardware, including launching via Slurm, MPI, and the IBM Job Step Manager; it has been run at scale on [AWS](https://aws.amazon.com/), [CoreWeave](https://www.coreweave.com/), Oak Ridge's [Summit](https://www.olcf.ornl.gov/summit/) and [Frontier](https://www.olcf.ornl.gov/frontier/), [Pacific Northwest National Laboratory](https://hpc.pnl.gov/index.shtml), Argonne's [Polaris](https://docs.alcf.anl.gov/polaris/data-science-workflows/applications/gpt-neox/), [LUMI](https://www.lumi-supercomputer.eu/), and more. +* Cutting-edge architectural innovations, including rotary and ALiBi positional embeddings, parallel feedforward attention layers, and flash attention. +* Predefined configurations for popular architectures including Pythia, PaLM, Falcon, and LLaMA 1 \& 2 +* Curriculum Learning +* Easy integration with the open-source ecosystem, including Hugging Face's [tokenizers](https://github.com/huggingface/tokenizers) and [transformers](https://github.com/huggingface/transformers/) libraries, experiment monitoring via [WandB](https://wandb.ai/site)/[Comet](https://www.comet.com/site/)/TensorBoard, and evaluation via our [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness).
+ +## News +**[9/9/2024]** We now support preference learning via [DPO](https://arxiv.org/abs/2305.18290), [KTO](https://arxiv.org/abs/2402.01306), and reward modeling + +**[9/9/2024]** We now support integration with [Comet ML](https://www.comet.com/site/), a machine learning monitoring platform + +**[5/21/2024]** We now support [RWKV](https://www.rwkv.com/) with pipeline parallelism! See the PRs for [RWKV](https://github.com/EleutherAI/gpt-neox/pull/1198) and [RWKV+pipeline](https://github.com/EleutherAI/gpt-neox/pull/1221) + +**[3/21/2024]** We now support Mixture-of-Experts (MoE) + +**[3/17/2024]** We now support AMD MI250X GPUs + +**[3/15/2024]** We now support [Mamba](https://github.com/state-spaces/mamba) with tensor parallelism! See [the PR](https://github.com/EleutherAI/gpt-neox/pull/1184) + +**[8/10/2023]** We now support checkpointing with AWS S3! Activate with the `s3_path` config option (for more detail, see [the PR](https://github.com/EleutherAI/gpt-neox/pull/1010)) + +**[9/20/2023]** As of https://github.com/EleutherAI/gpt-neox/pull/1035, we have deprecated Flash Attention 0.x and 1.x, and migrated support to Flash Attention 2.x. We don't believe this will cause problems, but if you have a specific use-case that requires old flash support using the latest GPT-NeoX, please raise an issue. + +**[8/10/2023]** We have experimental support for LLaMA 2 and Flash Attention v2 in our [math-lm](https://github.com/EleutherAI/math-lm) project that will be upstreamed later this month. + +**[5/17/2023]** After fixing some miscellaneous bugs, we now fully support bf16. + +**[4/11/2023]** We have upgraded our Flash Attention implementation to now support ALiBi positional embeddings. + +**[3/9/2023]** We have released GPT-NeoX 2.0.0, an upgraded version built on the latest DeepSpeed, which we will regularly sync with going forward. + +## Versions + +Prior to 3/9/2023, GPT-NeoX relied on [DeeperSpeed](https://github.com/EleutherAI/DeeperSpeed), which was based on an old version of DeepSpeed (0.3.15). In order to migrate to the latest upstream DeepSpeed version while allowing users to access the old versions of GPT-NeoX and DeeperSpeed, we have introduced two versioned releases for both libraries: + +- Version 2.0 of [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/releases/tag/v2.0) and [DeeperSpeed](https://github.com/EleutherAI/DeeperSpeed/releases/tag/v2.0) are the latest versions built on the latest DeepSpeed, and will be maintained going forward. +- Version 1.0 of [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/releases/tag/v1.0) and [DeeperSpeed](https://github.com/EleutherAI/DeeperSpeed/releases/tag/v1.0) maintain snapshots of the old stable versions that [GPT-NeoX-20B](https://arxiv.org/abs/2204.06745) and the [Pythia Suite](https://github.com/EleutherAI/pythia) were trained on.
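+ +For example, to work against the stable 1.0 snapshot, one option (a sketch; the `v1.0` tag name comes from the release links above) is to check out the tagged release: + +```bash +# Clone the repository and pin it to the v1.0 release tag +git clone https://github.com/EleutherAI/gpt-neox.git +cd gpt-neox +git checkout v1.0 +```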
+ +# Contents + +- [GPT-NeoX](#gpt-neox) + * [Why GPT-NeoX?](#why-gpt-neox) + * [News](#news) + * [Versions](#versions) +- [Contents](#contents) +- [Quick Start](#quick-start) + * [Environment and Dependencies](#environment-and-dependencies) + + [Host Setup](#host-setup) + + [Flash Attention](#flash-attention) + + [Multi-Node Launching](#multi-node-launching) + + [Containerized Setup](#containerized-setup) + * [Usage](#usage) +- [Configuration](#configuration) + * [Mixture of Experts](#mixture-of-experts) +- [Datasets](#datasets) + * [Preconfigured Datasets](#preconfigured-datasets) + * [Using Custom Data](#using-custom-data) +- [Training and Finetuning](#training-and-finetuning) + * [Pretrained Models](#pretrained-models) + + [GPT-NeoX-20B](#gpt-neox-20b) + + [Pythia](#pythia) + + [Polyglot](#polyglot) +- [Inference](#inference) +- [Evaluation](#evaluation) +- [Exporting to Hugging Face](#exporting-to-hugging-face) +- [Monitoring](#monitoring) + * [Weights and Biases](#weights-and-biases) + * [TensorBoard](#tensorboard) +- [Running on multi-node](#running-on-multi-node) +- [Profiling](#profiling) +- [Adoption and Publications](#adoption-and-publications) + * [Publications](#publications) + * [Models](#models) + + [English LLMs](#english-llms) + + [Non-English LLMs](#non-english-llms) + + [Code Models](#code-models) + + [AI for Science](#ai-for-science) + + [Other Modalities](#other-modalities) +- [Administrative Notes](#administrative-notes) + * [Citing GPT-NeoX](#citing-gpt-neox) + * [Contributing](#contributing) + * [Licensing](#licensing) + * [Acknowledgements](#acknowledgements) + +# Quick Start + +## Environment and Dependencies + +### Host Setup + +First, make sure you are in an environment with Python 3.8 and an appropriate version of PyTorch 1.8 or later installed. **Note:** Some of the libraries that GPT-NeoX depends on have not been updated to be compatible with Python 3.10+. Python 3.9 appears to work, but this codebase has been developed and tested for Python 3.8. + +To install the remaining basic dependencies, run: + +```bash +pip install -r requirements/requirements.txt +pip install -r requirements/requirements-wandb.txt # optional, if logging using WandB +pip install -r requirements/requirements-tensorboard.txt # optional, if logging via tensorboard +pip install -r requirements/requirements-comet.txt # optional, if logging via Comet +``` + +from the repository root. + +> [!Warning] +> Our codebase relies on [DeeperSpeed](https://github.com/EleutherAI/DeeperSpeed), our fork of the [DeepSpeed](https://github.com/microsoft/DeepSpeed) library with some added changes. We strongly recommend using Anaconda, a virtual machine, or some other form of environment isolation before continuing. Failure to do so may cause other repositories that rely on DeepSpeed to break. + + + +### Fused Kernels +We now support AMD GPUs (MI100, MI250X) through JIT fused-kernel compilation. Fused kernels will be built and loaded as needed. To avoid waiting during job launching, you can also do the following for a manual pre-build: + +```python +# In a Python interpreter, run: +from megatron.fused_kernels import load +load() +``` +This will automatically adapt the build process to different GPU vendors (AMD, NVIDIA) without platform-specific code changes.
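+ +If you prefer not to open an interactive interpreter, the same pre-build can be run as a one-liner (a minimal sketch, assuming you invoke it from the repository root so that the `megatron` package is importable): + +```bash +# Pre-build the fused kernels once, before launching the job +python -c "from megatron.fused_kernels import load; load()" +```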
To further test fused kernels using `pytest`, use `pytest tests/model/test_fused_kernels.py` + +### Flash Attention + +To use [Flash-Attention](https://github.com/HazyResearch/flash-attention), install the additional dependencies in `./requirements/requirements-flashattention.txt` and set the attention type in your configuration accordingly (see [configs](./configs/)). This can provide significant speed-ups over regular attention on certain GPU architectures, including Ampere GPUs (such as A100s); see the repository for more details. + + +### Multi-Node Launching + +NeoX and Deep(er)Speed support training on multiple nodes, and you can use a variety of launchers to orchestrate multi-node jobs. + +In general, there needs to be a "hostfile" somewhere accessible with the format: + +```bash +node1_ip slots=8 +node2_ip slots=8 +``` + +where the first column contains the IP address for each node in your setup and the number of slots is the number of GPUs that node has access to. In your config you must pass in the path to the hostfile with `"hostfile": "/path/to/hostfile"`. Alternatively, the path to the hostfile can be in the environment variable `DLTS_HOSTFILE`. + +#### pdsh + +`pdsh` is the default launcher, and if you're using `pdsh` then all you need to do (besides ensuring that pdsh is installed in your environment) is set `{"launcher": "pdsh"}` in your config files. + +#### MPI + +If using MPI, you must specify the MPI library (DeepSpeed/GPT-NeoX currently supports `mvapich`, `openmpi`, `mpich`, and `impi`, though `openmpi` is the most commonly used and tested) as well as pass the `deepspeed_mpi` flag in your config file: + +```json +{ + "launcher": "openmpi", + "deepspeed_mpi": true +} +``` + +With your environment properly set up and the correct configuration files, you can use `deepy.py` like a normal python script and start (for example) a training job with: + +`python3 deepy.py train.py /path/to/configs/my_model.yml` + +#### Slurm + +Using Slurm can be slightly more involved. Like with MPI, you must add the following to your config: + +```json +{ + "launcher": "slurm", + "deepspeed_slurm": true +} +``` +If you do not have ssh access to the compute nodes in your Slurm cluster, you also need to add `{"no_ssh_check": true}` to your config. + +#### (Advanced) Custom Launching + +There are many cases where the above default launching options are not sufficient: + +- Many clusters have their own unique job scheduler or specific MPI/Slurm arguments necessary for launching jobs, such as [Summit JSRun](https://docs.olcf.ornl.gov/systems/summit_user_guide.html#job-launcher-jsrun) or [LLNL Flux](https://computing.llnl.gov/projects/flux-building-framework-resource-management) +- While the above Slurm/MPI/pdsh default options are enough for most job runs, advanced users may want to add arguments for optimization or debugging purposes + +In these cases, you will need to modify the DeepSpeed [multinode runner](https://github.com/microsoft/DeepSpeed/blob/17957728c0362bf8ae70feca308e491e55ef9feb/deepspeed/launcher/multinode_runner.py) utility to support your usecase. Broadly, these enhancements fall under two categories: + +##### 1. Adding a Launcher (e.g. [JSRun](https://docs.olcf.ornl.gov/systems/summit_user_guide.html#job-launcher-jsrun), [Flux](https://computing.llnl.gov/projects/flux-building-framework-resource-management), etc) + +In this case, you must add a new multinode runner class to `deepspeed/launcher/multinode_runner.py` and expose it as a configuration option in GPT-NeoX.
Examples on how we did this for [Summit JSRun](https://docs.olcf.ornl.gov/systems/summit_user_guide.html#job-launcher-jsrun) are in [this DeeperSpeed commit](https://github.com/EleutherAI/DeeperSpeed/commit/9aed6c8500d7c492d85c5c88687322dbda70e370) and [this GPT-NeoX commit](https://github.com/EleutherAI/gpt-neox/commit/3782c7ae60f8624e566e3879b89bb09e8b59b869), respectively. + +##### 2. Modifying Run Command or Environment Variables + +We have encountered many cases where we wish to modify the MPI/Slurm run command for an optimization or to debug (e.g. to modify the [Slurm srun CPU binding](https://slurm.schedmd.com/srun.html#OPT_cpu-bind) or to tag MPI logs with the rank). In this case, you must modify the multinode runner class' run command under its `get_cmd` method (e.g. [mpirun_cmd](https://github.com/microsoft/DeepSpeed/blob/17957728c0362bf8ae70feca308e491e55ef9feb/deepspeed/launcher/multinode_runner.py#L135-L147) for OpenMPI). Examples on how we did this to provide optimized and rank-tagged run commands using Slurm and OpenMPI for the Stability cluster are in [this DeeperSpeed branch](https://github.com/microsoft/DeepSpeed/compare/master...EleutherAI:DeeperSpeed:v2.0-stability). + + +#### Hostfile Generation + +In general, you will not be able to rely on a single fixed hostfile, so you need a script that generates one dynamically when your job starts. An example script to dynamically generate a hostfile using [Slurm](https://slurm.schedmd.com/documentation.html) and 8 GPUs per node is: + +```bash +#!/bin/bash +GPUS_PER_NODE=8 +mkdir -p /sample/path/to/hostfiles +# need to add the current slurm jobid to hostfile name so that we don't add to previous hostfile +hostfile=/sample/path/to/hostfiles/hosts_$SLURM_JOBID +# be extra sure we aren't appending to a previous hostfile +rm $hostfile &> /dev/null +# loop over the node names +for i in `scontrol show hostnames $SLURM_NODELIST` +do + # add a line to the hostfile + echo $i slots=$GPUS_PER_NODE >>$hostfile +done +``` + +`$SLURM_JOBID` and `$SLURM_NODELIST` are environment variables that Slurm creates for you. See the [sbatch documentation](https://slurm.schedmd.com/sbatch.html#SECTION_OUTPUT-ENVIRONMENT-VARIABLES) for a full list of available Slurm environment variables set at job creation time. + +#### Job Launching + +Then you can create an [sbatch](https://slurm.schedmd.com/sbatch.html) script from which to kick off your GPT-NeoX job. A bare-bones sbatch script on a Slurm-based cluster with 8 GPUs per node would look like this: + +```bash +#!/bin/bash +#SBATCH --job-name="neox" +#SBATCH --partition=your-partition +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=8 +#SBATCH --gres=gpu:8 + +# Some potentially useful distributed environment variables +export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"` +export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) +export MASTER_PORT=12802 +export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l` + +# Your hostfile creation script from above +./write_hostfile.sh +# Tell DeepSpeed where to find our generated hostfile via DLTS_HOSTFILE +export DLTS_HOSTFILE=/sample/path/to/hostfiles/hosts_$SLURM_JOBID + +# Launch training +python3 deepy.py train.py /sample/path/to/your/configs/my_model.yml + +``` + +You can then kick off a training run with `sbatch my_sbatch_script.sh`. + + +### Containerized Setup + +We also provide a Dockerfile and docker-compose configuration if you prefer to run NeoX in a container.
+ +To run the container, you need appropriate GPU drivers, an up-to-date installation of Docker, and the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) installed. To test that your installation is working, you can use their "sample workload": + +``` +docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi +``` + +Provided that runs successfully, export NEOX_DATA_PATH and NEOX_CHECKPOINT_PATH in your environment to specify your data directory and the directory for storing and loading checkpoints: + +``` +export NEOX_DATA_PATH=/mnt/sda/data/enwiki8 #or wherever your data is stored on your system +export NEOX_CHECKPOINT_PATH=/mnt/sda/checkpoints +``` + +And then, from the gpt-neox directory, you can build the image and run a shell in a container with + +``` +docker compose run gpt-neox bash +``` + +After the build, you should be able to do this: +``` +mchorse@537851ed67de:~$ echo $(pwd) +/home/mchorse +mchorse@537851ed67de:~$ ls -al +total 48 +drwxr-xr-x 1 mchorse mchorse 4096 Jan 8 05:33 . +drwxr-xr-x 1 root root 4096 Jan 8 04:09 .. +-rw-r--r-- 1 mchorse mchorse 220 Feb 25 2020 .bash_logout +-rw-r--r-- 1 mchorse mchorse 3972 Jan 8 04:09 .bashrc +drwxr-xr-x 4 mchorse mchorse 4096 Jan 8 05:35 .cache +drwx------ 3 mchorse mchorse 4096 Jan 8 05:33 .nv +-rw-r--r-- 1 mchorse mchorse 807 Feb 25 2020 .profile +drwxr-xr-x 2 root root 4096 Jan 8 04:09 .ssh +drwxrwxr-x 8 mchorse mchorse 4096 Jan 8 05:35 chk +drwxrwxrwx 6 root root 4096 Jan 7 17:02 data +drwxr-xr-x 11 mchorse mchorse 4096 Jan 8 03:52 gpt-neox +``` + +For a long-running job, you should run + +``` +docker compose up -d +``` + +to run the container in detached mode, and then, in a separate terminal session, run + +``` +docker compose exec gpt-neox bash +``` + +You can then run any job you want from inside the container. + +Concerns when running for a long time or in detached mode include: + - You will have to terminate the container manually when you are no longer using it. + - If you want processes to continue running when your shell session ends, you will need to background them. + - If you want logging, you will have to make sure to pipe logs to disk, and set up wandb and/or Comet logging. + +If you prefer to run the prebuilt container image from dockerhub, you can run the docker compose commands with `-f docker-compose-dockerhub.yml` instead (note that `-f` is a global option, so it goes before the subcommand), e.g., + +``` +docker compose -f docker-compose-dockerhub.yml run gpt-neox bash +``` + +## Usage + +All functionality should be launched using `deepy.py`, a wrapper around the `deepspeed` launcher. + +We currently offer three main functions: +1. `train.py` is used for training and finetuning models. +2. `eval.py` is used to evaluate a trained model using the [language model evaluation harness](https://github.com/EleutherAI/lm-evaluation-harness). +3. `generate.py` is used to sample text from a trained model. + +These can be launched with: + +```bash +./deepy.py [script.py] [./path/to/config_1.yml] [./path/to/config_2.yml] ... [./path/to/config_n.yml] +``` + +For example, to launch training you can run +```bash +./deepy.py train.py ./configs/20B.yml ./configs/local_cluster.yml +``` + +For more details on each entry point, see the [Training and Finetuning](#training-and-finetuning), [Inference](#inference), and [Evaluation](#evaluation) sections, respectively. + +# Configuration + +GPT-NeoX parameters are defined in a YAML configuration file which is passed to the deepy.py launcher.
We have provided some example .yml files in [configs](./configs/), showing a diverse array of features and model sizes. + +These files are generally complete, but non-optimal. For example, depending on your specific GPU configuration, you may need to change some settings, such as `pipe-parallel-size` and `model-parallel-size` to increase or decrease the degree of parallelisation, `train_micro_batch_size_per_gpu` or `gradient-accumulation-steps` to modify batch-size-related settings, or the `zero_optimization` dict to modify how optimizer states are parallelised across workers. + +For a more detailed guide to the features available and how to configure them, see [the configuration README](configs/README.md), and for documentation of every possible argument, see [configs/neox_arguments.md](configs/neox_arguments.md). + +## Mixture of Experts + +GPT-NeoX includes multiple expert implementations for MoE. To select between them, specify `moe_type` of `megablocks` (default) or `deepspeed`. + +Both are based on the DeepSpeed MoE parallelism framework, which supports tensor-expert-data parallelism. +Both allow you to toggle between token-dropping and dropless operation (the default, and what Megablocks was designed for). +Sinkhorn routing is coming soon! + +For an example of a basic complete configuration, see `configs/125M-dmoe.yml` (for Megablocks dropless) or `configs/125M-moe.yml`. + +Most MoE-related configuration arguments are prefixed with `moe`. Some common configuration parameters and their defaults are as follows: + +``` +moe_type: megablocks +moe_num_experts: 1 # 1 disables MoE. 8 is a reasonable value. +moe_loss_coeff: 0.1 +expert_interval: 2 # See details below +enable_expert_tensor_parallelism: false # See details below +moe_expert_parallel_size: 1 # See details below +moe_token_dropping: false +``` + +DeepSpeed can be further configured with the following: + +``` +moe_top_k: 1 +moe_min_capacity: 4 +moe_train_capacity_factor: 1.0 # Setting to 1.0 +moe_eval_capacity_factor: 1.0 # Setting to 1.0 +``` + +One MoE layer is present every `expert_interval` transformer layers, including the first, so with 12 layers total: + +``` +0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 +``` + +Experts would be in these layers: + +``` +0, 2, 4, 6, 8, 10 +``` + +By default, we use expert-data parallelism, so any available tensor parallelism (`model_parallel_size`) will be used for expert routing. For instance, given the following: + +``` +expert_parallel_size: 4 +model_parallel_size: 2 # aka tensor parallelism +``` + +With 32 GPUs, the behavior will look like: + +- In non-expert layers: + - Tensor parallelism is 2. (There are 32 / 2 = 16 such tensor parallel groups, each of size 2.) + - Data parallelism implicitly becomes 32 / 2 = 16. +- In expert layers: + - There is no tensor parallelism. + - Expert parallelism is 4. (There are 32 / 4 = 8 expert parallel groups, each of size 4.) + - Data parallelism implicitly becomes 32 / 4 = 8. Some cross-node token routing happens as a result of this redivision of data parallelism between 16 and 8. To avoid it, ensure that `expert_parallel_size == model_parallel_size`. + +Setting `enable_expert_tensor_parallelism` enables tensor-expert-data (TED) parallelism. The way to interpret the above would then be: + +- In non-expert layers: same as before. +- In expert layers: + - Tensor parallelism is 2. (There are 32 / 2 = 16 tensor parallel groups, each of size 2.) + - Expert parallelism is 4. (There are 32 / 4 = 8 expert parallel groups, each of size 4.)
+ - Data parallelism implicitly becomes 32 / (2 * 4) = 4. Again, cross-node token routing happens. To avoid it, ensure `expert_parallel_size == 1` or `model_parallel_size == 1`. + +Note that DP must be divisible by MP * EP. For more details, see the [TED paper]. + +Pipeline parallelism is not yet supported - coming soon! + +[TED paper]: https://arxiv.org/abs/2303.06318 + +# Datasets + +## Preconfigured Datasets + +Several preconfigured datasets are available, including most components from [the Pile](https://arxiv.org/abs/2101.00027), as well as the Pile train set itself, for straightforward tokenization using the `prepare_data.py` entry point. + +E.g., to download and tokenize the enwik8 dataset with the GPT2 tokenizer, saving it to `./data`, you can run: + +``` +python prepare_data.py -d ./data +``` + +or a single shard of the Pile (`pile_subset`) with the GPT-NeoX-20B tokenizer (assuming you have it saved at `./20B_checkpoints/20B_tokenizer.json`): + +``` +python prepare_data.py -d ./data -t HFTokenizer --vocab-file ./20B_checkpoints/20B_tokenizer.json pile_subset +``` + +The tokenized data will be saved out to two files: `[data-dir]/[dataset-name]/[dataset-name]_text_document.bin` and `[data-dir]/[dataset-name]/[dataset-name]_text_document.idx`. You will need to add the prefix that both these files share to your training configuration file under the `data-path` field. E.g.: + +```yaml + "data-path": "./data/enwik8/enwik8_text_document", +``` + +## Using Custom Data + +To prepare your own dataset for training, format it as one large [jsonl](https://jsonlines.org/) file, with each line a JSON dictionary representing a separate document. The document text should be grouped under one JSON key, i.e. `"text"`. Any auxiliary data stored in other fields will not be used. + +Next, download the GPT2 tokenizer vocab and merge files from the following links: + +- Vocab: https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json +- Merge: https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt + +Or use the 20B tokenizer (for which only a single vocab file is needed): + +- Vocab: https://the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights/20B_tokenizer.json + +(alternatively, you can provide any tokenizer file that can be loaded by Hugging Face's tokenizers library with the `Tokenizer.from_pretrained()` command) + +You can now pretokenize your data using `tools/datasets/preprocess_data.py`, the arguments for which are detailed below: + +``` +usage: preprocess_data.py [-h] --input INPUT [--jsonl-keys JSONL_KEYS [JSONL_KEYS ...]] [--num-docs NUM_DOCS] --tokenizer-type {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer} [--vocab-file VOCAB_FILE] [--merge-file MERGE_FILE] [--append-eod] [--ftfy] --output-prefix OUTPUT_PREFIX + [--dataset-impl {lazy,cached,mmap}] [--workers WORKERS] [--log-interval LOG_INTERVAL] + +optional arguments: + -h, --help show this help message and exit + +input data: + --input INPUT Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated list + --jsonl-keys JSONL_KEYS [JSONL_KEYS ...] + space-separated list of keys to extract from jsonl. Default: text + --num-docs NUM_DOCS Optional: Number of documents in the input data (if known) for an accurate progress bar. + +tokenizer: + --tokenizer-type {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer} + What type of tokenizer to use.
+ --vocab-file VOCAB_FILE + Path to the vocab file + --merge-file MERGE_FILE + Path to the BPE merge file (if necessary). + --append-eod Append an <eod> token to the end of a document. + --ftfy Use ftfy to clean text + +output data: + --output-prefix OUTPUT_PREFIX + Path to binary output file without suffix + --dataset-impl {lazy,cached,mmap} + Dataset implementation to use. Default: mmap + +runtime: + --workers WORKERS Number of worker processes to launch + --log-interval LOG_INTERVAL + Interval between progress updates + +``` + +For example: + +```bash +python tools/datasets/preprocess_data.py \ + --input ./data/mydataset.jsonl.zst \ + --output-prefix ./data/mydataset \ + --vocab-file ./data/gpt2-vocab.json \ + --merge-file gpt2-merges.txt \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --append-eod +``` + +You would then run training with the following settings added to your configuration file: + +```yaml + "data-path": "data/mydataset_text_document", +``` + +# Training and Finetuning + +Training is launched using `deepy.py`, a wrapper around DeepSpeed's launcher, which launches the same script in parallel across many GPUs / nodes. + +The general usage pattern is: + +```bash +python ./deepy.py train.py [path/to/config1.yml] [path/to/config2.yml] ... +``` + +You can pass in an arbitrary number of configs, which will all be merged at runtime. + +You can also optionally pass in a config prefix, which will assume all your configs are in the same folder and append that prefix to their path. + +For example: + +```bash +python ./deepy.py train.py -d configs 125M.yml local_setup.yml +``` + +This will deploy the `train.py` script on all nodes with one process per GPU. The worker nodes and number of GPUs are specified in the `/job/hostfile` file (see [parameter documentation](configs/README.md)), or can simply be passed in as the `num_gpus` arg if running on a single node setup. + +Although this is not strictly necessary, we find it useful to define the model parameters in one config file (e.g. `configs/125M.yml`) and the data path parameters in another (e.g. `configs/local_setup.yml`). + + +## Pretrained Models + +### GPT-NeoX-20B + +GPT-NeoX-20B is a 20 billion parameter autoregressive language model trained on [the Pile](https://arxiv.org/abs/2101.00027). Technical details about GPT-NeoX-20B can be found in [the associated paper](https://arxiv.org/abs/2204.06745). The configuration file for this model is both available at [`./configs/20B.yml`](./configs/20B.yml) and included in the download links below. + +[Slim weights](https://the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights/) - (No optimizer states, for inference or finetuning, 39GB) + +To download from the command line to a folder named `20B_checkpoints`, use the following command: + +```bash +wget --cut-dirs=5 -nH -r --no-parent --reject "index.html*" https://the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights/ -P 20B_checkpoints +``` + +[Full weights](https://the-eye.eu/public/AI/models/GPT-NeoX-20B/full_weights/) - (Including optimizer states, 268GB) + +To download from the command line to a folder named `20B_checkpoints`, use the following command: + +```bash +wget --cut-dirs=5 -nH -r --no-parent --reject "index.html*" https://the-eye.eu/public/AI/models/GPT-NeoX-20B/full_weights/ -P 20B_checkpoints +``` + +Weights can alternatively be downloaded using a BitTorrent client.
Torrent files can be downloaded here: [slim weights](https://the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights.torrent), [full weights](https://the-eye.eu/public/AI/models/GPT-NeoX-20B/full_weights.torrent). + +We additionally have 150 checkpoints saved throughout training, one every 1,000 steps. We are working on figuring out how to best serve these at scale, but in the meantime people interested in working with the partially trained checkpoints can email us at contact@eleuther.ai to arrange access. + +### Pythia + +The Pythia Scaling Suite is a suite of models ranging from 70M parameters to 12B parameters trained on [the Pile](https://pile.eleuther.ai) intended to promote research on interpretability and training dynamics of large language models. Further details about the project and links to the models can be found in [the paper](https://arxiv.org/abs/2304.01373) and [on the project's GitHub](https://github.com/EleutherAI/pythia). + +### Polyglot + +The Polyglot Project is an effort to train powerful non-English pretrained language models to promote the accessibility of this technology to researchers outside the dominant powerhouses of machine learning. EleutherAI has trained and released 1.3B, 3.8B, and 5.8B parameter Korean language models, the largest of which outperforms all other publicly available language models on Korean language tasks. Further details about the project and links to the models can be found [here](https://github.com/EleutherAI/polyglot). + +# Inference + +**For most uses we recommend deploying models trained using the GPT-NeoX library via the Hugging Face Transformers library, which is better optimized for inference.** + +We support three types of generation from a pretrained model: +1. Unconditional generation +2. Conditional generation based on an input read from a file +3. Interactive generation, which allows for multiple rounds of back-and-forth between a user and the language model via a command line interface + +All three types of text generation can be launched via `python ./deepy.py generate.py -d configs 125M.yml local_setup.yml text_generation.yml` with the appropriate values set in `configs/text_generation.yml`. + +# Evaluation + +GPT-NeoX supports evaluation on downstream tasks through the [language model evaluation harness](https://github.com/EleutherAI/lm-evaluation-harness). + +To evaluate a trained model on the evaluation harness, simply run: + +```bash +python ./deepy.py eval.py -d configs your_configs.yml --eval_tasks task1 task2 ... taskn +``` + +where `--eval_tasks` is a space-separated list of evaluation tasks, e.g. `--eval_tasks lambada hellaswag piqa sciq`. For details of all tasks available, refer to the [lm-evaluation-harness repo](https://github.com/EleutherAI/lm-evaluation-harness). + +# Exporting to Hugging Face + +GPT-NeoX is heavily optimized for training only, and GPT-NeoX model checkpoints are not compatible out of the box with other deep learning libraries. To make models easily loadable and shareable with end users, and for further exporting to various other frameworks, GPT-NeoX supports checkpoint conversion to the [Hugging Face Transformers](https://arxiv.org/abs/1910.03771) format. + +Though NeoX supports a number of different architectural configurations, including ALiBi positional embeddings, not all of these configurations map cleanly onto the supported configurations within Hugging Face Transformers.
+ +NeoX supports export of compatible models into the following architectures: +- GPTNeoXForCausalLM +- LlamaForCausalLM +- MistralForCausalLM + +Training a model which does not fit into one of these Hugging Face Transformers architectures cleanly will require writing custom modeling code for the exported model. + +To convert a GPT-NeoX library checkpoint to Hugging Face-loadable format, run: +```bash +python ./tools/ckpts/convert_neox_to_hf.py --input_dir /path/to/model/global_stepXXX --config_file your_config.yml --output_dir hf_model/save/location --precision {auto,fp16,bf16,fp32} --architecture {neox,mistral,llama} +``` + +Then to upload a model to [the Hugging Face Hub](https://huggingface.co/), run: +```bash +huggingface-cli login +python ./tools/ckpts/upload.py +``` +and input the requested information, including your HF hub user token. + +### Importing Models Into GPT-NeoX + +NeoX supplies several utilities for converting a pretrained model checkpoint into a format that can be trained within the library. + +The following models or model families can be loaded in GPT-NeoX: +- Llama 1 +- Llama 2 +- CodeLlama +- Mistral-7b-v0.1 + +We provide two utilities for converting from two different checkpoint formats into a format compatible with GPT-NeoX. + +To convert a Llama 1 or Llama 2 checkpoint distributed by Meta AI from its original file format (downloadable [here](https://github.com/facebookresearch/llama) or [here](https://huggingface.co/meta-llama/Llama-2-7b)) into the GPT-NeoX library, run + +``` +python tools/ckpts/convert_raw_llama_weights_to_neox.py --input_dir /path/to/model/parent/dir/7B --model_size 7B --output_dir /path/to/save/ckpt --num_output_shards <num_shards> (--pipeline_parallel if pipeline-parallel-size >= 1) +``` + + +To convert from a Hugging Face model into a NeoX-loadable format, run `tools/ckpts/convert_hf_to_sequential.py`. See documentation within that file for further options. + + +# Monitoring + +In addition to storing logs locally, we provide built-in support for three popular experiment monitoring frameworks: [Weights & Biases](https://wandb.ai/site), [TensorBoard](https://www.tensorflow.org/tensorboard/), and [Comet](https://www.comet.com/site). + +## Weights and Biases + +[Weights & Biases](https://wandb.ai/site) is a machine learning monitoring platform (you can browse [our recorded experiments](https://wandb.ai/eleutherai/neox)). To use wandb to monitor your gpt-neox experiments: +1. Create an account at https://wandb.ai/site to generate your API key. +2. Log into Weights & Biases on your machine—you can do this by executing `wandb login`—and your runs will automatically be recorded. +3. Dependencies required for wandb monitoring can be found in and installed from `./requirements/requirements-wandb.txt`. +4. There are two optional fields associated with Weights & Biases: `wandb_group` allows you to name the run group and `wandb_team` allows you to assign your runs to an organization or team account. An example config is provided in `./configs/local_setup_wandb.yml`. + +## TensorBoard + +We support using TensorBoard via the `tensorboard-dir` field. Dependencies required for TensorBoard monitoring can be found in and installed from `./requirements/requirements-tensorboard.txt`. + +## Comet + +[Comet](https://www.comet.com/site) is a machine learning monitoring platform. To use Comet to monitor your gpt-neox experiments: +1. Create an account at https://www.comet.com/login to generate your API key. +2.
Once generated, link your API key at runtime by running `comet login` or setting `export COMET_API_KEY=<api-key>`. +3. Install `comet_ml` and any dependency libraries via `pip install -r requirements/requirements-comet.txt`. +4. Enable Comet with `use_comet: True`. You can also customize where data is being logged with `comet_workspace` and `comet_project`. A full example config with comet enabled is provided in `configs/local_setup_comet.yml`. +5. Run your experiment, and monitor metrics in the Comet workspace that you passed! + +# Running on multi-node + +If you need to supply a hostfile for use with the MPI-based DeepSpeed launcher, you can set the environment variable `DLTS_HOSTFILE` to point to the hostfile. + +# Profiling + +We support profiling with Nsight Systems, the PyTorch Profiler, and PyTorch Memory Profiling. + +## Nsight Systems Profiling + +To use Nsight Systems profiling, set the config options `profile`, `profile_step_start`, and `profile_step_stop` (see [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/neox_arguments.md) for argument usage, and [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/prof.yml) for a sample config). + +To populate nsys metrics, launch training with: + +``` +nsys profile -s none -t nvtx,cuda -o <output-file> --force-overwrite true \ +--capture-range=cudaProfilerApi --capture-range-end=stop python $TRAIN_PATH/deepy.py \ +$TRAIN_PATH/train.py --conf_dir configs +``` + +The generated output file can then be viewed with the Nsight Systems GUI: + +![nsight-prof](images/nsight_profiling.png) + +## PyTorch Profiling + +To use the built-in PyTorch profiler, set the config options `profile`, `profile_step_start`, and `profile_step_stop` (see [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/neox_arguments.md) for argument usage, and [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/prof.yml) for a sample config). + +The PyTorch profiler will save traces to your `tensorboard` log directory. You can view these traces within +TensorBoard by following the steps [here](https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html). + +![torch-prof](images/pytorch_profiling.png) + +## PyTorch Memory Profiling + +To use PyTorch Memory Profiling, set the config options `memory_profiling` and `memory_profiling_path` (see [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/neox_arguments.md) for argument usage, and [here](https://github.com/EleutherAI/gpt-neox/blob/main/configs/prof.yml) for a sample config). + +![mem-prof](images/memory_profiling.png) + +View the generated profile (the pickle file written to `memory_profiling_path`) with the [memory_viz.py](https://github.com/pytorch/pytorch/blob/main/torch/cuda/_memory_viz.py) script. Run with: + +``` +python _memory_viz.py trace_plot <pickle-file> -o trace.html +``` + +# Adoption and Publications + +The GPT-NeoX library has been widely adopted by academic and industry researchers and ported to many HPC systems. + +If you have found this library useful in your research, please reach out and let us know! We would love to add you to our lists. + +## Publications + +EleutherAI and our collaborators have used it in the following publications: + - **Sid Black**, **Stella Biderman**, **Eric Hallahan**, **Quentin Anthony**, **Leo Gao**, **Laurence Golding**, **Horace He**, **Connor Leahy**, **Kyle McDonell**, **Jason Phang**, **Michael Pieler**, **Shivanshu Purohit**, **Laria Reynolds**, **Jon Tow**, **Ben Wang**, and **Samuel Weinbach**.
"[GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745)." In *Proceedings of the ACL Workshop on Challenges \& Perspectives in Creating Large Language Models*, 2022. + - **Stella Biderman**, **Hailey Schoelkopf**, **Quentin Anthony**, **Herbie Bradley**, **Kyle O'Brien**, **Eric Hallahan**, **Mohammad Aflah Khan**, **Shivanshu Purohit**, **USVSN Sai Prashanth**, Edward Raff, **Aviya Skowron**, **Lintang Sutawika**, **Oskar van der Wal**. "[Pythia: A suite for analyzing large language models across training and scaling](https://arxiv.org/abs/2304.01373)." In _International Conference on Machine Learning_, pp. 2397-2430. _PMLR_, 2023. + - Zhangir Azerbayev, Bartosz Piotrowski, **Hailey Schoelkopf**, Edward W. Ayers, Dragomir Radev, and Jeremy Avigad. "[Proofnet: Autoformalizing and formally proving undergraduate-level mathematics](https://arxiv.org/abs/2302.12433). *arXiv preprint arXiv:2302.12433*, 2023. + - **Stella Biderman**, **USVSN Sai Prashanth**, **Lintang Sutawika**, **Hailey Schoelkopf**, **Quentin Anthony**, **Shivanshu Purohit**, and Edward Raff. "[Emergent and predictable memorization in large language models.](https://arxiv.org/abs/2304.11158)" In _Neural Information Processing Systems_, 2023. + - **Hyunwoong Ko**, **Kichang Yang**, **Minho Ryu**, **Taekyoon Choi**, **Seungmu Yang,** and Sungho Park. "[A Technical Report for Polyglot-Ko: Open-Source Large-Scale Korean Language Models](https://arxiv.org/abs/2306.02254)." *arXiv preprint arXiv:2306.02254*, 2023. + - Kshitij Gupta, Benjamin Thérien, Adam Ibrahim, Mats Leon Richter, **Quentin Anthony**, Eugene Belilovsky, Irina Rish, and Timothée Lesort. "[Continual Pre-Training of Large Language Models: How to re-warm your model?](https://arxiv.org/abs/2308.04014)" In _Workshop on Efficient Systems for Foundation Models @ ICML_, 2023. + - **Zhangir Azerbayev**, **Hailey Schoelkopf**, Keiran Paster, Marco Dos Santos, Stephen McAleer, Albert Q Jiang, Jia Deng, **Stella Biderman**, and Sean Welleck. "[Llemma: An open language model for mathematics]([https://arxiv.org/abs/2308.04014](https://arxiv.org/abs/2310.10631))" In _Math-AI Workshop @ NeurIPS_, 2023. + - Alexander Havrilla, Maksym Zhuravinskyi, Duy Phung, Aman Tiwari, Jonathan Tow, **Stella Biderman**, **Quentin Anthony**, and **Louis Castricato**. "[trlX: A Framework for Large Scale Reinforcement Learning from Human Feedback](https://aclanthology.org/2023.emnlp-main.530/)." In _Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing_, 2023. + - **Quentin Anthony**, **Jacob Hatef**, Deepak Narayanan, **Stella Biderman**, Stas Bekman, Junqi Yin, Aamir Shafi, Hari Subramoni, and Dhabaleswar Panda. "[The Case for Co-Designing Model Architectures with Hardware](https://arxiv.org/abs/2401.14489)." In _arXiv preprint_, 2024. + - Adam Ibrahim, Benjamin Thérien, Kshitij Gupta, Mats L. Richter, **Quentin Anthony**, Timothée Lesort, Eugene Belilovsky, Irina Rish. "[Simple and Scalable Strategies to Continually Pre-train Large Language Models](https://arxiv.org/abs/2403.08763)." In _arXiv preprint_, 2024. + - Junqi Yin, Avishek Bose, Guojing Cong, Isaac Lyngaas, **Quentin Anthony**. "[Comparative Study of Large Language Model Architectures on Frontier](https://arxiv.org/abs/2402.00691)." In _arXiv preprint_, 2024. + +The following publications by other research groups use this library: +- Ta-Chung Chi, Ting-Han Fan, Peter J. Ramadge, and Alexander Rudnicky. 
"[KERPLE: Kernelized Relative Positional Embedding for Length Extrapolation](https://arxiv.org/abs/2205.09921)." In *Advances in Neural Information Processing Systems* 35, 2022. +- Sameera Horawalavithana, Ellyn Ayton, Shivam Sharma, Scott Howland, Megha Subramanian, Scott Vasquez, Robin Cosbey, Maria Glenski, and Svitlana Volkova. "[Foundation Models of Scientific Knowledge for Chemistry: Opportunities, Challenges and Lessons Learned](https://aclanthology.org/2022.bigscience-1.12/)." In *Proceedings of the ACL Workshop on Challenges \& Perspectives in Creating Large Language Models*, 2022. +- Sophia Kolak, Ruben Martins, Claire Le Goues, and Vincent J. Hellendoorn. "[Patch Generation with Language Models: Feasibility and Scaling Behavior](https://par.nsf.gov/biblio/10340618)"." In *Proceedings of the Deep Learning for Code Workshop at ICLR*, 2022. +- Frank F. Xu, Uri Alon, Graham Neubig, and Vincent J. Hellendoorn. "[A Systematic Evaluation of Large Language Models of Code](https://arxiv.org/abs/2202.13169)." In *Proceedings of the ICLR Workshop on Deep Learning For Code*, 2022. +- Byung-Doh Oh and William Schuler. "[Transformer-Based LM Surprisal Predicts Human Reading Times Best with About Two Billion Training Tokens](https://arxiv.org/abs/2304.11389)." In *Findings of the Association for Computational Linguistics*, 2023. +- Ta-Chung Chi, Ting-Han Fan, Alexander Rudnicky, and Peter Ramadge. "[Dissecting Transformer Length Extrapolation via the Lens of Receptive Field Analysis](https://aclanthology.org/2023.acl-long.756/)." In _Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)_, pp. 13522-13537, 2023. +- Ta-Chung Chi, Ting-Han Fan, Li-Wei Chen, Alexander Rudnicky, and Peter Ramadge. "[Latent Positional Information is in the Self-Attention Variance of Transformer Language Models Without Positional Embeddings](https://aclanthology.org/2023.acl-short.102/)." In _Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)_, pp. 13522-13537, 2023. +- Xidong Feng, Yicheng Luo, Ziyan Wang, Hongrui Tang, Mengyue Yang, Kun Shao, David Mguni, Yali Du, and Jun Wang. "[ChessGPT: Bridging Policy Learning and Language Modeling.](https://arxiv.org/abs/2306.09200)" _arXiv preprint arXiv:2306.09200_, 2023. +- Orion Walker Dollar, Sameera Horawalavithana, Scott Vasquez, W. James Pfaendtner, and Svitlana Volkova. "[MolJET: Multimodal Joint Embedding Transformer for Conditional de novo Molecular Design and Multi-Property Optimization.](https://openreview.net/pdf?id=7UudBVsIrr)" _preprint under review_, 2023. +- Jean Kaddour and Qi Liu. "[Text Data Augmentation in Low-Resource Settings via Fine-Tuning of Large Language Models](https://arxiv.org/abs/2310.01119)." _arXiv:2310.01119_, 2023. +- Alon Albalak, Liangming Pan, Colin Raffel, and William Yang Wang. "[Efficient Online Data Mixing For Language Model Pre-Training](https://arxiv.org/abs/2312.02406)." In _NeurIPS Workshop on R0-FoMo: Robustness of Few-shot and Zero-shot Learning in Large Foundation Models_, 2023. +- Eghbal A. Hosseini and Evelina Fedorenko. "[Large language models implicitly learn to straighten neural sentence trajectories to construct a predictive representation of natural language](https://www.biorxiv.org/content/10.1101/2023.11.05.564832v1)." In _Neural Information Processing Systems_, 2023. +- Junqi Yin, Sajal Dash, Feiyi Wang, and Mallikarjun Shankar. 
"[FORGE: Pre-Training Open Foundation Models for Science](https://dl.acm.org/doi/abs/10.1145/3581784.3613215). In _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, 1-13, 2023. +- Jean Kaddour and Qi Liu. "[Text Data Augmentation in Low-Resource Settings via Fine-Tuning of Large Language Models](https://arxiv.org/abs/2310.01119)." In _arXiv preprint arXiv:2310.01119_, 2023. +- Peng Di, Jianguo Li, Hang Yu, Wei Jiang, Wenting Cai, Yang Cao, Chaoyu Chen, Dajun Chen, Hongwei Chen, Liang Chen, Gang Fan, Jie Gong, Zi Gong, Wen Hu, Tingting Guo, Zhichao Lei, Ting Li, Zheng Li, Ming Liang, Cong Liao, Bingchang Liu, Jiachen Liu, Zhiwei Liu, Shaojun Lu, Min Shen, Guangpei Wang, Huan Wang, Zhi Wang, Zhaogui Xu, Jiawei Yang, Qing Ye, Gehao Zhang, Yu Zhang, Zelin Zhao, Xunjin Zheng, Hailian Zhou, Lifu Zhu, and Xianying Zhu. "[CodeFuse-13B: A Pretrained Multi-lingual Code Large Language Model](https://arxiv.org/abs/2310.06266)." In _arXiv preprint arXiv:2310.06266_, 2023. +- Nikitha Rao, Kush Jain, Uri Alon, Claire Le Goues, and Vincent J Hellendoorn. "[CAT-LM Training Language Models on Aligned Code And Tests](https://arxiv.org/abs/2310.01602)." In _38th IEEE/ACM International Conference on Automated Software Engineering (ASE)_, pp. 409-420. IEEE, 2023. +- Pratyush Patel, Esha Choukse, Chaojie Zhang, Íñigo Goiri, Brijesh Warrier, Nithish Mahalingam, Ricardo Bianchini. "[POLCA: Power Oversubscription in LLM Cloud Providers](https://arxiv.org/abs/2308.12908)." In _arXiv preprint_, 2023. +- Junqi Yin, Sajal Dash, John Gounley, Feiyi Wang, and Georgia Tourassi. "[Evaluation of pre-training large language models on leadership-class supercomputers](https://link.springer.com/article/10.1007/s11227-023-05479-7)." In _the Journal of Supercomputing_ 79, no. 18, 2023. +- Tal Kadosh, Niranjan Hasabnis, Vy A. Vo, Nadav Schneider, Neva Krien, Mihai Capota, Abdul Wasay, Nesreen Ahmed, Ted Willke, Guy Tamir, Yuval Pinter, Timothy Mattson, and Gal Oren. "[Domain-Specific Code Language Models: Unraveling the Potential for HPC Codes and Tasks](https://arxiv.org/abs/2312.13322)." In _arXiv preprint_, 2023. +- Guobin Shen, Dongcheng Zhao, Yiting Dong, Yang Li, Jindong Li, Kang Sun, and Yi Zeng. "[Astrocyte-Enabled Advancements in Spiking Neural Networks for Large Language Modeling](https://arxiv.org/abs/2312.07625)." In _arXiv preprint_, 2023. +- Eghbal A. Hosseini, Martin A. Schrimpf, Yian Zhang, Samuel Bowman, Noga Zaslavsky, and Evelina Fedorenko. "[Artificial neural network language models align neurally and behaviorally with humans even after a developmentally realistic amount of training.](https://www.biorxiv.org/content/10.1101/2022.10.04.510681)" In _Neurobiology of Language_, 2024. +- Xiongye Xiao, Chenyu Zhou, Heng Ping, Defu Cao, Yaxing Li, Yizhuo Zhou, Shixuan Li, and Paul Bogdan. "[Exploring Neuron Interactions and Emergence in LLMs: From the Multifractal Analysis Perspective](https://arxiv.org/abs/2402.09099)." In _arXiv preprint_, 2024. +- Zhiyuan Zeng, Qipeng Guo, Zhaoye Fei, Zhangyue Yin, Yunhua Zhou, Linyang Li, Tianxiang Sun, Hang Yan, Dahua Lin, and Xipeng Qiu. "[Turn Waste into Worth: Rectifying Top-k Router of MoE](https://arxiv.org/abs/2402.12399)." In _arXiv preprint_, 2024. 
+ +## Models +The following models were trained using this library: + +### English LLMs +- EleutherAI's [GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b) and [Pythia (70M through 13B)](https://github.com/EleutherAI/pythia) +- CarperAI's [FIM-NeoX-1.3B](https://huggingface.co/CarperAI/FIM-NeoX-1.3B) +- StabilityAI's [StableLM (3B and 7B)](https://github.com/Stability-AI/StableLM) +- Together.ai's [RedPajama-INCITE (3B and 7B)](https://together.ai/blog/redpajama-models-v1) +- Carnegie Mellon University's [proofGPT (1.3B and 6.7B)](https://huggingface.co/hoskinson-center/proofGPT-v0.1-6.7B) +- Dampish's [StellarX (2.8B and 4B)](https://huggingface.co/Dampish/StellarX-4B-V0.2) +- The Chinese Academy of Sciences' [AstroSNN (1.5B)](https://arxiv.org/abs/2312.07625) + +### Non-English LLMs +- EleutherAI's [Polyglot-Ko (1.3B through 12.8B)](https://github.com/EleutherAI/polyglot) (Korean) +- Korea University's [KULLM-Polyglot (5.8B and 12.8B)](https://github.com/nlpai-lab/KULLM) (Korean) +- Stability AI's [Japanese Stable LM (7B)](https://huggingface.co/stabilityai/japanese-stablelm-base-alpha-7b) (Japanese) +- LearnItAnyway's [LLaVA-Polyglot-Ko (1.3B)](https://huggingface.co/LearnItAnyway/llava-polyglot-ko-1.3b-hf) (Korean) +- Rinna Co.'s [japanese-gpt-neox-3.6b](https://huggingface.co/rinna/japanese-gpt-neox-3.6b) (Japanese) and [bilingual-gpt-neox-4b](https://huggingface.co/rinna/bilingual-gpt-neox-4b) (English / Japanese) +- CyberAgent's [Open-CLM (125M through 7B)](https://huggingface.co/cyberagent/open-calm-7b) (Japanese) +- The Hungarian Research Centre for Linguistics' [PULI GPTrio (6.7B)](https://huggingface.co/NYTK/PULI-GPTrio) (Hungarian / English / Chinese) +- The University of Tokyo's [weblab-10b](https://huggingface.co/Kojima777/weblab-10b) and [weblab-10b-instruct](https://huggingface.co/Kojima777/weblab-10b-instruction-sft) (Japanese) +- nolando.ai's [Hi-NOLIN (9B)](https://blog.nolano.ai/Hi-NOLIN/) (English, Hindi) +- Renmin University of China's [YuLan (12B)](https://huggingface.co/yulan-team/YuLan-Base-12b) (English, Chinese) +- The Basque Center for Language Technology's [Latxa (70B)](https://huggingface.co/HiTZ/latxa-70b-v1.2) (Basque) + +### Code Models +- Carnegie Mellon University's [PolyCoder (160M through 2.7B)](https://github.com/VHellendoorn/Code-LMs) and [CAT-LM (2.7B)](https://huggingface.co/nikitharao/catlm) +- StabilityAI's [StableCode (1.3B)](https://stability.ai/blog/stablecode-llm-generative-ai-coding) and [StableCode-Completion-Alpha (3B)](https://stability.ai/blog/stablecode-llm-generative-ai-coding) +- CodeFuse AI's [CodeFuse (13B)](https://huggingface.co/codefuse-ai/CodeFuse-13B) + +### AI for Science +- EleutherAI's [LLeMMA (34B)](https://arxiv.org/abs/2310.10631) +- Oak Ridge National Lab's [FORGE (26B)](https://github.com/at-aaims/forge) +- Oak Ridge National Lab's [Unnamed Material Science Domain Models (7B)](https://arxiv.org/abs/2402.00691) +- Pacific Northwest National Lab's [MolJet (undisclosed size)](https://openreview.net/pdf?id=7UudBVsIrr) + +### Other Modalities +- Rinna Co.'s [PSLM (7B)](https://arxiv.org/abs/2406.12428) (speech / text) +- University College London's [ChessGPT-3B](https://huggingface.co/Waterhorse/chessgpt-base-v1) +- Gretel's [Text-to-Table (3B)](https://huggingface.co/gretelai/text2table) + +# Administrative Notes + +## Citing GPT-NeoX + +If you have found the GPT-NeoX library helpful in your work, you can cite this repository as + +```bibtex +@software{gpt-neox-library, + title = {{GPT-NeoX: Large Scale
Autoregressive Language Modeling in PyTorch}}, + author = {Andonian, Alex and Anthony, Quentin and Biderman, Stella and Black, Sid and Gali, Preetham and Gao, Leo and Hallahan, Eric and Levy-Kramer, Josh and Leahy, Connor and Nestler, Lucas and Parker, Kip and Pieler, Michael and Phang, Jason and Purohit, Shivanshu and Schoelkopf, Hailey and Stander, Dashiell and Songz, Tri and Tigges, Curt and Thérien, Benjamin and Wang, Phil and Weinbach, Samuel}, + url = {https://www.github.com/eleutherai/gpt-neox}, + doi = {10.5281/zenodo.5879544}, + month = {9}, + year = {2023}, + version = {2.0.0}, +} +``` + +To cite the 20 billion parameter model named `GPT-NeoX-20B`, please use + +```bibtex +@inproceedings{gpt-neox-20b, + title={{GPT-NeoX-20B}: An Open-Source Autoregressive Language Model}, + author={Black, Sid and Biderman, Stella and Hallahan, Eric and Anthony, Quentin and Gao, Leo and Golding, Laurence and He, Horace and Leahy, Connor and McDonell, Kyle and Phang, Jason and Pieler, Michael and Prashanth, USVSN Sai and Purohit, Shivanshu and Reynolds, Laria and Tow, Jonathan and Wang, Ben and Weinbach, Samuel}, + booktitle={Proceedings of the ACL Workshop on Challenges \& Perspectives in Creating Large Language Models}, + url={https://arxiv.org/abs/2204.06745}, + year={2022} +} +``` + +## Contributing +GPT-NeoX is built by the open-source AI community, and relies on our amazing contributors! Please see our +[contributing](CONTRIBUTING.md) guide for more details on our CLA, code formatting, testing, +etc. + +## Licensing + +This repository hosts code that is part of EleutherAI's GPT-NeoX project. Copyright (c) 2024, EleutherAI. Licensed under the Apache License: + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +This repository is based on code written by NVIDIA that is licensed under the Apache License, Version 2.0. In accordance with the Apache License, all files that are modifications of code originally written by NVIDIA maintain an NVIDIA copyright header. All files that do not contain such a header are the exclusive copyright of EleutherAI. When the NVIDIA code has been modified from its original version, that fact is noted in the copyright header. All derivative works of this repository must preserve these headers under the terms of the Apache License. + +This repository also contains code written by a number of other authors. Such contributions are marked and the relevant licensing is included where appropriate. + +For full terms, see the `LICENSE` file. If you have any questions, comments, or concerns about licensing please email us at contact@eleuther.ai. + +## Acknowledgements + +We run our experiments on a Kubernetes cluster provided by [CoreWeave](https://coreweave.com/) and a Slurm cluster provided by [Stability AI](https://stability.ai). We are thankful to the DeepSpeed team for their advice and consultation.
diff --git a/configs/1-3B.yml b/configs/1-3B.yml new file mode 100644 index 0000000000000000000000000000000000000000..ea3fdb9bfabc2525e634b1d749cd7a09da130354 --- /dev/null +++ b/configs/1-3B.yml @@ -0,0 +1,93 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 16, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0002, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00002, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/125M-dmoe.yml b/configs/125M-dmoe.yml new file mode 100644 index 0000000000000000000000000000000000000000..229191b4d1b09537a5bdcf3cef90ae1ec2b01b4b --- /dev/null +++ b/configs/125M-dmoe.yml @@ -0,0 +1,101 @@ +# GPT-2 pretraining setup +{ + # See README for MoE config docs! 
+ "moe_type": "megablocks", + "moe_token_dropping": false, + # Have 4 experts per layer (every 2 layers by default) + "moe_num_experts": 4, + # parallelism settings + "enable_expert_tensor_parallelism": true, + "pipe_parallel_size": 1, # not yet supported for MoE + "model_parallel_size": 1, + "moe_expert_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00006, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 0, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 10, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + + # networking + "hostfile": "/mock_path" +} diff --git a/configs/125M-json.yml b/configs/125M-json.yml new file mode 100644 index 0000000000000000000000000000000000000000..467318f2419b2b38d61b32195bbdd90530e80871 --- /dev/null +++ b/configs/125M-json.yml @@ -0,0 +1,80 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.00006, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true + }, + + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + + "hostfile": "/mock_path" +} diff --git a/configs/125M-moe.yml b/configs/125M-moe.yml new file mode 100644 index 0000000000000000000000000000000000000000..1d08d78a480127d8504014d29651818dbb58b2f6 --- /dev/null +++ b/configs/125M-moe.yml @@ -0,0 +1,101 @@ +# GPT-2 pretraining setup +{ + # See README for MoE config docs! 
+ "moe_type": "deepspeed", + "moe_token_dropping": true, + # Have 4 experts per layer (every 2 layers by default) + "moe_num_experts": 4, + # parallelism settings + "enable_expert_tensor_parallelism": true, + "pipe_parallel_size": 1, # not yet supported for MoE + "model_parallel_size": 1, + "moe_expert_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00006, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 10, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + + # networking + "hostfile": "/mock_path" +} diff --git a/configs/125M.yml b/configs/125M.yml new file mode 100644 index 0000000000000000000000000000000000000000..466492f7be4f8661141b7eefa7f6d1ba83896eca --- /dev/null +++ b/configs/125M.yml @@ -0,0 +1,96 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00006, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + + # networking + "hostfile": "/mock_path" +} diff --git a/configs/13B.yml b/configs/13B.yml new file mode 100644 index 0000000000000000000000000000000000000000..99caab58590b4229aefe5998fe3e4c36a47e9aba --- /dev/null +++ b/configs/13B.yml @@ -0,0 +1,94 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 40, + "hidden_size": 5120, + "num_attention_heads": 40, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0001, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + "min_lr": 0.00001, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/175B.yml b/configs/175B.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d011f1b47c3027b601eec17ff3badef39263a62 --- /dev/null +++ b/configs/175B.yml @@ -0,0 +1,92 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 96, + "hidden_size": 12288, + "num_attention_heads": 96, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00006, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000006, + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/19M.yml b/configs/19M.yml new file mode 100644 index 0000000000000000000000000000000000000000..c14ebe8eae3b76000394ad3924fcaf3bcebe179c --- /dev/null +++ b/configs/19M.yml @@ -0,0 +1,97 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 6, + "hidden_size": 512, + "num_attention_heads": 8, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.001, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.0001, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + "train_micro_batch_size_per_gpu": 4, #32, + "gradient_accumulation_steps": 1, + "data_impl": "mmap", + "num_workers": 1, + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1, + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "eval_interval": 100000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + # additional deepspeed args not specified above + "deepspeed_extra_args": { + "comms_logger": { + "enabled": true, + "verbose": true, + "prof_all": true, + "debug": false + }, + } + +} diff --git a/configs/2-7B.yml b/configs/2-7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..9e6a47e1544f157580ed5be50d16f4db5c925ea1 --- /dev/null +++ b/configs/2-7B.yml @@ -0,0 +1,93 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 32, + "hidden_size": 2560, + "num_attention_heads": 32, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + 
"scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00016, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000016, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/20B.yml b/configs/20B.yml new file mode 100644 index 0000000000000000000000000000000000000000..0a4ce63354a9ab66b17c92e33cafd123b0d9f229 --- /dev/null +++ b/configs/20B.yml @@ -0,0 +1,113 @@ +# DISCLAIMER: This is the configuration file for the GPT-NeoX-20B model as it was trained on 96x 40GB A100 +# GPUs. Depending on your system configuration, you may need to change some parameters in order to fit +# the model in memory. 
+ +{ + # Tokenizer / checkpoint settings - you will need to change these to the location you have them saved in + "vocab_file": "./20B_checkpoints/20B_tokenizer.json", + "save": "./20B_checkpoints", + "load": "./20B_checkpoints", + + # If finetuning, edit the following to the location of your finetuning dataset: + "data_path": "./data/pile_20B_tokenizer/pile_20B_tokenizer_text_document", + + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 4, + "model_parallel_size": 2, + + # model settings + "num_layers": 44, + "hidden_size": 6144, + "num_attention_heads": 64, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.97e-4, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + + "min_lr": 0.97e-5, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 1260000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": True, + }, + + # batch / data settings (assuming 96 GPUs) + "train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 32, + "data_impl": "mmap", + "split": "995,4,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": false, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.01, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 150000, + "lr_decay_iters": 150000, + + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 500, # this variable previously called `save-interval` + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 2, + "steps_per_print": 2, + "wall_clock_breakdown": false, + + ### NEW DATA: #### + "tokenizer_type": "HFTokenizer", + "tensorboard-dir": "./tensorboard", + "log_dir": "./logs", + +} diff --git a/configs/350M.yml b/configs/350M.yml new file mode 100644 index 0000000000000000000000000000000000000000..00a174433736a7fec39ba185efb7fff947c1c007 --- /dev/null +++ b/configs/350M.yml @@ -0,0 +1,92 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 24, + "hidden_size": 1024, + "num_attention_heads": 16, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0003, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00003, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/49M.yml b/configs/49M.yml new file mode 100644 index 0000000000000000000000000000000000000000..099af6a48fc3ee7344b723ddbabc6c4eff0ece7b --- /dev/null +++ b/configs/49M.yml @@ -0,0 +1,93 @@ +{ + # parallelism settings + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 10, + "hidden_size": 640, + "num_attention_heads": 10, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0008, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00008, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 32, + "gradient_accumulation_steps": 1, + "data_impl": "mmap", + "num_workers": 1, + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1, + }, + + # misc. 
training settings + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "eval_interval": 100000, + "eval_iters": 10, + + # logging + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, +} diff --git a/configs/6-7B.yml b/configs/6-7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..087b7d763aad98636945bc89f30b98fd47168379 --- /dev/null +++ b/configs/6-7B.yml @@ -0,0 +1,93 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00012, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + "min_lr": 0.000012, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/760M.yml b/configs/760M.yml new file mode 100644 index 0000000000000000000000000000000000000000..6d62dc0f37ec65b9778f4fe8b3c651c83b23723a --- /dev/null +++ b/configs/760M.yml @@ -0,0 +1,93 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 24, + "hidden_size": 1536, + "num_attention_heads": 16, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00025, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000025, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/800M.yml b/configs/800M.yml new file mode 100644 index 0000000000000000000000000000000000000000..4fb9559a86b4e7313a7701d279dcafe20c7b2f94 --- /dev/null +++ b/configs/800M.yml @@ -0,0 +1,86 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 16, + "hidden_size": 2048, + "num_attention_heads": 8, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00025, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000025, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + "train_micro_batch_size_per_gpu": 16, + "gradient_accumulation_steps": 1, + "data_impl": "mmap", + "num_workers": 1, + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1, + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "eval_interval": 40000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, +} diff --git a/configs/README.md b/configs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ac20ed89ba459e4a4a5a10aefcde979386644a06 --- /dev/null +++ b/configs/README.md @@ -0,0 +1,368 @@ +# Configuration and parameters + +GPT-NeoX parameters are defined in a YAML configuration file which is passed to the `deepy.py` launcher - for examples see the files contained in this folder. +Parameters originate from either the [DeepSpeed runner CLI (DSL)](https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/launcher/runner.py#L33), [DeepSpeed configuration file (DSC)](https://www.deepspeed.ai/docs/config-json/), [Megatron-LM CLI (Meg)](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L224) or are GPT-NeoX (NeoX) modifications. + +## Example Configuration (GPT3 Small): + +Below is an example configuration `.yaml` to train a ~160M parameter GPT model. This readme will go through each section in the configuration and the options available. 
+ +For a detailed list of all the arguments available for neox, see [neox_arguments.md](neox_arguments.md) + +Note: yaml arguments may be formatted with either '-' or '\_'. The standard separator used is a '\_' as shown in the example configurations below. However, the use of '-' as a separator may be deprecated in the future. +```yaml +# GPT-3 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "rmsnorm", + "pos_emb": "none", + "no_weight_tying": true, + # this should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "train_iters": 320000, + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "max_grad_norm": 1.0, + "betas": [0.9, 0.95] + } + }, + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 0, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 1, + "data_impl": "mmap", + "split": "949,50,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # lr decay settings + "lr_decay_iters": 320000, + "lr_decay_style": "cosine", + "warmup": 0.01, + + # misc. training settings + "distributed_backend": "nccl", + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} +``` + +### Parallelism Settings: + +The parallelism settings are left at 1 in all configs, as the settings you want will be highly dependent on your compute setup and network topology. +We have found it best to do model parallelism within a node, and schedule pipeline stages across node boundaries. + +```yaml + "pipe_parallel_size": 1, + "model_parallel_size": 1, +``` + +These can be set to any integer between `0` and `num_gpus`, and `num_gpus` must be divisible by `pipe_parallel_size` * `model_parallel_size`. 
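+
+For intuition, here is a hypothetical sketch (the numbers are purely illustrative and not from any shipped config): on 2 nodes with 8 GPUs each (16 GPUs total), you might keep model parallelism inside a node and run the pipeline across nodes. The data-parallel degree is then derived from whatever factor remains:
+
+```yaml
+  # hypothetical 16-GPU layout (2 nodes x 8 GPUs)
+  "pipe_parallel_size": 2,   # pipeline stages, scheduled across the node boundary
+  "model_parallel_size": 4,  # tensor (model) parallelism, kept within a node
+  # the remaining factor is used for data parallelism:
+  # 16 GPUs / (2 * 4) = 2 data-parallel replicas
+```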
+
+
+### Model Settings:
+```yaml
+   # model settings
+   "num_layers": 12,
+   "hidden_size": 768,
+   "num_attention_heads": 12,
+   "seq_length": 2048,
+   "max_position_embeddings": 2048,
+   "norm": "rmsnorm",
+   "pos_emb": "none",
+   "no_weight_tying": true,
+   # this should provide some speedup but takes a while to build, set to true if desired
+   "scaled_upper_triang_masked_softmax_fusion": false,
+   "train_iters": 320000,
+   # alternatively, use train_epochs to automatically determine the number of training iterations
+   #"train_epochs": 1,
+```
+An example of some basic settings used to configure your model's architecture and number of training steps.
+
+### Optimizer Settings:
+
+Our optimizer configuration has a similar syntax to DeepSpeed's. Different optimizers will have different arguments for `"params"`.
+Learning rate should be configured from here using the `"lr"` field of `optimizer["params"]`.
+
+```yaml
+  # optimizer settings
+  "optimizer": {
+    "type": "Adam",
+    "params": {
+      "lr": 0.0006,
+      "max_grad_norm": 1.0,
+      "betas": [0.9, 0.95]
+    }
+  }
+```
+Available optimizer types are:
+
+- `"Adam"`: regular Adam optimizer
+- `"OneBitAdam"`: DeepSpeed's [OneBitAdam optimizer](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters). To use 1-bit Adam, you'll also need to add the `freeze_step`, `cuda_aware`, and `comm_backend_name` fields, like so:
+```yaml
+  "optimizer": {
+    "type": "OneBitAdam",
+    "params": {
+      "lr": 0.0001,
+      "freeze_step": 23000,
+      "betas": [0.9, 0.95],
+      "cuda_aware": false,
+      "comm_backend_name": "nccl"
+    }
+  }
+```
+
+- `"CPU_Adam"`/`"CPU_torch_adam"`: Adam optimizer on CPU, in either Megatron's version (`"CPU_Adam"`) or torch's (`"CPU_torch_adam"`).
+- `"SM3"`: SM3, a [memory-adaptive efficient optimizer](https://arxiv.org/pdf/1901.11150.pdf). We have found this doesn't work well with fp16 training.
+- `"madgrad_wd"`: [MADGRAD](https://arxiv.org/abs/2101.11075), a momentumized, adaptive, dual-averaged gradient method for stochastic optimization. Weight decay is implemented AdamW-style rather than in the original MADGRAD Adam style.
+
+### ZeRO Optimization:
+
+```yaml
+# for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
+  "zero_optimization": {
+    "stage": 0,
+    "allgather_partitions": True,
+    "allgather_bucket_size": 500000000,
+    "overlap_comm": True,
+    "reduce_scatter": True,
+    "reduce_bucket_size": 500000000,
+    "contiguous_gradients": True,
+  },
+  "zero_allow_untested_optimizer": false,
+
+```
+
+ZeRO optimization in NeoX is currently configured identically to how DeepSpeed configures it; please see [the deepspeed docs](https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training) for more information.
+
+If you want to combine an optimizer untested by DeepSpeed with ZeRO (i.e., not Adam or LAMB), you must pass `"zero_allow_untested_optimizer": true` *outside* of the `"zero_optimization"` dictionary (see above).
+
+N.B - ZeRO stages 2+ are incompatible with pipeline parallelism. Please set `"pipe_parallel_size"` to 0 if you want to use ZeRO stage 2 or more.
+
+### Batch Size Settings:
+
+```yaml
+  # batch / data settings
+  "train_micro_batch_size_per_gpu": 4,
+  "gradient_accumulation_steps": 1,
+```
+Our global batch size configuration follows DeepSpeed's and can be configured in a number of ways; at least one of `"train_batch_size"` and `"train_micro_batch_size_per_gpu"` must be specified.
+- `"train_batch_size"`: The effective training batch size: the number of data samples that leads to one step of model update. `train_batch_size` is the product of the batch size that a single GPU processes in one forward/backward pass (`train_micro_batch_size_per_gpu`), the gradient accumulation steps (`gradient_accumulation_steps`), and the number of GPUs.
+- `"train_micro_batch_size_per_gpu"`: Batch size to be processed by one GPU in one step (without gradient accumulation). When specified, `gradient_accumulation_steps` is automatically calculated using `train_batch_size` and the number of GPUs.
+- `"gradient_accumulation_steps"`: Number of training steps to accumulate gradients over before averaging and applying them. This feature is sometimes useful to improve scalability, since it results in less frequent communication of gradients between steps. Another impact of this feature is the ability to train with larger effective batch sizes per GPU. When specified, `train_micro_batch_size_per_gpu` is automatically calculated using `train_batch_size` and the number of GPUs.
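+
+Putting these together, the effective batch size is the product of the three factors above. A minimal sketch with purely illustrative numbers (assuming a hypothetical 16-GPU run):
+
+```yaml
+  # hypothetical batch settings for a 16-GPU run
+  "train_micro_batch_size_per_gpu": 4,  # samples per GPU per forward/backward pass
+  "gradient_accumulation_steps": 8,     # micro-batches accumulated before each optimizer step
+  # DeepSpeed then derives the effective batch size:
+  # train_batch_size = 4 * 8 * 16 = 512 samples per model update
+```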
+
+### Extra DeepSpeed Settings
+
+```yaml
+# additional deepspeed args not specified above
+"deepspeed_extra_args": {
+  "comms_logger": {
+    "enabled": true,
+    "verbose": true,
+    "prof_all": true,
+    "debug": false
+  },
+}
+```
+Additional DeepSpeed settings besides those mentioned above should be wrapped in the `"deepspeed_extra_args"` argument, as in the example above. This functionality is designed to allow arguments not covered by existing dataclasses to be passed to DeepSpeed (e.g. when new functionalities are implemented). If any settings are duplicated here from elsewhere in the YAML, the system will throw an exception and notify the user.
+
+### Dataset / Tokenizer / Checkpoint / Logging Settings:
+
+```yaml
+  "data_impl": "mmap",
+  "split": "949,50,1",
+  # Suggested data paths when using GPT-NeoX locally
+  "data_path": "data/enwik8/enwik8_text_document",
+  #"train_data_path": "data/enwik8/enwik8_text_document",
+  #"test_data_path": "data/enwik8/enwik8_text_document",
+  #"valid_data_path": "data/enwik8/enwik8_text_document",
+  "vocab_file": "data/gpt2-vocab.json",
+  "merge_file": "data/gpt2-merges.txt",
+  "save": "checkpoints",
+  "load": "checkpoints",
+  "tensorboard_dir": "tensorboard",
+  "log_dir": "logs",
+  "checkpoint_factor": 10000,
+  "eval_interval": 1000,
+  "eval_iters": 10,
+```
+
+For KTO style training, you'll need to add the reward & label data paths, e.g.:
+
+```yaml
+  "data_impl": "mmap",
+  # Suggested data paths when using GPT-NeoX locally
+  "train_data_path": "data/enwik8/enwik8_text_document",
+  "train_label_data_path": "data/enwik8/enwik8_text_label_document",
+  "train_reward_data_path": "data/enwik8/enwik8_text_reward_document",
+  "test_data_path": "data/enwik8/enwik8_text_document",
+  "test_label_data_path": "data/enwik8/enwik8_text_label_document",
+  "test_reward_data_path": "data/enwik8/enwik8_text_reward_document",
+  "valid_data_path": "data/enwik8/enwik8_text_document",
+  "valid_label_data_path": "data/enwik8/enwik8_text_label_document",
+  "valid_reward_data_path": "data/enwik8/enwik8_text_reward_document",
+  "vocab_file": "data/gpt2-vocab.json",
+  "merge_file": "data/gpt2-merges.txt",
+  "save": "checkpoints",
+  "load": "checkpoints",
+  "tensorboard_dir": "tensorboard",
+  "log_dir": "logs",
+  "checkpoint_factor": 10000,
+  "eval_interval": 1000,
+  "eval_iters": 10,
+```
+
+For DPO style training, you'll need to set pos/neg data paths instead of a single one, e.g.:
+
+```yaml
+  "dataset_impl": "pairwise",
+  "train_impl": "dpo",
+  "pack_impl": "unpacked",
+  "dpo_beta": 0.1,
+  "dpo_fp32": true,
+  "pos_train_data_path": "data/enwik8/enwik8_text_pos_document",
+  "pos_valid_data_path": "data/enwik8/enwik8_text_pos_document",
+  "pos_test_data_path": "data/enwik8/enwik8_text_pos_document",
+  "neg_train_data_path": "data/enwik8/enwik8_text_neg_document",
+  "neg_valid_data_path": "data/enwik8/enwik8_text_neg_document",
+  "neg_test_data_path": "data/enwik8/enwik8_text_neg_document",
+  ## If you have labels... (likely to mask out user turns)
+  "pos_train_label_data_path": "data/enwik8/enwik8_text_pos_label_document",
+  "pos_valid_label_data_path": "data/enwik8/enwik8_text_pos_label_document",
+  "pos_test_label_data_path": "data/enwik8/enwik8_text_pos_label_document",
+  "neg_train_label_data_path": "data/enwik8/enwik8_text_neg_label_document",
+  "neg_valid_label_data_path": "data/enwik8/enwik8_text_neg_label_document",
+  "neg_test_label_data_path": "data/enwik8/enwik8_text_neg_label_document",
+  ## If you want to precompute the logits over your dataset...
+  "precompute_model_name": "gpt2",
+  ## Needed for the generation.py step, if precomputing
+  "text_gen_type": "precompute"
+```
+
+### LR Scheduler settings
+
+```yaml
+  "lr_decay_iters": 320000,
+  "lr_decay_style": "cosine",
+  "warmup": 0.01,
+```
+
+Settings used to modify the learning rate over time.
+
+N.B - `OneBitAdam` requires you to use DeepSpeed's internal LR scheduler. Currently the LR decay style defaults to DeepSpeed's `WarmupDecayLR`.
+
+### Activation Checkpointing Settings:
+
+```yaml
+  "checkpoint_activations": true,
+  "checkpoint_num_layers": 1,
+  "partition_activations": true,
+  "synchronize_each_layer": true,
+```
+
+Checkpointing works by trading compute for memory. Rather than storing all intermediate activations of the entire computation graph for the backward pass, the checkpointed part does not save its intermediate activations and instead recomputes them during the backward pass.
+
+### Mixed Precision Training Settings:
+GPT-NeoX's fp16 training is configured identically to DeepSpeed's; please see [their documentation](https://www.deepspeed.ai/docs/config-json/#fp16-training-options) for more information.
+An example config for fp16 training:
+
+```yaml
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+```
+
+Alternatively you can use the `precision` config, which can be set to `fp16`, `bfloat16`, or `fp32`. If you set `"precision": "fp16"` without adding a `"fp16": {...}` dict, then it will simply use DeepSpeed's defaults for fp16 training.
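+
+For bf16, a minimal sketch, modeled on the `bf16_125M.yml` config elsewhere in this repository (the `fp32_allreduce` comment is taken from that file):
+
+```yaml
+  "precision": "bfloat16",
+  "fp32_allreduce": True,  # without a patch to torch, bf16 models have to do the allreduce in fp32
+```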
+
+
+### SLURM Settings
+
+If you are running GPT-NeoX on a SLURM cluster and wish to use SLURM to coordinate nodes, then you must set the following variables in your config:
+
+```yaml
+    "launcher": "slurm",
+    "deepspeed_slurm": true
+```
+
+Additionally, you need to modify _all_ of your configs to conform to JSON. When launching a GPT-NeoX job you can specify multiple YAML config files. Internally, all of these files are merged into one config and then passed as a single long command line argument to Deep(er)Speed. When using SLURM and its internal command `srun`, python fails to parse this long command line argument unless it is in the more restrictive JSON format. In practice, the example NeoX configs are already very close to JSON. As an example, this is a snippet of a YAML-compatible config; note the comment and the capital-F `False`:
+
+```yaml
+  # optimizer settings
+  "optimizer": {
+    "type": "OneBitAdam",
+    "params": {
+      "lr": 0.0001,
+      "freeze_step": 23000,
+      "betas": [0.9, 0.95],
+      "cuda_aware": False,
+      "comm_backend_name": "nccl"
+    }
+  }
+```
+
+To make this valid JSON, just remove the comment and write the boolean in all lowercase:
+
+```yaml
+  "optimizer": {
+    "type": "OneBitAdam",
+    "params": {
+      "lr": 0.0001,
+      "freeze_step": 23000,
+      "betas": [0.9, 0.95],
+      "cuda_aware": false,
+      "comm_backend_name": "nccl"
+    }
+  }
+```
diff --git a/configs/autotuning_configs/small_tune.json b/configs/autotuning_configs/small_tune.json
new file mode 100644
index 0000000000000000000000000000000000000000..52c99449b50f87c09c6a66b3b491afdde8116752
--- /dev/null
+++ b/configs/autotuning_configs/small_tune.json
@@ -0,0 +1,78 @@
+{
+    "pipe-parallel-size": 1,
+    "model-parallel-size": 1,
+
+    "num-layers": 12,
+    "hidden-size": 768,
+    "num-attention-heads": 12,
+    "seq-length": 2048,
+    "max-position-embeddings": 2048,
+    "norm": "layernorm",
+    "pos-emb": "rotary",
+    "no-weight-tying": true,
+
+    "scaled-upper-triang-masked-softmax-fusion": false,
+    "bias-gelu-fusion": false,
+
+
+    "optimizer": {
+        "type": "Adam",
+        "params": {
+            "lr": 0.0006,
+            "betas": [0.9, 0.999],
+            "eps": 1.0e-8
+        }
+    },
+
+    "train_micro_batch_size_per_gpu": 1,
+    "data-impl": "mmap",
+    "split": "949,50,1",
+
+    "checkpoint-activations": true,
+    "checkpoint-num-layers": 1,
+    "partition-activations": true,
+    "synchronize-each-layer": true,
+
+    "gradient_clipping": 1.0,
+    "weight-decay": 0.0,
+    "hidden-dropout": 0.0,
+    "attention-dropout": 0.0,
+
+    "fp16": {
+        "enabled": true,
+        "loss_scale": 0,
+        "loss_scale_window": 1000,
+        "hysteresis": 2,
+        "min_loss_scale": 1
+    },
+
+    "train-iters": 320000,
+    "lr-decay-iters": 320000,
+    "distributed-backend": "nccl",
+    "lr-decay-style": "cosine",
+    "warmup": 0.01,
+    "save-interval": 10000,
+    "eval-interval": 1000,
+    "eval-iters": 10,
+
+    "log-interval": 100,
+    "steps_per_print": 10,
+    "keep-last-n-checkpoints": 4,
+    "wall_clock_breakdown": true,
+    "launcher": "slurm",
+    "deepspeed_slurm": true,
+    "comment": "neox",
+    "autotuning": {
+        "enabled": true,
+        "arg_mappings": {
+            "train_micro_batch_size_per_gpu": "--train_micro_batch_size_per_gpu",
+            "gradient_accumulation_steps ": "--gradient_accumulation_steps"
+        }
+    },
+    "zero_optimization": {
+        "stage": [0, 1, 2, 3]
+    },
+    "train-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"],
+    "valid-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"],
+    "test-data-paths": ["/fsx/pile_deduped/pile_0.87_deduped_text_document"]
+}
diff --git a/configs/autotuning_configs/tune.json b/configs/autotuning_configs/tune.json
new file mode 100644
index 0000000000000000000000000000000000000000..b2f114539120aa721f1e903cbd6befa58c093469
--- /dev/null
+++ b/configs/autotuning_configs/tune.json
@@ -0,0 +1,72 @@
+{
+    "pipe-parallel-size": 1,
+    "model-parallel-size": 1,
+    "num-layers": 12,
+    "hidden-size": 768,
+    "num-attention-heads": 12,
+    "seq-length": 2048,
+    "max-position-embeddings": 2048,
+    "norm": "layernorm",
+    "pos-emb": "rotary",
+    "no-weight-tying": true,
+    "scaled-upper-triang-masked-softmax-fusion": true,
+    "bias-gelu-fusion": true,
+    "optimizer": {
+        "type": "Adam",
+        "params": {
+            "lr": 0.0006,
+            "betas": [0.9, 0.999],
+            "eps": 1.0e-8
+        }
+    },
+    "zero_optimization": {
+        "stage": 0,
+        "allgather_partitions": true,
+        "allgather_bucket_size": 500000000,
+        "overlap_comm": true,
+        "reduce_scatter": true,
+        "reduce_bucket_size": 500000000,
+
"contiguous_gradients": true, + "cpu_offload": false + }, + "train_micro_batch_size_per_gpu": 1, + "autotuning_config": { + "enabled": true, + "arg_mappings": { + "train_micro_batch_size_per_gpu": "--train_micro_batch_size_per_gpu", + "gradient_accumulation_steps ": "--gradient_accumulation_steps" + } + }, + "data-impl": "mmap", + "split": "949,50,1", + "checkpoint-activations": true, + "checkpoint-num-layers": 1, + "partition-activations": true, + "synchronize-each-layer": true, + "gradient_clipping": 1.0, + "weight-decay": 0.0, + "hidden-dropout": 0.0, + "attention-dropout": 0.0, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "train-iters": 200, + "lr-decay-iters": 320000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "save-interval": 10000, + "eval-interval": 1000, + "eval-iters": 10, + "log-interval": 100, + "steps_per_print": 10, + "keep-last-n-checkpoints": 4, + "wall_clock_breakdown": true, + "launcher": "slurm", + "deepspeed_slurm": true, + "comment": "neox" +} diff --git a/configs/autotuning_configs/tune_1-3B.json b/configs/autotuning_configs/tune_1-3B.json new file mode 100644 index 0000000000000000000000000000000000000000..8207d0cfcca843823b57bb374dd399b6de238ad1 --- /dev/null +++ b/configs/autotuning_configs/tune_1-3B.json @@ -0,0 +1,86 @@ +{ + "pipe-parallel-size": 1, + "model-parallel-size": 1, + + "num-layers": 24, + "hidden-size": 2048, + "num-attention-heads": 16, + "seq-length": 2048, + "max-position-embeddings": 2048, + "norm": "layernorm", + "pos-emb": "rotary", + "no-weight-tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "attention_config": [[["flash"], 24]], + "scaled-upper-triang-masked-softmax-fusion": false, + "bias-gelu-fusion": false, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0002, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.00002, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true + }, + "train_micro_batch_size_per_gpu": 1, + "autotuning": { + "enabled": true, + "arg_mappings": { + "train_micro_batch_size_per_gpu": "--train_micro_batch_size_per_gpu", + "gradient_accumulation_steps ": "--gradient_accumulation_steps" + } + }, + "data-impl": "mmap", + + "checkpoint-activations": false, + "checkpoint-num-layers": 1, + "partition-activations": true, + "synchronize-each-layer": true, + + "gradient_clipping": 1.0, + "weight-decay": 0.1, + "hidden-dropout": 0, + "attention-dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train-iters": 320000, + "lr-decay-iters": 320000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "checkpoint-factor": 10000, + "eval-interval": 1000, + "eval-iters": 10, + "launcher": "slurm", + "deepspeed_slurm": true, + "no_ssh_check": true, + + "log-interval": 10, + "steps_per_print": 10, + "keep-last-n-checkpoints": 1, + "wall_clock_breakdown": true +} diff --git a/configs/autotuning_configs/tune_6-7B.json b/configs/autotuning_configs/tune_6-7B.json new file mode 100644 index 0000000000000000000000000000000000000000..3d7aadf68118c08dc79ee835356d748accd8ca12 --- /dev/null 
+++ b/configs/autotuning_configs/tune_6-7B.json @@ -0,0 +1,77 @@ +{ + "pipe-parallel-size": 1, + "model-parallel-size": 8, + + "num-layers": 32, + "hidden-size": 4096, + "num-attention-heads": 32, + "seq-length": 2048, + "max-position-embeddings": 2048, + "norm": "layernorm", + "pos-emb": "rotary", + "no-weight-tying": true, + + "scaled-upper-triang-masked-softmax-fusion": false, + "bias-gelu-fusion": false, + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00012, + "betas": [0.9, 0.999], + "eps": 1.0e-8 + } + }, + + "train_micro_batch_size_per_gpu": 1, + "zero_optimization": { + "stage": [0, 1, 2, 3] + }, + "data-impl": "mmap", + "split": "949,50,1", + + "checkpoint-activations": true, + "checkpoint-num-layers": 1, + "partition-activations": true, + "synchronize-each-layer": true, + + "gradient_clipping": 1.0, + "weight-decay": 0, + "hidden-dropout": 0, + "attention-dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train-iters": 100, + "lr-decay-iters": 320000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "checkpoint-factor": 10000, + "eval-interval": 1000, + "eval-iters": 10, + "log-interval": 100, + "steps_per_print": 10, + "keep-last-n-checkpoints": 4, + "wall_clock_breakdown": true, + "launcher": "slurm", + "deepspeed_slurm": true, + "no_ssh_check": true, + "comment": "neox", + "autotuning": { + "enabled": true, + "mp_size": 8, + "arg_mappings": { + "train_micro_batch_size_per_gpu": "--train_micro_batch_size_per_gpu", + "gradient_accumulation_steps": "--gradient_accumulation_steps" + } + } +} diff --git a/configs/bf16_125M.yml b/configs/bf16_125M.yml new file mode 100644 index 0000000000000000000000000000000000000000..87e86e7fbba9a5bc460e564e8b0343ea8a4e3183 --- /dev/null +++ b/configs/bf16_125M.yml @@ -0,0 +1,80 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + + # these should provide some speedup but take a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 0, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + "split": "949,50,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.0, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + "precision": "bfloat16", + + "fp32_allreduce":
True, # without a patch to torch, bf16 models have to do the allreduce in fp32 + # misc. training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/bnb_125M.yml b/configs/bnb_125M.yml new file mode 100644 index 0000000000000000000000000000000000000000..523b10c3935b97ecd77529c5b82560ce1e07ac4e --- /dev/null +++ b/configs/bnb_125M.yml @@ -0,0 +1,87 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "use_bnb_optimizer": true, + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 0, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + "split": "949,50,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.0, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/cpu_mock_config.yml b/configs/cpu_mock_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..653aa21d8244878f67965aa55be0d9ce198e78eb --- /dev/null +++ b/configs/cpu_mock_config.yml @@ -0,0 +1,5 @@ +# CPU unit tests should be independent of the presence of GPUs on the test server +# host. This configuration mocks these GPU resources and other dependencies. 
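+# ("global_num_gpus" is normally inferred post-launch from the available hardware;
+# pinning it to 1 here is a mock value that gives the rest of the configuration
+# machinery a deterministic device count on hosts without GPUs.)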
+{ + "global_num_gpus": 1 +} diff --git a/configs/docker/pythia-paths.yml b/configs/docker/pythia-paths.yml new file mode 100644 index 0000000000000000000000000000000000000000..442d61cf3a480e6b9b3aad5a90743b5da107df01 --- /dev/null +++ b/configs/docker/pythia-paths.yml @@ -0,0 +1,12 @@ +{ + "train-data-paths": ["/home/mchorse/data/pile_deduped/pile_0.87_deduped_text_document"], + "valid-data-paths": ["/home/mchorse/data/pile_deduped/pile_0.87_deduped_text_document"], + "test-data-paths": ["/home/mchorse/data/pile_deduped/pile_0.87_deduped_text_document"], + + "tokenizer-type": "HFTokenizer", + "vocab-file": "/home/mchorse/data/tokenizers/20B_tokenizer.json", + + "save": "/home/mchorse/chk/", + "load": "/home/mchorse/chk/", + "checkpoint_validation_with_forward_pass": False +} diff --git a/configs/eleutherai_cluster.yml b/configs/eleutherai_cluster.yml new file mode 100644 index 0000000000000000000000000000000000000000..36e75d8b39cd7baaa4fe36e995afd301fc39bccc --- /dev/null +++ b/configs/eleutherai_cluster.yml @@ -0,0 +1,29 @@ +# Data paths and options when using EleutherAI cluster +{ + # you may include multiple distinct datasets if desired + "train_data_paths": ["/mnt/ssd-1/data/enwik8/enwik8_text_document"], + "valid_data_paths": ["/mnt/ssd-1/data/enwik8/enwik8_val_text_document"], + "test_data_paths": ["/mnt/ssd-1/data/enwik8/enwik8_test_text_document"], + + # if using multiple datasets, provide weights for them to be sampled with + # "train-data-weights": [1., 2.], + # "test-data-weights": [2., 1.], + # "valid-data-weights": [0.5, 0.4], + + + # If you would like the code to create val and test datasets from your training set use the following instead + # "split" determines the relative size of train, val, and test + + # "split" 995,4,1 + # "data_path": "/mnt/ssd-1/data/enwik8/enwik8_text_document", + + "vocab_file": "/mnt/ssd-1/data/gpt2-vocab.json", + "merge_file": "/mnt/ssd-1/data/gpt2-merges.txt", + "save": "/mnt/ssd-1/checkpoints", + "load": "/mnt/ssd-1/checkpoints", + "tensorboard_dir": "/mnt/ssd-1/tensorboard", + "log_dir": "/mnt/ssd-1/logs", + "wandb_team": "eleutherai", + "wandb_project": "neox", + "wandb_group": "example" +} diff --git a/configs/finetuning_configs/6-9B.yml b/configs/finetuning_configs/6-9B.yml new file mode 100644 index 0000000000000000000000000000000000000000..6598238cd97d23ba469adb244eabb89a950436d9 --- /dev/null +++ b/configs/finetuning_configs/6-9B.yml @@ -0,0 +1,89 @@ +{ + # finetuning option + "load": "/path/to/checkpoint", + "finetune": true, + + "pipe-parallel-size": 1, + "model-parallel-size": 2, + + "num-layers": 32, + "hidden-size": 4096, + "num-attention-heads": 32, + "seq-length": 2048, + "max-position-embeddings": 2048, + "norm": "layernorm", + "pos-emb": "rotary", + "rotary_pct": 0.25, + "no-weight-tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention-config": [[["flash"], 32]], + + "scaled-upper-triang-masked-softmax-fusion": true, + "bias-gelu-fusion": true, + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00012, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + + "min_lr": 0.000012, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false, + "load_from_fp32_weights": False, # if checkpoint has fp16/bf16 params + }, + + "train_micro_batch_size_per_gpu": 8, + "gradient_accumulation_steps": 2, + 
"data-impl": "mmap", + + "checkpoint-activations": true, + "checkpoint-num-layers": 1, + "partition-activations": true, + "synchronize-each-layer": true, + + "gradient_clipping": 1.0, + "weight-decay": 0.1, + "hidden-dropout": 0, + "attention-dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train-iters": 143000, + "lr-decay-iters": 143000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "checkpoint-factor": 1000, + "extra-save-iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval-interval": 143000, + "eval-iters": 10, + + "log-interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/gen_docs.py b/configs/gen_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..1d8c5ba3badbd7c5089288e2bf676e1ccc2cf95c --- /dev/null +++ b/configs/gen_docs.py @@ -0,0 +1,96 @@ +import sys +import os + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) +) +from megatron.neox_arguments import neox_args, deepspeed_args +from inspect import getmembers, getsource +from dataclasses import field, is_dataclass +from itertools import tee, zip_longest +import pathlib + + +def pairwise(iterable): + "s -> (s0,s1), (s1,s2), (s2, s3), ..." + a, b = tee(iterable) + next(b, None) + return zip_longest(a, b) + + +def get_docs(module): + ARGS_CLASSES = getmembers(module, is_dataclass) + results = {} + for name, dcls in ARGS_CLASSES: + assert is_dataclass(dcls) + src = getsource(dcls) + d = dcls() + loc = 0 + results[name] = {"doc": d.__doc__.strip(), "attributes": {}} + for cur, _next in pairwise(d.__dataclass_fields__.items()): + field_name, field_def = cur + field_type = field_def.type + if hasattr(field_type, "__name__"): + if field_type.__name__ == "Literal" or field_type.__name__ == "Union": + field_type = field_type + else: + field_type = str(field_type.__name__) + else: + field_type = str(field_type) + + field_default = field_def.default + + # try to find the field definition + loc = src.find(f" {field_name}:", loc + len(field_name) + 1) + + if _next is not None: + next_field_name, _ = _next + # try to find the next field definition + next_loc = src.find(f"{next_field_name}:", loc + len(field_name)) + else: + next_loc = len(src) + + # try to get the docstring + _src = src[loc:next_loc].strip() + if '"""' in _src: + doc = _src.split('"""')[1].strip() + elif "'''" in _src: + doc = _src.split("'''")[1].strip() + else: + doc = "" + results[name]["attributes"][field_name] = { + "name": field_name, + "type": field_type, + "default": field_default, + "doc": doc, + } + return results + + +def to_md(docs, intro_str=""): + """ + Writes the docs dictionary to markdown format + """ + lines = [] + lines.append(intro_str) + for name, doc in docs.items(): + lines.append(f"## {name}") + lines.append(f"{doc['doc']}") + lines.append("") + for field_name, field_def in doc["attributes"].items(): + # attribute name and type + lines.append(f"- **{field_name}**: {field_def['type']}") + # default value + lines.append(f" Default = {str(field_def['default'])}") + lines.append(f" {field_def['doc']}") + lines.append("") + return "\n\n".join(lines) + + +if __name__ == "__main__": + docs = get_docs(neox_args) + docs.update(get_docs(deepspeed_args)) + intro_str = """Arguments for gpt-neox. 
All of the following can be specified in your .yml config file(s):\n""" + md = to_md(docs, intro_str=intro_str) + with open(f"{pathlib.Path(__file__).parent.resolve()}/neox_arguments.md", "w") as f: + f.write(md) diff --git a/configs/gmlp_small.yml b/configs/gmlp_small.yml new file mode 100644 index 0000000000000000000000000000000000000000..2a5b02d6063d5be36f17a1e56c7d257ebd0b3dc7 --- /dev/null +++ b/configs/gmlp_small.yml @@ -0,0 +1,72 @@ +# GPT-2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "attention_config": [[["gmlp"], "all"]], + + + # model settings + "num_layers": 12, + "hidden_size": 768, # gmlp d_ff defaults to hidden_size * 4 + "gmlp_attn_dim": 64, + "num_attention_heads": 12, # this has no effect with gmlp - and amlp defaults to single head attention. + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "none", + "no_weight_tying": true, + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + "split": "949,50,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": false, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc.
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/llama/13B.yml b/configs/llama/13B.yml new file mode 100644 index 0000000000000000000000000000000000000000..162e51719f0ff5ff2be1e99a8c50d1fceb23c07c --- /dev/null +++ b/configs/llama/13B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 2, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 40, + "hidden_size": 5120, + "num_attention_heads": 40, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-6, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama/30B.yml b/configs/llama/30B.yml new file mode 100644 index 0000000000000000000000000000000000000000..2c948e40cf35fc7f4594806243ebadcd59a2a679 --- /dev/null +++ b/configs/llama/30B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 60, + "hidden_size": 6656, + "num_attention_heads": 52, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-6, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama/65B.yml b/configs/llama/65B.yml new file mode 100644 index 0000000000000000000000000000000000000000..4ebd249b9dc0d00ec3331ce99fb45683a67dc9c3 --- /dev/null +++ b/configs/llama/65B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 8, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 80, + "hidden_size": 8192, + "num_attention_heads": 64, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-6, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama/7B.yml b/configs/llama/7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..cc21446be955b09010819b6594a7fb9506e88b6d --- /dev/null +++ b/configs/llama/7B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": 
"column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-6, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama/README.md b/configs/llama/README.md new file mode 100644 index 0000000000000000000000000000000000000000..48f5ae0d87997bc81c1fdaa7b400f73edc423632 --- /dev/null +++ b/configs/llama/README.md @@ -0,0 +1,23 @@ +# LLaMA + +## Training and Finetuning + +These configs contain the architecture settings required to run inference/training/finetuning on the [LLaMA](https://huggingface.co/docs/transformers/main/model_doc/llama) model suite. + +LLaMA finetuning can be launched with +```sh +python ./deepy.py ./train.py -d configs llama/7B.yml llama/train_config.yml local_setup.yml +``` + +If training from scratch, set `finetune=False` in `./configs/llama/train_config.yml`. + + +## Inference + + +LLaMA generation can be launched with +```sh +python ./deepy.py ./generate.py -d configs \ + llama/7B.yml llama/train_config.yml local_setup.yml text_generation.yml \ + -i input_prompt.txt -o prompt_out.txt +``` diff --git a/configs/llama/train_config.yml b/configs/llama/train_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..7cc5a5968f9548cc92830d3fc77c8bc57b235d5a --- /dev/null +++ b/configs/llama/train_config.yml @@ -0,0 +1,74 @@ +{ + # finetuning option + "finetune": true, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0002, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00002, + "override_lr_scheduler": true, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + "mlp_multiple_of": 256, +} diff --git a/configs/llama2/13B.yml b/configs/llama2/13B.yml new file mode 100644 index 0000000000000000000000000000000000000000..5bf7a4f723382a9539ff5ce7985ca7dd2d1f281c --- /dev/null +++ b/configs/llama2/13B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 2, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 40, + "hidden_size": 5120, + "num_attention_heads": 40, + "seq_length": 4096, + "max_position_embeddings": 4096, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama2/70B.yml b/configs/llama2/70B.yml new file mode 100644 index 0000000000000000000000000000000000000000..b628deffe168c1f5e1ae6ee3ccefd12963e14e01 --- /dev/null +++ b/configs/llama2/70B.yml @@ -0,0 +1,31 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 80, + "hidden_size": 8192, + "intermediate_size": 28672, + "num_attention_heads": 64, + "num_kv_heads": 8, + "seq_length": 4096, + "max_position_embeddings": 4096, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 1000000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 80]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama2/7B.yml b/configs/llama2/7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..eeba99c52ff35b04362b05be8eebb2fb0439c273 --- /dev/null +++ b/configs/llama2/7B.yml @@ -0,0 +1,26 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "seq_length": 4096, + "max_position_embeddings": 4096, + "pos_emb": "rotary", + "rotary_pct": 1, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama2/codellama_34B.yml b/configs/llama2/codellama_34B.yml new file mode 100644 index 0000000000000000000000000000000000000000..e4cb2fc78ece7198fd6d2cd116d375a843d97022 --- /dev/null +++ b/configs/llama2/codellama_34B.yml @@ -0,0 +1,32 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 48, + "hidden_size": 8192, + "num_attention_heads": 
64, + "num_kv_heads": 8, + # Codellama was uptrained on 16k token sequence lengths + # with rotary_emb_base adjusted to 1_000_000. + "seq_length": 16384, + "max_position_embeddings": 16384, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 1000000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 48]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llama2/codellama_7B.yml b/configs/llama2/codellama_7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..e8775f3ebc0df0f6b63fe7f5691c69171df07104 --- /dev/null +++ b/configs/llama2/codellama_7B.yml @@ -0,0 +1,31 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + # Codellama was uptrained on 16k token sequence lengths + # with rotary_emb_base adjusted to 1_000_000. + "seq_length": 16384, + "max_position_embeddings": 16384, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 1000000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, +} diff --git a/configs/llemma/34B.yml b/configs/llemma/34B.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a693c7f4f4f428c80da6f9c9d62667bb79417d1 --- /dev/null +++ b/configs/llemma/34B.yml @@ -0,0 +1,101 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 8, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 48, + "hidden_size": 8192, + "num_attention_heads": 64, + "attention_type": "groupedquery", + "num_kv_heads": 8, + # NB: These rotary embedding and sequence length parameters + # May differ from CodeLlama configs. They match what we used for + # Llemma continued pretraining. 
See https://arxiv.org/abs/2310.10631 + # For detailed discussion + "seq_length": 4096, + "max_position_embeddings": 4096, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 1000000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 48]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00005, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + # trained on 256 gpus + "train_micro_batch_size_per_gpu": 2, + "gradient_accumulation_steps": 16, + "data_impl": "mmap", + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 12000, + "lr_decay_iters": 12000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "min_lr": 1.65e-6, + "warmup": 0.042, # warmup for ~500 iters + "checkpoint_factor": 250, + "eval_interval": 250, + "eval_iters": 25, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + "tokenizer_type": "SPMTokenizer", + #"vocab-file": # use 'tokenizer.model' from Meta CodeLlama download + + # "load": "" # set to same as "save" to resume from intermediate finetuning step + #"load": MP=8 CodeLlama-34B checkpoint, converted from Meta CodeLlama download. + # When resuming from mid-finetuning run, change "load" to the same as save location. + "finetune": true, # set to false once resuming from intermediate finetuning step +} diff --git a/configs/llemma/7B.yml b/configs/llemma/7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..363cf4315b4da64382fa211b98206a444d056847 --- /dev/null +++ b/configs/llemma/7B.yml @@ -0,0 +1,100 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 2, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + # NB: These rotary embedding and sequence length parameters + # May differ from CodeLlama configs. They match what we used for + # Llemma continued pretraining. 
See https://arxiv.org/abs/2310.10631 + # For detailed discussion + "seq_length": 4096, + "max_position_embeddings": 4096, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 10000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + "mlp_multiple_of": 256, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + # trained on 256 gpus + "train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 48000, + "lr_decay_iters": 48000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "decay_lr_to": 0.033, + "warmup_iters": 500, + "checkpoint_factor": 500, + "eval_interval": 250, + "eval_iters": 50, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + "tokenizer_type": "SPMTokenizer", + "vocab-file": "codellama/tokenizer.model", # use tokenizer.model from Meta CodeLlama download + + "save": "/path/to/save/llema-replication", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + "load": "/path/to/converted/codellama_7b_weights_with_mp2", + + "finetune": true, # set to false once resuming from intermediate finetuning step +} diff --git a/configs/local_setup.yml b/configs/local_setup.yml new file mode 100644 index 0000000000000000000000000000000000000000..b8ec4b06aa1ee1fa97e7c031d7031232272ac04c --- /dev/null +++ b/configs/local_setup.yml @@ -0,0 +1,27 @@ +# Suggested data paths when using GPT-NeoX locally +{ + "data_path": "data/enwik8/enwik8_text_document", + + # or for weighted datasets: + # "train-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "test-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "valid-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "train-data-weights": [1., 2.], + # "test-data-weights": [2., 1.], + # "valid-data-weights": [0.5, 0.4], + + # If weight_by_num_documents is True, Builds dataset weights from a multinomial distribution over groups of data according to the number of documents in each group. 
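+    # (Roughly: each group's sampling weight is proportional to num_documents ** weighted_sampler_alpha,
+    # renormalized over all groups, so alpha < 1 relatively upweights the smaller datasets.)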
+ # WARNING: setting this to True will override any user provided weights + # "weight_by_num_documents": false, + # "weighted_sampler_alpha": 0.3, + + "vocab_file": "data/gpt2-vocab.json", + "merge_file": "data/gpt2-merges.txt", + + "save": "checkpoints", + "load": "checkpoints", + "checkpoint_validation_with_forward_pass": False, + + "tensorboard_dir": "tensorboard", + "log_dir": "logs", +} diff --git a/configs/local_setup_comet.yml b/configs/local_setup_comet.yml new file mode 100644 index 0000000000000000000000000000000000000000..12ff7b3883899a36e19d938bab207929fda3d8a1 --- /dev/null +++ b/configs/local_setup_comet.yml @@ -0,0 +1,33 @@ +# Suggested data paths when using GPT-NeoX locally +{ + "data_path": "/workspace/gpt-neox-main/data/enwik8/enwik8_text_document", + + # or for weighted datasets: + # "train-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "test-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "valid-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "train-data-weights": [1., 2.], + # "test-data-weights": [2., 1.], + # "valid-data-weights": [0.5, 0.4], + + # If weight_by_num_documents is True, Builds dataset weights from a multinomial distribution over groups of data according to the number of documents in each group. + # WARNING: setting this to True will override any user provided weights + # "weight_by_num_documents": false, + # "weighted_sampler_alpha": 0.3, + + "vocab_file": "/workspace/gpt-neox-main/data/gpt2-vocab.json", + "merge_file": "/workspace/gpt-neox-main/data/gpt2-merges.txt", + + "save": "checkpoints", + "load": "checkpoints", + "checkpoint_validation_with_forward_pass": False, + + "tensorboard_dir": "tensorboard", + "log_dir": "logs", + "use_comet": True, + # "comet_workspace": "test_workspace", # CHANGE ME + "comet_project": "test_project", + "comet_experiment_name": "test_experiment", + "comet_tags": ["test_tag1", "test_tag2"], + "comet_others": {"test_others"}, +} diff --git a/configs/local_setup_wandb.yml b/configs/local_setup_wandb.yml new file mode 100644 index 0000000000000000000000000000000000000000..d031a2ad85828f10ef1ea9be24bc3264b1e09d9d --- /dev/null +++ b/configs/local_setup_wandb.yml @@ -0,0 +1,30 @@ +# Suggested data paths when using GPT-NeoX locally +{ + "data_path": "data/enwik8/enwik8_text_document", + + # or for weighted datasets: + # "train-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "test-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "valid-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "train-data-weights": [1., 2.], + # "test-data-weights": [2., 1.], + # "valid-data-weights": [0.5, 0.4], + + # If weight_by_num_documents is True, Builds dataset weights from a multinomial distribution over groups of data according to the number of documents in each group. 
+ # WARNING: setting this to True will override any user provided weights + # "weight_by_num_documents": false, + # "weighted_sampler_alpha": 0.3, + + "vocab_file": "data/gpt2-vocab.json", + "merge_file": "data/gpt2-merges.txt", + + "save": "checkpoints", + "load": "checkpoints", + "checkpoint_validation_with_forward_pass": False, + + "tensorboard_dir": "tensorboard", + "log_dir": "logs", + "use_wandb": True, + "wandb_host": "https://api.wandb.ai", + "wandb_project": "neox" +} diff --git a/configs/mamba/mamba-1.4B.yml b/configs/mamba/mamba-1.4B.yml new file mode 100644 index 0000000000000000000000000000000000000000..eae467d0ecb77b99ca342cba1b838ca4413a47c3 --- /dev/null +++ b/configs/mamba/mamba-1.4B.yml @@ -0,0 +1,89 @@ +{ + # Parallelism is not yet supported for Mamba + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + "num_layers": 48, + "hidden_size": 2048, + "num_attention_heads": 12, # ignored when using mamba + "seq_length": 2048, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["mamba"], 48]], + + "mamba_selective_scan_fusion": true, + "mamba_causal_conv_fusion": true, + "mamba_inner_func_fusion": true, # supersedes scan or conv fusion + "activation": "silu", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "single_residual_scaled_normal", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0002, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00002, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 1, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/mamba/mamba-130M.yml b/configs/mamba/mamba-130M.yml new file mode 100644 index 0000000000000000000000000000000000000000..bd05723b280d4b25d9bf369e76a9856465d65a5e --- /dev/null +++ b/configs/mamba/mamba-130M.yml @@ -0,0 +1,89 @@ +{ + # Parallelism is not yet supported for Mamba + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + "num_layers": 24, + "hidden_size": 768, + "num_attention_heads": 12, # ignored when using mamba + "seq_length": 2048, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["mamba"], 24]], + + "mamba_selective_scan_fusion": true, + "mamba_causal_conv_fusion": true, + "mamba_inner_func_fusion": true, # supersedes scan or conv fusion + "activation": "silu", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "single_residual_scaled_normal", + + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00006, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/mamba/mamba-2.8B.yml b/configs/mamba/mamba-2.8B.yml new file mode 100644 index 0000000000000000000000000000000000000000..d5afef3680abd6aaf5cac63dd1ad16855aee7418 --- /dev/null +++ b/configs/mamba/mamba-2.8B.yml @@ -0,0 +1,89 @@ +{ + # Parallelism is not yet supported for Mamba + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + "num_layers": 64, + "hidden_size": 2560, + "num_attention_heads": 12, # ignored when using mamba + "seq_length": 2048, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["mamba"], 64]], + + "mamba_selective_scan_fusion": true, + "mamba_causal_conv_fusion": true, + "mamba_inner_func_fusion": true, # supersedes scan or conv fusion + "activation": "silu", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "single_residual_scaled_normal", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00016, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000016, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/mamba/mamba-370M.yml b/configs/mamba/mamba-370M.yml new file mode 100644 index 0000000000000000000000000000000000000000..0058f1c0e64aa75769c6508f8ce514fcd0113293 --- /dev/null +++ b/configs/mamba/mamba-370M.yml @@ -0,0 +1,88 @@ +{ + # Parallelism is not yet supported for Mamba + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + "num_layers": 48, + "hidden_size": 1024, + "num_attention_heads": 12, # ignored when using mamba + "seq_length": 2048, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["mamba"], 48]], + + "mamba_selective_scan_fusion": true, + "mamba_causal_conv_fusion": true, + "mamba_inner_func_fusion": true, # supersedes scan or conv fusion + "activation": "silu", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "single_residual_scaled_normal", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0003, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00003, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/mamba/mamba-790M.yml b/configs/mamba/mamba-790M.yml new file mode 100644 index 0000000000000000000000000000000000000000..4aef7e81315d33e3346fb3e2b3ae8c337b40d5de --- /dev/null +++ b/configs/mamba/mamba-790M.yml @@ -0,0 +1,89 @@ +{ + # Parallelism is not yet supported for Mamba + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + "num_layers": 48, + "hidden_size": 1536, + "num_attention_heads": 12, # ignored when using mamba + "seq_length": 2048, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["mamba"], 48]], + + "mamba_selective_scan_fusion": true, + "mamba_causal_conv_fusion": true, + "mamba_inner_func_fusion": true, # supersedes scan or conv fusion + "activation": "silu", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "single_residual_scaled_normal", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00025, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + "min_lr": 0.000025, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, +} diff --git a/configs/mistral/7B.yml b/configs/mistral/7B.yml new file mode 100644 index 0000000000000000000000000000000000000000..ba4e543d63179c80eee25bd34080762c7fa6d025 --- /dev/null +++ b/configs/mistral/7B.yml @@ -0,0 +1,41 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "intermediate_size": 14336, + "num_attention_heads": 32, + "num_kv_heads": 8, + # per Mistral, Mistral-7B-v0.1 was pretrained with 8192 seqlen + # and instruction tuned to 16384 seqlen, all with 4096 sliding window + "seq_length": 8192, + "sliding_window_width": 4096, + "max_position_embeddings": 131072, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 10000, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + # Grouped Query Attention is supported for both default ("global") + # and Flash attention. However, we highly recommend the use of Flash attention + # to get FLOP + runtime speedups when using GQA, + # and sliding window attention is currently only supported by Flash attention. + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "activation": "swiglu", + + "tokenizer_type": "SPMTokenizer", + #"vocab-file": ".../mistral-7B-v0.1/tokenizer.model", # use tokenizer.model from Mistral-7B-v0.1 direct download + +} diff --git a/configs/neox_arguments.md b/configs/neox_arguments.md new file mode 100644 index 0000000000000000000000000000000000000000..68697418189f3433f0446c0936da7bda50c7c2b1 --- /dev/null +++ b/configs/neox_arguments.md @@ -0,0 +1,2707 @@ +Arguments for gpt-neox. All of the following can be specified in your .yml config file(s): + + +## NeoXArgsLRScheduler + +LR Scheduler Arguments + + + +- **lr_decay_style**: typing.Literal['constant', 'linear', 'cosine', 'exponential'] + + Default = linear + + Learning rate decay function. Choose from 'constant', 'linear', 'cosine', 'exponential'. + + +- **lr_decay_iters**: int + + Default = None + + Number of iterations to decay learning rate over. If None, defaults to + --train-iters or the equivalent inferred value from train_epochs. + +- **lr_decay_fraction**: float + + Default = None + + Effective fraction of training over which to decay lr. Overrides lr_decay_iters. + Useful when specifying train_epochs. + +- **min_lr**: float + + Default = 0.0 + + Minimum value for learning rate. The scheduler clips values below this threshold. + + + +- **warmup**: float + + Default = 0.01 + + Percentage of total iterations to warmup on (.01 = 1 percent of all training iters). + + + +- **override_lr_scheduler**: bool + + Default = False + + Reset the values of the scheduler (learning rate,warmup iterations, minimum learning rate, maximum number of iterations, and decay style from input arguments and ignore values from checkpoints. Note that all the above values will be reset. 
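+ For example, when finetuning from a pretrained checkpoint under a fresh schedule
+ (see `configs/llama/train_config.yml` above, which sets this flag), the relevant
+ snippet looks roughly like:
+
+ ```yaml
+ "override_lr_scheduler": true,
+ "optimizer": {"type": "Adam", "params": {"lr": 0.0002, "betas": [0.9, 0.95], "eps": 1.0e-8}},
+ "min_lr": 0.00002,
+ ```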
+ + + +- **use_checkpoint_lr_scheduler**: bool + + Default = False + + Use checkpoint to set the values of the scheduler (learning rate, warmup iterations, minimum learning rate, maximum number of iterations, and decay style from checkpoint and ignore input arguments. + + + +## NeoXArgsLogging + +Logging Arguments + + + +- **use_wandb**: bool + + Default = None + + Flag indicating if wandb is to be used. + + + +- **wandb_group**: str + + Default = None + + Weights and Biases group name - used to group together "runs". + + + +- **wandb_team**: str + + Default = None + + Team name for Weights and Biases. + + + +- **wandb_project**: str + + Default = neox + + wandb project name + + + +- **wandb_host**: str + + Default = https://api.wandb.ai + + url of the wandb host + + + +- **wandb_init_all_ranks**: bool + + Default = False + + Initialize wandb on all ranks. + + + +- **git_hash**: str + + Default = 62c9738a + + current git hash of repository + + + +- **log_dir**: str + + Default = None + + Directory to save logs to. + + + +- **tensorboard_dir**: str + + Default = None + + Write TensorBoard logs to this directory. + + + +- **use_comet**: bool + + Default = None + + Flag indicating if comet is to be used. + + + +- **comet_workspace**: Optional + + Default = None + + Comet workspace name, if not configured Comet Experiments will be created in the user configured default workspace. + + + +- **comet_project**: Optional + + Default = None + + Comet project name, if not configured Comet Experiments will be created in the Uncategorized Experiments project. + + + +- **comet_experiment_name**: Optional + + Default = None + + Custom name for the Comet experiment. If not provided, a random name is used. + + + +- **comet_tags**: Optional + + Default = None + + List of tags to attach to the created Comet Experiment. + + + +- **comet_others**: Optional + + Default = None + + Custom metadata to attach to the created Comet Experiment. + + + +- **log_interval**: int + + Default = 100 + + Interval between logging. + + + +- **log_grad_pct_zeros**: bool + + Default = False + + Log the percentage of zeros for the gradient of each parameter to wandb / tensorboard (useful for debugging). Needs wandb_init_all_ranks set to True if using pipeline parallelism to log all ranks. + + + +- **log_param_norm**: bool + + Default = False + + Log the frob norm of the parameters to wandb / tensorboard (useful for debugging). Needs wandb_init_all_ranks set to True if using pipeline parallelism to log all ranks. + + + +- **log_grad_norm**: bool + + Default = False + + Log the frob norm of the gradients to wandb / tensorboard (useful for debugging). + (N.B - this will only work with pp = 0 for now, as we don't have access to the gradients of the model because + deepspeed.) + + + +- **log_optimizer_states**: bool + + Default = False + + Log the frob norm of the optimizer states to wandb / tensorboard (useful for debugging). + + + +- **log_gradient_noise_scale**: bool + + Default = False + + Whether to log the gradient noise scale when training (cf. https://arxiv.org/abs/1812.06162 for explanation) + + + +- **gradient_noise_scale_n_batches**: int + + Default = 5 + + Number of batches to accumulate gradients for in the gradient noise scale logger. + + + +- **gradient_noise_scale_cpu_offload**: bool + + Default = False + + Whether to offload the buffered gradients to cpu when measuring gradient noise scale. + + + +- **memory_profiling**: bool + + Default = False + + Whether to take a memory snapshot of the model. 
Useful for debugging memory issues. + + + +- **memory_profiling_path**: str + + Default = None + + Path to save memory snapshot to. + + + +- **profile**: bool + + Default = False + + Enable nsys profiling. When using this option, + nsys options should be specified in commandline. + An example nsys commandline is + ``` + nsys profile -s none -t nvtx,cuda -o + --force-overwrite true + --capture-range=cudaProfilerApi + --capture-range-end=stop + ``` + + + +- **profile_step_start**: int + + Default = 10 + + Step to start profiling at. + + + +- **profile_step_stop**: int + + Default = 12 + + Step to stop profiling at. + + + +## NeoXArgsModel + +Model Arguments + + + +- **precision**: typing.Literal['fp16', 'fp32', 'bfloat16'] + + Default = None + + description of the used precision, either one of fp16 or fp32 (and in the future bf16). + + + +- **num_layers**: int + + Default = None + + Number of transformer layers. + + + +- **hidden_size**: int + + Default = None + + Transformer hidden size. + + + +- **intermediate_size**: int + + Default = None + + Transformer intermediate size. Default = 4h + + + +- **mlp_multiple_of**: int + + Default = 1 + + force mlp size to be a multiple of this value + + + +- **expansion_factor**: float + + Default = None + + Transformer intermediate size. Default = 4 + + + +- **num_attention_heads**: int + + Default = None + + Number of transformer attention heads. + + If num_kv_heads is set, will control only number of query heads. + + + +- **num_kv_heads**: int + + Default = None + + Number of transformer key/value attention heads. + + If set to None or the same value as num_attention_heads, will perform multi-head attention (MHA). + If set to < num_attention_heads but > 1, will perform grouped-query attention (GQA) (https://arxiv.org/pdf/2305.13245.pdf) + If set to 1, will perform multi-query attention. + + Must be < num_attention_heads and divide num_attention_heads evenly. + + + +- **seq_length**: int + + Default = None + + Maximum sequence length to process. + + + +- **sliding_window_width**: int + + Default = None + + Width of the attention sliding window. Only supported with Flash Attention 2. + + + +- **max_position_embeddings**: int + + Default = None + + Maximum number of position embeddings to use. This is the size of position embedding. + + + +- **norm**: typing.Literal['layernorm', 'rmsnorm', 'scalenorm', 'te_rmsnorm', 'te_layernorm'] + + Default = layernorm + + Normalization layer to use. Choose from "layernorm", "rmsnorm", "scalenorm", "te_rmsnorm", "te_layernorm". + + + +- **layernorm_fusion**: bool + + Default = False + + Use fused layer norm kernel (if `norm` is `layernorm`). + + + +- **rmsnorm_fusion**: bool + + Default = False + + Use fused RMS norm kernel (if `norm` is `rmsnorm`). + + + +- **use_qk_layernorm**: bool + + Default = False + + Use QK Normalization + + + +- **layernorm_epsilon**: float + + Default = 1e-05 + + Layer norm epsilon. + + + +- **rms_norm_epsilon**: float + + Default = 1e-08 + + Root mean squared norm epsilon + + + +- **scalenorm_epsilon**: float + + Default = 1e-08 + + Scalenorm epsilon + + + +- **pos_emb**: typing.Literal['learned', 'rotary', 'sinusoidal', 'rpe', 'alibi', 'none'] + + Default = learned + + Type of positional embedding to use - choose from 'learned', 'rotary', 'sinusoidal', 'rpe', 'none' + + + +- **rpe_num_buckets**: int + + Default = 32 + + T5 relative positional encoding number of buckets, default 32. 
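+ Only takes effect when `pos_emb` is set to `rpe`.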
+ + + +- **rpe_max_distance**: int + + Default = 128 + + T5 relative positional encoding max distance, default 128. + + + +- **opt_pos_emb_offset**: int + + Default = 0 + + Learned position embedding offset (only used by OPT, where it should be set to 2). + + + +- **no_weight_tying**: bool + + Default = False + + Disables weight tying between embedding weights and final Linear layer + + + +- **attention_config**: list + + Default = None + + Attention configuration for gpt-neox + + The first item in the list specifies the attention type(s), and should be a list of strings. The second item + specifies the number of times to repeat those attention types in the full list. + + attention type choices: [global, local, sparse_fixed, sparse_variable, bslongformer, bigbird, "gmlp", "amlp", "flash", "mamba", "rwkv"] + + So a 12 layer network with only global attention could be specified like: + [[[`global`], 12]] + + or a 12 layer network with alternating global / local like: + [[[`global`, `local`], 6]] + + If none is specified, this defaults to + [[[`global`], n_layers]] + + + +- **sparsity_config**: dict + + Default = None + + Sparsity configuration dict as defined in https://www.deepspeed.ai/docs/config-json/#sparse-attention + + Note that since neox is autoregressive, attention is always "unidirectional" and `horizontal_global_attention` is + always false. + + The main difference between our sparsity config and deepspeed's is that `mode` is ignored - since it is instead + specified in attention_config defining each layer. + + An example config is given below: + "sparse_attention": { + "block": 16, + "different_layout_per_head": true, + "num_local_blocks": 4, + "num_global_blocks": 1, + "num_different_global_patterns": 4, + "num_random_blocks": 0, + "local_window_blocks": [4], + "global_block_indices": [0], + "global_block_end_indices": None, + "num_sliding_window_blocks": 3 + } + + + +- **num_unique_layers**: int + + Default = None + + Number of unique transformer layers. num-layers should be divisible by this value. Currently only has an effect when pipe_parallel_size=0. + + + +- **param_sharing_style**: str + + Default = grouped + + Ordering of the shared parameters. For example, for num-layers=4 and num-unique-layers=2, we will have the following ordering for the two unique layers 1 and 2: grouped: [1, 2, 1, 2] and spaced: [1, 1, 2, 2]. + + + +- **make_vocab_size_divisible_by**: int + + Default = 128 + + Pad the vocab size to be divisible by this value. This is added for computational efficiency reasons. + + + +- **activation**: typing.Literal['gelu', 'geglu', 'relu', 'softsign', 'swish', 'mish', 'silu', 'reglu', 'swiglu', 'bilinear', 'glu'] + + Default = gelu + + Activation function to use - choose from ["gelu", "geglu", "relu", "softsign", "swish", "mish", "silu", "reglu", "swiglu", "bilinear", "glu"] + + + +- **use_flashattn_swiglu**: bool + + Default = False + + Use flash attention's version of swiglu + + + +- **scaled_upper_triang_masked_softmax_fusion**: bool + + Default = False + + Enable fusion of query-key-value scaling, upper-triangular (causal) masking, and softmax. + + + +- **scaled_masked_softmax_fusion**: bool + + Default = False + + Enable fusion of query-key-value scaling, general masking, and softmax. + + + +- **bias_gelu_fusion**: bool + + Default = False + + Enable bias and gelu fusion. + + + +- **bias_dropout_fusion**: bool + + Default = False + + Enable bias and dropout fusion. + + + +- **rope_fusion**: bool + + Default = False + + Enable rotary embedding fusion.
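+As a combined sketch of the `attention_config` and kernel-fusion arguments above (values are illustrative, not a recommendation), a model config fragment could look like:
+
+```
+{
+  # 12 layers alternating global / local attention
+  "attention_config": [[["global", "local"], 6]],
+
+  # fused kernels - these require the corresponding CUDA kernels to be built
+  "scaled_upper_triang_masked_softmax_fusion": true,
+  "bias_gelu_fusion": true,
+  "rope_fusion": false
+}
+```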
+ + + +- **fp16_lm_cross_entropy**: bool + + Default = False + + Move the cross entropy unreduced loss calculation for lm head to fp16. + + + +- **init_method_std**: float + + Default = 0.02 + + Standard deviation of the zero mean normal distribution used for weight initialization. + + + +- **apply_query_key_layer_scaling**: bool + + Default = False + + Scale Q * K^T by 1 / layer-number. If this flag is set, then it will automatically set attention-softmax-in-fp32 to true + + + +- **use_cpu_initialization**: bool + + Default = False + + If set, affine parallel weights initialization uses CPU + + + +- **attention_softmax_in_fp32**: bool + + Default = False + + Run attention masking and softmax in fp32. + + + +- **rotary_pct**: float + + Default = 1.0 + + pct of hidden dims to apply rotary positional embedding to + + + +- **rotary_emb_base**: int + + Default = 10000 + + Base for rotary positional embedding + + + +- **rotary_save_freqs_buffer**: bool + + Default = False + + Used to control whether the `inv_freqs` buffer in rotary embeddings + will be stored in checkpoints (persistent=True) or not. + + Defaults to false, but is left configurable to maintain backward-compatibility + with GPT-NeoX checkpoints that were trained with this flag. + + + +- **init_method**: typing.Literal['normal', 'scaled_normal', 'orthogonal', 'scaled_orthogonal', 'xavier_uniform', 'xavier_normal', 'wang_init', 'small_init', 'single_residual_scaled_normal'] + + Default = normal + + Init function used on all layers except ff residual outputs - choose from + ["normal", "scaled_normal", "orthogonal", "scaled_orthogonal", "xavier_uniform", "xavier_normal", "wang_init", "small_init"] + + + +- **output_layer_init_method**: typing.Literal['normal', 'scaled_normal', 'orthogonal', 'scaled_orthogonal', 'xavier_uniform', 'xavier_normal', 'wang_init', 'small_init', 'single_residual_scaled_normal'] + + Default = scaled_normal + + Init function used for ff residual outputs - choose from + ["normal", "scaled_normal", "orthogonal", "scaled_orthogonal", "xavier_uniform", "xavier_normal", "wang_init", "small_init"] + + + +- **gmlp_attn_dim**: int + + Default = 64 + + the dimension of the single head self attention in gmlp model (not used in gpt models). + If None - gmlp model doesn't use attention. + + + +- **gpt_j_residual**: bool + + Default = False + + If false, we use the conventional residual path: + x = x + attn(ln1(x)) + x = x + mlp(ln2(x)) + Otherwise, we use the residual path from GPT-J, which offers a slight speedup: + x = ln(x) + x = x + attn(x) + mlp(x) + + + +- **gpt_j_tied**: bool + + Default = False + + If false, we use + x = x + attn(ln1(x)) + mlp(ln2(x)) + Otherwise, we tie the layer norms + y = ln(x) + x = x + attn(y) + mlp(y) + + + +- **use_bias_in_norms**: bool + + Default = True + + If false, norms (e.g. LayerNorm) will not have bias terms + + + +- **use_bias_in_attn_linear**: bool + + Default = True + + If false, attn_linear (e.g. QKVO) will not have bias terms + + + +- **use_bias_in_mlp**: bool + + Default = True + + If false, mlps will not have bias terms + + + +- **soft_prompt_tuning**: dict + + Default = None + + Dictionary configuring the soft prompt tuning parameters. + If enabled, will train *only* the soft prompt, and freezes the rest of the model. 
parameters in the dict are: + 'enabled': bool = True # enables soft prompting + 'num_tokens': int = 10 # length of the soft prompt in tokens + 'init_string': str = '' # if provided, initialize the soft prompt with the word embeddings of this string + 'init_range': float = 0.5 # if no init string is provided, initialize the soft prompt with a uniform distribution between -init_range and init_range + + + +- **mamba_selective_scan_fusion**: bool + + Default = False + + Enable fused kernels for Mamba selective scan. + + + +- **mamba_causal_conv_fusion**: bool + + Default = False + + Enable fused kernels for Mamba causal Conv1d. + + + +- **mamba_inner_func_fusion**: bool + + Default = False + + Enable fused inner operator for Mamba. (Supersedes conv. and selective scan fusion flags, requires each of those kernels to be installed.) + + + +- **mamba_selective_fp32_params**: bool + + Default = True + + Keep selected parameters in fp32 for Mamba (A and D). + Requires https://github.com/EleutherAI/DeeperSpeed/pull/61 . + + + +- **mamba_use_bias_in_conv**: bool + + Default = True + + If false, conv1d in mamba block will not have bias term + + + +- **mamba_use_bias_in_linears**: bool + + Default = False + + Enable bias terms in mamba block up- and down-projections (in_proj and out_proj). + + + +- **output_layer_parallelism**: typing.Literal['column'] + + Default = column + + Parameter controlling whether the output layer is parallelized over the hidden dim (row) or the vocab dim (column) + + + +- **dim_att**: int + + Default = None + + Total dimension of the attention mechanism for RWKV. If not set, defaults to hidden_size. + + + +- **head_size**: int + + Default = None + + Size of each attention head for RWKV. Calculated as dim_att // num_attention_heads. + + + +- **ffn_dim**: int + + Default = None + + Dimension of the feed-forward network for RWKV. If not set, calculated based on hidden_size and expansion_factor. + + +## NeoXArgsOptimizer + +Optimizer Arguments + + + +- **optimizer_type**: typing.Literal['adam', 'onebitadam', 'cpu_adam', 'cpu_torch_adam', 'sm3', 'madgrad_wd', 'sgd', 'lion'] + + Default = adam + + Type of optimizer to use. Choose from ['adam', 'onebitadam', 'cpu_adam', 'cpu_torch_adam', 'sm3', 'madgrad_wd', 'sgd', 'lion'] + NOTE: sgd will use MuSGD from Mup. Mup must be enabled for this optimizer. + + + +- **use_bnb_optimizer**: bool + + Default = False + + Whether to enable the bitsandbytes optimizers + + + +- **zero_stage**: typing.Union[int, typing.List[int], typing.Literal['all']] + + Default = None + + ZeRO optimizer stage + + + +- **zero_reduce_scatter**: bool + + Default = None + + Zero: Uses reduce or reduce scatter instead of allreduce to average gradients + + + +- **zero_contiguous_gradients**: bool + + Default = None + + Zero: Copies the gradients to a contiguous buffer as they are produced. Avoids memory fragmentation during backward pass. Only useful when running very large models. + + + +- **zero_reduce_bucket_size**: int + + Default = None + + Zero: Number of elements reduced/allreduced at a time. Limits the memory required for the allgather for large model sizes + + + +- **zero_allgather_bucket_size**: int + + Default = None + + Zero: Number of elements allgathered at a time. Limits the memory required for the allgather for large model sizes + + + +- **lr**: float + + Default = None + + Maximum learning rate during training + + + +## NeoXArgsOther + +Misc.
Arguments + + + +- **distributed_backend**: str + + Default = nccl + + Which backend to use for distributed training. + + + +- **local_rank**: int + + Default = None + + local rank passed from distributed launcher. + + + +- **rank**: int + + Default = None + + global rank of process being run (passed in via distributed launcher) + + + +- **lazy_mpu_init**: bool + + Default = False + + If set to True, initialize_megatron() skips DDP initialization and returns a function to complete it instead. Also turns on the use-cpu-initialization flag. This is for an external DDP manager. + + + +- **short_seq_prob**: float + + Default = 0.1 + + Probability of producing a short sequence. + + + +- **eod_mask_loss**: bool + + Default = False + + Mask loss for the end of document tokens. + + + +- **adlr_autoresume**: bool + + Default = False + + Enable auto-resume on adlr cluster. + + + +- **adlr_autoresume_interval**: int + + Default = 1000 + + Interval over which to check for the auto-resume termination signal + + + +- **seed**: int + + Default = 1234 + + Random seed used for python, numpy, pytorch, and cuda. + + + +- **onnx_safe**: bool + + Default = False + + Use workarounds for known problems with Torch ONNX exporter + + + +- **deepscale**: bool + + Default = False + + (Deprecated) enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend) + + + +- **deepscale_config**: str + + Default = None + + (Deprecated) deepscale json configuration file. + + + +- **deepspeed_mpi**: bool + + Default = False + + Run via MPI, this will attempt to discover the necessary variables to initialize torch distributed from the MPI environment + + + +- **deepspeed_slurm**: bool + + Default = False + + Run via SLURM, this will attempt to discover the necessary variables to initialize torch distributed from the SLURM environment + + + +- **user_script**: str + + Default = None + + user script to be run + + + +- **iteration**: int + + Default = None + + Set during training + + + +- **do_train**: bool + + Default = None + + Set during training + + + +- **do_valid**: bool + + Default = None + + Set during training + + + +- **do_test**: bool + + Default = None + + Set during training + + + +- **save_iters**: list + + Default = None + + Set during training + + + +- **global_num_gpus**: int + + Default = None + + Set during launching + + + +## NeoXArgsParallelism + +Parallelism Arguments + + + +- **pipe_parallel_size**: int + + Default = 0 + + Number of pipeline parallel stages. Disable with 0. + + + +- **model_parallel_size**: int + + Default = 1 + + Size of the model parallelism. + + + +- **pipe_partition_method**: str + + Default = type:transformer|mlp + + method used to distribute model layers across pipeline stages. Choose from "parameters", which balances the number + of parameters on each pipeline stage, "uniform", which naively balances the number of layers per stage, or + "type:[regex]", which balances layers whose class names match [regex] + + + +- **world_size**: int + + Default = None + + Total world size (i.e. number of GPUs in the cluster). Configured post-launch using distributed launcher + + + +- **is_pipe_parallel**: bool + + Default = False + + flag to determine whether pipeline parallelism is on - shouldn't be set by user, is automatically determined + according to pipeline parallel size.
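+To make the parallelism arguments concrete, a minimal sketch for a hypothetical 32-GPU cluster (illustrative values only) is:
+
+```
+{
+  # 4 pipeline stages x 2-way model (tensor) parallelism;
+  # data parallelism fills the remainder: 32 / (4 * 2) = 4 replicas
+  "pipe_parallel_size": 4,
+  "model_parallel_size": 2
+}
+```
+
+`world_size` and `is_pipe_parallel` are then derived automatically at launch and should not be set by hand.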
+ + + +- **sequence_parallel**: bool + + Default = False + + flag to determine whether Megatron-style Sequence Parallelism (https://arxiv.org/abs/2205.05198) + (Layernorm inputs and activations are sharded across model parallel group) will be used. Has no effect when model_parallel_size is 1. + **Set by user, in contrast to neox_args.is_pipe_parallel.** + + + +- **expert_interval**: int + + Default = 2 + + Have one MoE layer every expert_interval layers + + + +## NeoXArgsTemplate + +NeoXArgsTemplate() + + + +## NeoXArgsTextgen + +Text Generation arguments + + + +- **text_gen_type**: str + + Default = None + + How to generate text/sample the model. + Options: `unconditional`, `input-file`, `interactive`, `precompute` + + + +- **precompute_model_name**: str + + Default = None + + Model name to use for saving precomputed logprobs + + + +- **temperature**: float + + Default = 0.0 + + exponential scaling output distribution ("higher == more risk") + + + +- **top_p**: float + + Default = 0.0 + + Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. + + + +- **top_k**: int + + Default = 0 + + integer between 0 and the models vocab size. Filters out any logits with a probability less than that of the top_kth token. + + + +- **return_logits**: bool + + Default = False + + Boolean for whether to return the logits for generated tokens + + + +- **maximum_tokens**: int + + Default = 64 + + maximum number of tokens to be generated + + + +- **prompt_end**: str + + Default = + + + a single prompt's end. Defaults to newline + + + +- **sample_input_file**: str + + Default = None + + Get input from file instead of interactive mode, each line is an input. + + + +- **sample_output_file**: str + + Default = samples.txt + + Output file + + + +- **num_samples**: int + + Default = 1 + + Number of samples to generate unconditionally, defaults to 1 and interactive conditional sampling + + + +- **recompute**: bool + + Default = False + + During generation recompute all attention instead of using previously computed keys/values. 
+ Should be set to true for sparse attention models + + + +- **eval_results_prefix**: str + + Default = + + prefix to which to save evaluation results - final fp will be {eval_results_prefix}_eval_results_yy-mm-dd-HH-MM.json + + + +- **eval_tasks**: list + + Default = None + + Tasks to evaluate on using lm_eval_harness + + NOTE: Requires internet connection + + + +- **moe_top_k**: int + + Default = 1 + + Activate top K experts in MoE + + + +- **use_tutel**: bool + + Default = False + + Use Tutel optimizations in MoE + + + +- **moe_num_experts**: int + + Default = 1 + + Number of MoE experts + + + +- **moe_loss_coeff**: float + + Default = 0.1 + + Coefficient for MoE loss + + + +- **moe_train_capacity_factor**: float + + Default = 1.0 + + The capacity of the expert at train time + + + +- **moe_eval_capacity_factor**: float + + Default = 1.0 + + The capacity of the expert at eval time + + + +- **moe_min_capacity**: int + + Default = 4 + + The minimum capacity per expert regardless of the capacity_factor + + + +- **moe_token_dropping**: bool + + Default = False + + Whether to drop tokens when exceeding capacity + + + +- **create_moe_param_group**: bool + + Default = True + + Whether to create a separate parameter group for MoE parameters + + + +- **moe_use_residual**: bool + + Default = True + + Whether to use residual in MoE + + + +- **moe_expert_parallel_size**: int + + Default = 1 + + Number of parallel experts in MoE + + + +- **moe_type**: str + + Default = megablocks + + Either `deepspeed` or `megablocks` + + + +- **moe_glu**: bool + + Default = False + + Use gated linear units in MoE + + + +- **moe_lbl_in_fp32**: bool + + Default = False + + Whether to compute the load balancing loss in fp32. + + + +- **moe_jitter_eps**: float + + Default = None + + Coefficient for MoE routing jitter. Jitter is + not used if set to None + + + +- **enable_expert_tensor_parallelism**: bool + + Default = False + + Enable expert tensor parallelism + + + +## NeoXArgsTokenizer + +Tokenizer Arguments + + + +- **tokenizer_type**: typing.Literal['GPT2BPETokenizer', 'HFTokenizer', 'HFGPT2Tokenizer', 'SPMTokenizer', 'CharLevelTokenizer', 'TiktokenTokenizer'] + + Default = GPT2BPETokenizer + + Type of tokenizer to use - should be one of ["GPT2BPETokenizer", "HFTokenizer", "HFGPT2Tokenizer", "SPMTokenizer", "CharLevelTokenizer", "TiktokenTokenizer"] + + + +- **padded_vocab_size**: int + + Default = None + + Total (padded) vocabulary size of tokenizer. Configured after launching of training, + as it's dependent on the parallelism size. + + + +## NeoXArgsTraining + +Training Arguments + + + +- **data_path**: str + + Default = None + + Path to combined dataset to split. + + + +- **use_shared_fs**: bool + + Default = True + + Whether to use a shared filesystem for data loading. If False, local rank 0 on all nodes will preprocess the data, + otherwise only global rank 0 will preprocess the data. This is implemented in megatron/data/gpt2_dataset.py::_build_index_mappings. + + + +- **train_data_paths**: list + + Default = None + + List of paths to train datasets. + + + +- **train_label_data_paths**: list + + Default = None + + List of paths to train label datasets (not shifted by 1 yet!). + + + +- **train_reward_data_paths**: list + + Default = None + + List of paths to train reward datasets + + + +- **test_data_paths**: list + + Default = None + + List of paths to test datasets. + + + +- **test_label_data_paths**: list + + Default = None + + List of paths to test label datasets (not shifted by 1 yet!). 
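+As a sketch, blending two training datasets with 70/30 sampling (paths are placeholders; the weights use the `*_data_weights` arguments described below) might look like:
+
+```
+{
+  "train_data_paths": ["data/pile_text_document", "data/code_text_document"],
+  "train_data_weights": [0.7, 0.3],
+  "valid_data_paths": ["data/pile_val_text_document"],
+  "test_data_paths": ["data/pile_test_text_document"]
+}
+```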
+ + + +- **test_reward_data_paths**: list + + Default = None + + List of paths to test reward datasets + + + +- **valid_data_paths**: list + + Default = None + + List of paths to validation datasets. + + + +- **valid_label_data_paths**: list + + Default = None + + List of paths to validation label datasets (not shifted by 1 yet!). + + + +- **valid_reward_data_paths**: list + + Default = None + + List of paths to validation reward datasets + + + +- **pos_train_data_paths**: list + + Default = None + + + + + +- **neg_train_data_paths**: list + + Default = None + + List of paths to positive and negative training datasets. + + + +- **pos_train_label_data_paths**: list + + Default = None + + + + + +- **neg_train_label_data_paths**: list + + Default = None + + List of paths to positive and negative training label datasets (not shifted by 1 yet!). + + + +- **pos_valid_data_paths**: list + + Default = None + + + + + +- **neg_valid_data_paths**: list + + Default = None + + List of paths to positive and negative validation datasets. + + + +- **pos_valid_label_data_paths**: list + + Default = None + + + + + +- **neg_valid_label_data_paths**: list + + Default = None + + List of paths to positive and negative validation label datasets (not shifted by 1 yet!). + + + +- **pos_test_data_paths**: list + + Default = None + + + + + +- **neg_test_data_paths**: list + + Default = None + + List of paths to positive and negative test datasets. + + + +- **pos_test_label_data_paths**: list + + Default = None + + + + + +- **neg_test_label_data_paths**: list + + Default = None + + List of paths to positive and negative test label datasets (not shifted by 1 yet!). + + + +- **train_data_weights**: list + + Default = None + + List of 'weights' that decide how often to sample from each training dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `train_data_paths` + + + +- **valid_data_weights**: list + + Default = None + + List of 'weights' that decide how often to sample from each validation dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `valid_data_paths` + + + +- **test_data_weights**: list + + Default = None + + List of 'weights' that decide how often to sample from each test dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `test_data_paths` + + + +- **weight_by_num_documents**: bool + + Default = False + + If True, Builds dataset weights from a multinomial distribution over groups of data according to the number of + documents in each group. + + WARNING: setting this to True will override any user provided weights + + We sample from a group according to the probability p(L) ∝ |L| ** α, + where p(L) is the probability of sampling from a given group, + |L| is the number of examples in that datapoint, + and α is a coefficient that acts to upsample data from underrepresented groups + + Hence α (`alpha`) allows us to control how much to 'boost' the probability of training on low-resource groups. + + See https://arxiv.org/abs/1911.02116 for more details + + + +- **weighted_sampler_alpha**: float + + Default = 1.0 + + Alpha value for `weight_by_num_documents`. Only has an effect if `weight_by_num_documents` = True. 
+ + when alpha = 1, the probability of sampling from a given group = n_samples / total_samples + as alpha -> 0, the probability of sampling from all groups becomes equal, and number of documents has no effect + as alpha -> inf, the probability of sampling from the groups with *the most samples* -> 1 + + + +- **data_impl**: typing.Literal['infer', 'mmap', 'cached'] + + Default = infer + + Implementation of indexed datasets, can be one of "infer", "cached", or "mmap" + + + +- **pack_impl**: typing.Literal['packed', 'pack_until_overflow', 'unpacked'] + + Default = packed + + Packing implementation, can be one of "packed", "pack_until_overflow", or "unpacked". + + warning: pack_until_overflow is very naive and will likely have issues with pretraining scale datasets + + + +- **dataset_impl**: typing.Literal['gpt2', 'pairwise'] + + Default = gpt2 + + Dataset implementation, can be one of "gpt2" or "pairwise" + + + +- **train_impl**: typing.Literal['normal', 'dpo', 'rm', 'kto'] + + Default = normal + + Training implementation, can be one of "normal", "dpo", "kto", or "rm" + + + +- **dpo_fp32**: bool + + Default = True + + Whether to cast logits to fp32 for DPO loss calculation. + + + +- **dpo_reference_free**: bool + + Default = False + + Whether to use reference-free DPO. + + + +- **dpo_beta**: float + + Default = 0.1 + + Beta value for DPO + + + +- **kto_fp32**: bool + + Default = True + + Whether to cast logits to fp32 for KTO loss calculation. + + + +- **kto_desirable_weight**: float + + Default = 1.0 + + Weight for desirable loss in KTO. Might help if you have unbalanced desirable and undesirable classes. + + + +- **kto_undesirable_weight**: float + + Default = 1.0 + + Weight for undesirable loss in KTO. Might help if you have unbalanced desirable and undesirable classes. + + + +- **kto_beta**: float + + Default = 0.1 + + Beta value for KTO + + + +- **allow_chopped**: bool + + Default = True + + WARNING: if your packing impl is packed, this is ignored. + + Allow chopped samples in the dataset. + (e.g if your sequence length is 1024 and you have a sample of length 1026, it will be chopped to 1024) + + + +- **mmap_warmup**: bool + + Default = False + + Warm up mmap files. + + + +- **save**: str + + Default = None + + Output directory to save checkpoints to. + + + +- **s3_path**: str + + Default = None + + Path to s3 bucket for saving checkpoints. + + + +- **s3_chunk_size**: int + + Default = 104857600 + + The number of bytes in each file chunk when uploading to s3. Defaults to 100MiB. + + + +- **config_files**: dict + + Default = None + + Store of original config files mapping config filename to file contents + + + +- **load**: str + + Default = None + + Directory containing a model checkpoint. + + + +- **checkpoint_validation_with_forward_pass**: bool + + Default = False + + save input and output of a forward pass with the checkpoint and validate after load + + + +- **checkpoint_scale**: typing.Literal['linear', 'log'] + + Default = linear + + How step at which checkpoints are saved should scale. "linear" implies 1 checkpoint will be saved at every multiple of `checkpoint-factor`, + while "log" implies that the number of steps between each checkpoint will be multiplied by `checkpoint-factor` at each step, starting from step 1. + + + +- **checkpoint_factor**: int + + Default = None + + Acts as a multiplier on either the "log" or "linear" checkpoint spacing. 
+ + With `checkpoint-scale="linear"`, `checkpoint-factor=20`, and `train-iters=100`, checkpoints will be saved at + steps [20, 40, 60, 80, 100]. + + With `checkpoint-scale="log"`, `checkpoint-factor=2`, and `train-iters=100`, checkpoints will be saved at + steps [1, 2, 4, 8, 16, 32, 64, 100]. + + Note that the last checkpoint step is always saved. + + + +- **extra_save_iters**: list + + Default = None + + Additional iterations when a checkpoint should be saved. + Must be a list of ints or `None`. + + + +- **no_save_optim**: bool + + Default = False + + Do not save current optimizer. + + + +- **no_save_rng**: bool + + Default = False + + Do not save current rng state. + + + +- **no_load_optim**: bool + + Default = False + + Do not load optimizer when loading checkpoint. + + + +- **no_load_rng**: bool + + Default = False + + Do not load rng state when loading checkpoint. + + + +- **finetune**: bool + + Default = False + + Load model for finetuning. Do not load optimizer or rng state from checkpoint and set iteration to 0. Assumed when loading a release checkpoint. + + + +- **batch_size**: int + + Default = None + + training microbatch size per gpu + + + +- **train_iters**: int + + Default = None + + Number of iterations to run for training. + + + +- **train_epochs**: int + + Default = None + + Number of epochs to run for training. Do not specify both train_epochs and train_iters. + Not currently compatible with data reweighting, pairwise datasets, and packing other than 'packed' + + + +- **eval_iters**: int + + Default = 100 + + Number of iterations to run evaluation (validation/test) for. + + + +- **keep_last_n_checkpoints**: int + + Default = None + + Number of last checkpoints to keep + + + +- **eval_interval**: int + + Default = 1000 + + Interval between running evaluation on validation set. + + + +- **split**: str + + Default = 969, 30, 1 + + Comma-separated list of proportions for training, validation, and test split. For example the split 90,5,5 will use 90% of data for training, 5% for validation and 5% for test. + + + +- **vocab_file**: str + + Default = None + + Path to the vocab file. + + + +- **merge_file**: str + + Default = None + + Path to the BPE merge file. + + + +- **num_workers**: int + + Default = 2 + + Dataloader number of workers. + + + +- **exit_interval**: int + + Default = None + + Exit the program when the iteration number is divisible by this value. + + + +- **attention_dropout**: float + + Default = 0.0 + + Post attention dropout probability. + + + +- **hidden_dropout**: float + + Default = 0.0 + + Dropout probability for hidden state transformer. + + + +- **weight_decay**: float + + Default = 0.1 + + Weight decay coefficient for L2 regularization. + + + +- **checkpoint_activations**: bool + + Default = False + + Checkpoint activations to allow for training with larger models, sequences, and batch sizes. + + + +- **checkpoint_num_layers**: int + + Default = 1 + + Chunk size (number of layers) for checkpointing. + + + +- **deepspeed_activation_checkpointing**: bool + + Default = True + + DEPRECATED - TODO: remove + Uses activation checkpointing from deepspeed + + + +- **contiguous_checkpointing**: bool + + Default = False + + Contiguous memory checkpointing for activations. + + + +- **checkpoint_in_cpu**: bool + + Default = False + + Move the activation checkpoints to CPU. + + + +- **synchronize_each_layer**: bool + + Default = False + + Performs a synchronization at the beginning and end of each checkpointed layer.
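+Putting the activation checkpointing flags above together, the same fragment that appears in the Pythia configs later in this diff is:
+
+```
+{
+  "checkpoint_activations": true,
+  "checkpoint_num_layers": 1,
+  "partition_activations": true,
+  "synchronize_each_layer": true
+}
+```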
+ + + +- **profile_backward**: bool + + Default = False + + Enables backward pass profiling for checkpointed layers. + + + +- **partition_activations**: bool + + Default = False + + Partition Activations across GPUs before checkpointing. + + + +- **clip_grad**: float + + Default = 1.0 + + Gradient clipping based on global L2 norm. + + + +- **hysteresis**: int + + Default = 2 + + hysteresis for dynamic loss scaling + + + +- **dynamic_loss_scale**: bool + + Default = None + + flag indicating whether dynamic loss scale is used + + + +- **loss_scale**: float + + Default = None + + Static loss scaling, positive power of 2 + values can improve fp16 convergence. If None, dynamic loss scaling is used. + + + +- **loss_scale_window**: float + + Default = 1000.0 + + Window over which to raise/lower dynamic scale. + + + +- **min_scale**: float + + Default = 1.0 + + Minimum loss scale for dynamic loss scale. + + + +- **char_level_ppl**: bool + + Default = False + + Whether to calculate character level perplexity as well as token level perplexity. (may incur a time cost) + + + +- **use_mup**: bool + + Default = False + + Whether to use Microsoft's Mup https://github.com/microsoft/mup + + + +- **coord_check**: bool + + Default = False + + Whether to generate a "coord check" plot to verify mup's implementation in neox + + + +- **save_base_shapes**: bool + + Default = False + + Whether to save base shapes for mup. This will save the shapes to the path specified in base-shapes-file. + + + +- **base_shapes_file**: str + + Default = None + + Path to the base shapes to save to/load from + + + +- **mup_init_scale**: float + + Default = 1.0 + + Initialization scale: All the parameters are multiplied by this value + + + +- **mup_attn_temp**: float + + Default = 1.0 + + Attention temperature: Reciprocal of the multiplier applied to the input to attention softmax + + + +- **mup_output_temp**: float + + Default = 1.0 + + Output temperature: Reciprocal of the multiplier applied to the input to softmax that + produces the distribution over output tokens. + + + +- **mup_embedding_mult**: float + + Default = 1.0 + + Scalar by which we multiply the output of the embedding layer + + + +- **mup_rp_embedding_mult**: float + + Default = 1.0 + + Scalar by which we multiply vectors representing relative position + + + +- **mup_width_scale**: int + + Default = 2 + + What to scale width by when creating the delta model for mup + + + +## NeoXArgsDeepspeedConfig + +Args for deepspeed config + Every argument included here will be included in deepspeed config json + As of Mar 8 2023, up to date compared to https://www.deepspeed.ai/docs/config-json/ + + + +- **deepspeed**: bool + + Default = True + + boolean flag to enable DeepSpeed (Always True) + + + +- **train_batch_size**: int + + Default = None + + The effective training batch size. This is the amount of data samples that leads to one step of model update. train_batch_size is aggregated by the batch size that a single GPU processes in one forward/backward pass (a.k.a., train_step_batch_size), the gradient accumulation steps (a.k.a., gradient_accumulation_steps), and the number of GPUs. + + + +- **train_micro_batch_size_per_gpu**: int + + Default = None + + Batch size to be processed by one GPU in one step (without gradient accumulation). When specified, gradient_accumulation_steps is automatically calculated using train_batch_size and number of GPUs. Should not be concurrently specified with gradient_accumulation_steps in the configuration JSON. 
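+As a worked example of the relationship above: with a data-parallel world size of 16 (e.g. 32 GPUs with model_parallel_size=2 and no pipeline parallelism), `train_micro_batch_size_per_gpu = 8`, and `gradient_accumulation_steps = 2` (described next), the effective batch size is:
+
+```
+train_batch_size = train_micro_batch_size_per_gpu
+                 * gradient_accumulation_steps
+                 * data-parallel world size
+                 = 8 * 2 * 16
+                 = 256
+```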
+ + + +- **gradient_accumulation_steps**: int + + Default = 1 + + Number of training steps to accumulate gradients before averaging and applying them. This feature is sometimes useful to improve scalability since it results in less frequent communication of gradients between steps. Another impact of this feature is the ability to train with larger batch sizes per GPU. When specified, train_step_batch_size is automatically calculated using train_batch_size and number of GPUs. Should not be concurrently specified with train_step_batch_size in the configuration JSON. + + + +- **optimizer**: dict + + Default = None + + dict containing the keys type and params + + type: The optimizer name. DeepSpeed natively supports Adam, AdamW, OneBitAdam, Lamb, and OneBitLamb optimizers (See here for details) and will import other optimizers from torch. + + params: Dictionary of parameters to instantiate optimizer. The parameter names must match the optimizer constructor signature (e.g., for Adam). + + + +- **scheduler**: dict + + Default = None + + dict containing the keys type and params + + type: The scheduler name. See here (https://deepspeed.readthedocs.io/en/latest/schedulers.html) for list of support schedulers. + + params: Dictionary of parameters to instantiate scheduler. The parameter names should match scheduler constructor signature. + + + +- **fp32_allreduce**: bool + + Default = False + + During gradient averaging perform allreduce with 32 bit values + + + +- **prescale_gradients**: bool + + Default = False + + Scale gradients before doing allreduce + + + +- **gradient_predivide_factor**: float + + Default = 1.0 + + Before gradient averaging predivide gradients by a specified factor, can sometimes help with fp16 stability when scaling to large numbers of GPUs + + + +- **sparse_gradients**: bool + + Default = False + + Enable sparse compression of torch.nn.Embedding gradients. + + + +- **fp16**: dict + + Default = None + + Configuration for using mixed precision/FP16 training that leverages NVIDIA’s Apex package. + + Dictionary options as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#fp16-training-options + + + +- **bf16**: dict + + Default = None + + Configuration for using bfloat16 floating-point format as an alternative to FP16. BFLOAT16 requires hardware support (e.g., NVIDIA A100). + + Dictionary options as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#bfloat16-training-options + + + +- **amp**: dict + + Default = None + + Configuration for using automatic mixed precision (AMP) training that leverages NVIDIA’s Apex AMP package. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options + + + +- **gradient_clipping**: float + + Default = 1.0 + + Enable gradient clipping with provided value + + + +- **zero_optimization**: dict + + Default = None + + Configuration for using ZeRO optimization. + + Multi-level dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#zero-optimization-options + + + +- **curriculum_learning**: dict + + Default = None + + + + + +- **curriculum_seqlen**: int + + Default = 0 + + Internal var for tracking the current seqlen + + + +- **steps_per_print**: int + + Default = 10 + + Print train loss every N steps. + + + +- **wall_clock_breakdown**: bool + + Default = False + + Enable timing of the latency of forward/backward/update training phases. 
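+For reference, an `optimizer` dict in the shape described above, mirroring the Pythia configs later in this diff (the `scheduler` dict follows the same pattern, with a DeepSpeed scheduler name and its constructor parameters):
+
+```
+{
+  "optimizer": {
+    "type": "Adam",
+    "params": {
+      "lr": 0.0002,
+      "betas": [0.9, 0.95],
+      "eps": 1.0e-8
+    }
+  }
+}
+```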
+ + + +- **dump_state**: bool + + Default = False + + Print out state information of DeepSpeed object after initialization. + + + +- **flops_profiler**: dict + + Default = None + + Configuration for using FLOPS profiler. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#flops-profiler + + + +- **communication_data_type**: bool + + Default = None + + During gradient averaging, perform communication with selected data type. By default it will be determined by selected regime + + + +- **autotuning**: dict + + Default = None + + Configuration for using autotuning. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#autotuning + + + +- **activation_checkpointing**: dict + + Default = None + + Configuration for using activation checkpointing. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#activation-checkpointing + + + +- **sparse_attention**: dict + + Default = None + + Configuration for using sparse attention. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#sparse-attention + + + +- **data_efficiency**: dict + + Default = None + + Configuration for using data efficiency. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#data-efficiency + + + +- **tensorboard**: dict + + Default = None + + Configuration for using tensorboard. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#monitoring-module-tensorboard-wandb-csv + + + +- **wandb**: dict + + Default = None + + Configuration for using wandb. + + + +- **csv_monitor**: dict + + Default = None + + Configuration for using csv_monitor. + + + +- **elasticity**: dict + + Default = None + + Configuration for using elastic training. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#elastic-training-config-v01-and-v02 + + + +- **comms_logger**: dict + + Default = None + + Configuration for using communication logger. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#communication-logging + + + +- **compression_training**: dict + + Default = None + + Configuration for using compression training. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#compression + + + +- **checkpoint**: dict + + Default = None + + Configuration for using checkpointing. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#checkpoint-options + + + +- **data_types**: dict + + Default = None + + Configuration for using data types. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#data-type-options + + + +- **deepspeed_extra_args**: dict + + Default = None + + Dictionary of extra arguments to be included in the yaml config file. This can be used for any argument not included in the above list. + + + +## NeoXArgsDeepspeedRunner + +Args for deepspeed runner (deepspeed.launcher.runner). 
+ Every argument included here will be passed as a command line argument to deepspeed.launcher.runner + + + +- **hostfile**: str + + Default = None + + list of hostnames / ssh aliases and the number of GPUs per host + + example file contents: + worker-1 slots=4 + worker-2 slots=4 + 127.0.0.1 slots=4 + 127.0.0.2 slots=4 + + + +- **include**: str + + Default = None + + Specify hardware resources to use during execution. String format is `NODE_SPEC[@NODE_SPEC ...]` where `NODE_SPEC=NAME[:SLOT[,SLOT ...]]`. If `:SLOT` is omitted, include all slots on that host. Example: `"worker-0@worker-1:0,2"` will use all slots on `worker-0` and slots `[0, 2]` on `worker-1`. + + + +- **exclude**: str + + Default = None + + Specify hardware resources to NOT use during execution. Same format as include + + + +- **num_nodes**: int + + Default = -1 + + Total number of worker nodes to run on, this will use the top N hosts from the given hostfile. -1 will use all. + + + +- **num_gpus**: int + + Default = None + + Max number of GPUs to use on each node, will use [0:N) GPU ids on each node. None / not specifying a value will use all. + + + +- **master_port**: int + + Default = 29500 + + Port used by PyTorch distributed for communication during training. + + + +- **master_addr**: str + + Default = None + + IP address of node 0, will be inferred via 'hostname -I' if not specified. + + + +- **launcher**: typing.Literal['pdsh', 'openmpi', 'mvapich', 'slurm'] + + Default = pdsh + + Launcher backend for multi-node training. Options currently include PDSH, OpenMPI, MVAPICH, and SLURM. + + + +- **force_multi**: bool + + Default = False + + Force multi-node training even if only one node is specified. + + + +- **detect_nvlink_pairs**: bool + + Default = False + + If true, autodetects nvlink pairs and remaps cuda visible devices to place them next to each other. This is an Eleuther addition to deepspeed, and should speed up model parallel training on setups with nvlink pairs when mp=2. + + + +- **autotuning_run**: str + + Default = None + + Either "tune", "run", or `None`. + + + +- **no_ssh_check**: bool + + Default = False + + If true, overrides the default check where DeepSpeed confirms that the headnode is accessible via ssh. + + + +- **comment**: str + + Default = None + + Adds a `--comment` to the DeepSpeed launch command. In DeeperSpeed this is passed on to the SlurmLauncher as well. Sometimes necessary for cluster rules, or so I've heard. + + + +- **account**: str + + Default = None + + Adds an `--account` to the DeepSpeed launch command. In DeeperSpeed this is passed on to the SlurmLauncher as well. Sometimes necessary for cluster rules, or so I've heard.
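+Putting a few of the runner arguments together, a sketch of a two-node launch fragment (the hostfile path and values are placeholders) might be:
+
+```
+{
+  # hostfile listing "worker-1 slots=8" style entries, as described above
+  "hostfile": "/path/to/hostfile",
+  "num_nodes": 2,
+  "num_gpus": 8,
+  "launcher": "pdsh",
+  "master_port": 29500
+}
+```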
+ diff --git a/configs/prof.yml b/configs/prof.yml new file mode 100644 index 0000000000000000000000000000000000000000..c2f2ee118ddf6c5fb6eb7b5f593c69c1f96c98bd --- /dev/null +++ b/configs/prof.yml @@ -0,0 +1,17 @@ +# Sample profiling config +{ + # Turns on nsys and pytorch profiling + "profile": true, + + # pytorch profiler options + "profile_step_start": 10, + "profile_step_stop": 12, + + # pytorch memory profiler options + "memory_profiling": true, + "memory_profiling_path": tensorboard, + + + # All trace files (pytorch, nsys, tensorboard, etc) will be written here + "tensorboard_dir": "tensorboard", +} diff --git a/configs/pythia/1-4B.yml b/configs/pythia/1-4B.yml new file mode 100644 index 0000000000000000000000000000000000000000..0176d0a78db26cfd2c4bb8ba66489e055f171f62 --- /dev/null +++ b/configs/pythia/1-4B.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 16, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 24]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0002, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.00002, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 16, + "data_impl": "mmap", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + "tokenizer_type": "HFTokenizer" + } diff --git a/configs/pythia/12B.yml b/configs/pythia/12B.yml new file mode 100644 index 0000000000000000000000000000000000000000..21b67521ded6eece8edf0ae4731511f7d411d6c5 --- /dev/null +++ b/configs/pythia/12B.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 4, + + "num_layers": 36, + "hidden_size": 5120, + "num_attention_heads": 40, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 36]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00012, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.000012, + + "zero_optimization": { + "stage": 1, + 
"allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 8, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "log_grad_norm": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/14M.yml b/configs/pythia/14M.yml new file mode 100644 index 0000000000000000000000000000000000000000..72314c7dda9c2925ad6e862b435eac6ae70b3e25 --- /dev/null +++ b/configs/pythia/14M.yml @@ -0,0 +1,97 @@ +{ + # parallelism settings + "pipe-parallel-size": 0, + "model-parallel-size": 1, + + # model settings + "num-layers": 6, + "hidden-size": 128, + "num-attention-heads": 4, + "seq-length": 2048, + "max-position-embeddings": 2048, + "pos-emb": "rotary", + "rotary-pct": 0.25, + "no-weight-tying": true, + "gpt-j-residual": true, + "output-layer-parallelism": "column", + + "attention-config": [[["flash"], 6]], + + "scaled-upper-triang-masked-softmax-fusion": true, + "bias-gelu-fusion": true, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0001, + + "zero_optimization": { + "stage": 0, + "allgather_partitions": true, + "allgather_bucket_size": 50000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 50000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + # batch size (trained on 32 gpus) + "train_micro_batch_size_per_gpu": 32, + "data-impl": "mmap", + "num_workers": 4, + + # activation checkpointing + "checkpoint-activations": false, #true, + "checkpoint-num-layers": 1, + "partition-activations": false, #true, + "synchronize-each-layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight-decay": 0.1, + "hidden-dropout": 0, + "attention-dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train-iters": 143000, + "lr-decay-iters": 143000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "checkpoint-factor": 1000, + "extra-save-iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval-interval": 100000, + "eval-iters": 10, + + "log-interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "train-data-paths": ["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + "valid-data-paths": ["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + "test-data-paths": 
["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + + "tokenizer-type": "HFTokenizer", + "vocab-file": "/mnt/ssd-2/pile/20B_tokenizer.json" + +} diff --git a/configs/pythia/160M.yml b/configs/pythia/160M.yml new file mode 100644 index 0000000000000000000000000000000000000000..d5b12547f4f1d3385eb7d3d38c07b81158236cf7 --- /dev/null +++ b/configs/pythia/160M.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 12]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.00006, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 32, + "data_impl": "mmap", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/1B.yml b/configs/pythia/1B.yml new file mode 100644 index 0000000000000000000000000000000000000000..78fc28946d1de0b362a05fd5a7b1f584eb81e975 --- /dev/null +++ b/configs/pythia/1B.yml @@ -0,0 +1,86 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 16, + "hidden_size": 2048, + "num_attention_heads": 8, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00025, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.000025, + + "zero_optimization": { + "stage": 0, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "fp16": { + "enabled": true, + "type": "bfloat16", + "auto_cast": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "fp32_allreduce": true, + + 
"train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 4, + "data_impl": "mmap", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/2-8B.yml b/configs/pythia/2-8B.yml new file mode 100644 index 0000000000000000000000000000000000000000..04427e9fd90b8e7095e9938100115059b5ddb0d5 --- /dev/null +++ b/configs/pythia/2-8B.yml @@ -0,0 +1,87 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 32, + "hidden_size": 2560, + "num_attention_heads": 32, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00016, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.000016, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 8, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 40000, + "eval_iters": 10, + + "log_grad_norm": true, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/31M.yml b/configs/pythia/31M.yml new file mode 100644 index 0000000000000000000000000000000000000000..f450d12ade890a01acc62d5f3ba44540b861ddd0 --- /dev/null +++ b/configs/pythia/31M.yml @@ -0,0 +1,96 @@ +{ + # parallelism settings + "pipe-parallel-size": 0, + "model-parallel-size": 1, + + # model settings + "num-layers": 6, + "hidden-size": 256, + "num-attention-heads": 8, + "seq-length": 2048, + "max-position-embeddings": 2048, + "pos-emb": "rotary", + "rotary-pct": 0.25, + "no-weight-tying": true, + "gpt-j-residual": true, + "output-layer-parallelism": "column", + + "attention-config": [[["flash"], 6]], + + "scaled-upper-triang-masked-softmax-fusion": true, + 
"bias-gelu-fusion": true, + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0001, + + "zero_optimization": { + "stage": 0, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + # batch size (trained on 32 gpus) + "train_micro_batch_size_per_gpu": 32, + "data-impl": "mmap", + "num_workers": 2, + + # activation checkpointing + "checkpoint-activations": false, + "checkpoint-num-layers": 1, + "partition-activations": false, + "synchronize-each-layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight-decay": 0.1, + "hidden-dropout": 0, + "attention-dropout": 0, + + # precision settings + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train-iters": 143000, + "lr-decay-iters": 143000, + "distributed-backend": "nccl", + "lr-decay-style": "cosine", + "warmup": 0.01, + "checkpoint-factor": 1000, + "extra-save-iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval-interval": 100000, + "eval-iters": 10, + "log-interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "train-data-paths": ["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + "valid-data-paths": ["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + "test-data-paths": ["/mnt/ssd-2/pile_deduped/pile_20B_tokenizer_text_document"], + + "tokenizer-type": "HFTokenizer", + "vocab-file": "/mnt/ssd-2/pile/20B_tokenizer.json" + +} diff --git a/configs/pythia/410M.yml b/configs/pythia/410M.yml new file mode 100644 index 0000000000000000000000000000000000000000..8d3c4e59453e928c1d678bc82fa680af3da259ed --- /dev/null +++ b/configs/pythia/410M.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 24, + "hidden_size": 1024, + "num_attention_heads": 16, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 24]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0003, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.00003, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 32, + "data_impl": "mmap", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + 
"lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/6-9B.yml b/configs/pythia/6-9B.yml new file mode 100644 index 0000000000000000000000000000000000000000..869129f30ba39803c549aca4af2d23c355dd6ec7 --- /dev/null +++ b/configs/pythia/6-9B.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 2, + + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00012, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + + "min_lr": 0.000012, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 8, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/pythia/70M.yml b/configs/pythia/70M.yml new file mode 100644 index 0000000000000000000000000000000000000000..2cab2e70dfda42bac5b94e3b4b04bb62ae439125 --- /dev/null +++ b/configs/pythia/70M.yml @@ -0,0 +1,84 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 6, + "hidden_size": 512, + "num_attention_heads": 8, + "seq_length": 2048, + "max_position_embeddings": 2048, + "pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + "output_layer_parallelism": "column", + + "attention_config": [[["flash"], 6]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0001, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_micro_batch_size_per_gpu": 32, + "data_impl": "mmap", + "num_workers": 1, + + 
"checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "fp16": { + "fp16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "train_iters": 143000, + "lr_decay_iters": 143000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512], + "eval_interval": 100000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + "tokenizer_type": "HFTokenizer" +} diff --git a/configs/rwkv/170M.yml b/configs/rwkv/170M.yml new file mode 100644 index 0000000000000000000000000000000000000000..11311f441042f4efed10abbda0c6b48c6ab1588b --- /dev/null +++ b/configs/rwkv/170M.yml @@ -0,0 +1,102 @@ +{ + # Parallelism is not yet supported for rwkv + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, # head_size = dim_att / num_attention_heads. + # head_size is 64 for all rwkv models + "seq_length": 512, + "max_position_embeddings": 2048, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + "train_micro_batch_size_per_gpu": 32, + + "attention_config": [[["rwkv"], 12]], + + "activation": "silu", + + # model settings + + #"pos_emb": "rotary", + "rotary_pct": 0.25, + "no_weight_tying": true, + "gpt_j_residual": true, + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0008, + "betas": [0.9, 0.95], + "eps": 1.0e-8, + } + }, + "min_lr": 0.00008, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 1, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "data_impl": "mmap", + "num_workers": 1, + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + # precision settings + "bf16": { + "bf16": true, + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 12, + "hysteresis": 2, + "min_loss_scale": 1, + }, + + # misc. 
training settings + "train_iters": 500, + "lr_decay_iters": 500, + "distributed_backend": "nccl", + "lr_decay_style": "constant", + "warmup": 0.01, + "checkpoint_factor": 100, + "eval_interval": 100000, + "eval_iters": 10, + + # logging + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, +} diff --git a/configs/slurm_125M.yml b/configs/slurm_125M.yml new file mode 100644 index 0000000000000000000000000000000000000000..2ac60e53417ddc23f9e369e063c720b37883a66a --- /dev/null +++ b/configs/slurm_125M.yml @@ -0,0 +1,66 @@ +{ + "pipe_parallel_size": 1, + "model_parallel_size": 1, + "num_layers": 12, + "hidden_size": 768, + "num_attention_heads": 12, + "seq_length": 2048, + "max_position_embeddings": 2048, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": true, + "rope_fusion": false, + "layernorm_fusion": false, + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.999], + "eps": 1.0e-8 + } + }, + "zero_optimization": { + "stage": 0, + "allgather_partitions": true, + "allgather_bucket_size": 500000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 500000000, + "contiguous_gradients": true + }, + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + "split": "949,50,1", + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + "gradient_clipping": 1.0, + "weight_decay": 0.0, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + "launcher": "slurm", + "deepspeed_slurm": true, + "comment": "neox" +} diff --git a/configs/slurm_local.json b/configs/slurm_local.json new file mode 100644 index 0000000000000000000000000000000000000000..4b9ce5c56151247a51453a9a0af70cd2ac676a8f --- /dev/null +++ b/configs/slurm_local.json @@ -0,0 +1,8 @@ +{ + "vocab-file": "data/gpt2-vocab.json", + "merge-file": "data/gpt2-merges.txt", + "save": "checkpoints", + "checkpoint_validation_with_forward_pass": false, + "tensorboard-dir": "tensorboard", + "log-dir": "logs" +} diff --git a/configs/slurm_local.yml b/configs/slurm_local.yml new file mode 100644 index 0000000000000000000000000000000000000000..3aa3f374268df07e3b110cff4429a5b50ce2087a --- /dev/null +++ b/configs/slurm_local.yml @@ -0,0 +1,9 @@ +{ + "data_path": "data/enwik8/enwik8_text_document", + "vocab_file": "data/gpt2-vocab.json", + "merge_file": "data/gpt2-merges.txt", + "save": "checkpoints", + "checkpoint_validation_with_forward_pass": false, + "tensorboard_dir": "tensorboard", + "log_dir": "logs", +} diff --git a/configs/sparse.yml b/configs/sparse.yml new file mode 100644 index 0000000000000000000000000000000000000000..7251c88b7583c1c4896cc3de3293d278371af743 --- /dev/null +++ b/configs/sparse.yml @@ -0,0 +1,15 @@ +# Add this to your config for sparse attention every other layer +{ + "attention_config": [[["local", "global"], "all"]], + + # sparsity config: + # (these are the defaults for local sliding window sparsity, training will work without this here, 
but it's left in for
+  # illustrative purposes)
+  # see https://www.deepspeed.ai/tutorials/sparse-attention/#how-to-config-sparsity-structures for
+  # more detailed config instructions and available parameters
+
+  "sparsity_config": {
+    "block": 16, # block size
+    "num_local_blocks": 32,
+  }
+}
diff --git a/configs/text_generation.yml b/configs/text_generation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a49d61e56f3f98c0c7b38cb65ded05e6c1be527
--- /dev/null
+++ b/configs/text_generation.yml
@@ -0,0 +1,21 @@
+# Parameters used for text generation
+# Make sure `load` is specified somewhere else
+{
+  # Text gen type: `input-file`, `unconditional`, `interactive` or `precompute`
+  "text_gen_type": "unconditional",
+
+  # Params shared by all text gen types
+  "maximum_tokens": 102,
+  "prompt_end": "\n",
+  "temperature": 1.0,
+  "top_p": 0.0,
+  "top_k": 0,
+  "recompute": false,
+
+  # `unconditional`: samples
+  "num_samples": 10,
+
+  # input/output file
+  "sample_input_file": "sample_input.txt",
+  "sample_output_file": "sample_output.txt",
+}
diff --git a/deepy.py b/deepy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4dc7a3fdcc7cc48e18cd55f3ef3f3f1a1737287
--- /dev/null
+++ b/deepy.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
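+
+# Typical invocation (the config names here are illustrative):
+#
+#   python ./deepy.py train.py -d configs 125M.yml local_setup.yml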
+ +import logging +import os + +import deepspeed.launcher.runner + + +def main(input_args=None): + logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) + + from megatron.neox_arguments import NeoXArgs + from megatron.utils import get_wandb_api_key + + neox_args = NeoXArgs.consume_deepy_args(input_args) + deepspeed_main_args = neox_args.get_deepspeed_main_args() + + # Extract wandb API key and inject into worker environments + wandb_token = get_wandb_api_key(neox_args=neox_args) + if wandb_token is not None: + deepspeed.launcher.runner.EXPORT_ENVS.append("WANDB_API_KEY") + os.environ["WANDB_API_KEY"] = wandb_token + + deepspeed.launcher.runner.main(deepspeed_main_args) + + +if __name__ == "__main__": + main() diff --git a/docker-compose-dockerhub.yml b/docker-compose-dockerhub.yml new file mode 100644 index 0000000000000000000000000000000000000000..4ac5113f7779595555d9e2a09002253325b21435 --- /dev/null +++ b/docker-compose-dockerhub.yml @@ -0,0 +1,25 @@ +version: '3' +services: + gpt-neox: + command: nvidia-smi dmon + image: leogao2/gpt-neox:main + shm_size: 1g + ulimits: + memlock: + soft: -1 + hard: -1 + runtime: nvidia + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + logging: + options: + max-size: "100m" + max-file: "3" + volumes: + - ${NEOX_DATA_PATH}:/home/mchorse/data + - ${NEOX_CHECKPOINT_PATH}:/home/mchorse/chk + - .:/home/mchorse/gpt-neox diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..b2591aed0dc05607c83d3a76953a0e95a15213bb --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,28 @@ +version: '3' +services: + gpt-neox: + command: nvidia-smi dmon + image: gpt-neox + build: + context: . + dockerfile: Dockerfile + shm_size: 1g + ulimits: + memlock: + soft: -1 + hard: -1 + runtime: nvidia + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + logging: + options: + max-size: "100m" + max-file: "3" + volumes: + - ${NEOX_DATA_PATH}:/home/mchorse/data + - ${NEOX_CHECKPOINT_PATH}:/home/mchorse/chk + - .:/home/mchorse/gpt-neox diff --git a/eval.py b/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..53bd21e0c7a3a519845c13a437ea0fc0ba51d395 --- /dev/null +++ b/eval.py @@ -0,0 +1,79 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
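+
+# Typical invocation (config and task names are illustrative):
+#
+#   python ./deepy.py eval.py -d configs your_model.yml --eval_tasks lambada_openai hellaswag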
+ +"""Evaluation tasks - modified from https://github.com/EleutherAI/lm-evaluation-harness""" +import os +import sys + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) +) +from megatron.training import forward_step +from megatron.utils import setup_for_inference_or_eval, init_wandb +from megatron.logging import tb_wandb_log +from eval_tasks import run_eval_harness +from pprint import pprint +from datetime import datetime +import json + + +def main(input_args=None, overwrite_values=None): + model, neox_args = setup_for_inference_or_eval( + use_cache=False, input_args=input_args, overwrite_values=overwrite_values + ) + results = run_eval_harness( + model, + forward_step, + neox_args, + eval_tasks=neox_args.eval_tasks, + bootstrap_iters=10000, + ) + if neox_args.rank == 0: + init_wandb(neox_args=neox_args) + # log to wandb + for k, v in results["results"].items(): + if isinstance(v, dict): + for k2, v2 in v.items(): + k3 = "_".join([k, k2]) + tb_wandb_log( + f"eval/{k3}", + v2, + neox_args.iteration, + use_wandb=neox_args.use_wandb, + comet_experiment=neox_args.comet_experiment, + ) + else: + tb_wandb_log( + f"eval/{k}", + v, + neox_args.iteration, + use_wandb=neox_args.use_wandb, + comet_experiment=neox_args.comet_experiment, + ) + + pprint(results) + results_path = ( + f'eval_results_{datetime.now().strftime("%m-%d-%Y-%H-%M-%S")}.json' + ) + if neox_args.eval_results_prefix: + results_path = f"{neox_args.eval_results_prefix}_{results_path}" + with open(results_path, "w") as f: + json.dump(results, f, indent=4) + + +if __name__ == "__main__": + main() diff --git a/eval_tasks/__init__.py b/eval_tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb4e30ca2b06174935e3675c32cf89661adcfc6 --- /dev/null +++ b/eval_tasks/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .eval_adapter import EvalHarnessAdapter, run_eval_harness diff --git a/eval_tasks/eval_adapter.py b/eval_tasks/eval_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..abbd5ca8d17e22c8e7747f1ce9dba8ceee2710df --- /dev/null +++ b/eval_tasks/eval_adapter.py @@ -0,0 +1,538 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from megatron.utils import is_local_main, print_rank_0 + +import copy +import os +import sys +import dataclasses +from functools import partial + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) +) +from tqdm import tqdm +import torch +import torch.nn.functional as F + +from lm_eval.models.huggingface import HFLM +from lm_eval import tasks, evaluator, utils, api +from megatron.text_generation_utils import generate_samples_from_prompt +from megatron import mpu + + +class EvalHarnessAdapter(HFLM): + """ + An adapter to run NeoX models on LM Evaluation Harness (https://github.com/EleutherAI/lm-evaluation-harness) tasks. + + Args: + model: A NeoX Model + forward_step_fn: A function that runs a forward pass through the model, returning `tuple(loss, logits)`. + neox_args: a NeoXArgs object containing the model configuration. + batch_size (optional): An argument to override the batch size, which defaults to batch size per gpu * dp world size. + """ + + def __init__(self, model, forward_step_fn, neox_args, batch_size=None): + self.cache_hook = api.model.CacheHook(None) + self._model = model + self.neox_args = neox_args + self.tokenizer = neox_args.tokenizer + self._device = torch.device(f"cuda:{neox_args.local_rank}") + self._eot_token_id = neox_args.tokenizer.eod_id + self._max_length = neox_args.max_position_embeddings + self._max_gen_toks = 128 + self._vocab_size = neox_args.padded_vocab_size + + # parallelism args: + self.is_main = neox_args.rank == 0 + self.is_local_main = neox_args.local_rank == 0 + self.is_model_parallel = neox_args.model_parallel_size > 1 + self.is_pipe_parallel = self.model.is_pipe_parallel + self.is_data_parallel = self.model.is_data_parallel + self.is_last_stage = ( + True if not self.is_pipe_parallel else model.is_last_stage() + ) # only the last stage of the pipeline model will receive the logits + self.dp_world_size = mpu.get_data_parallel_world_size() + self.dp_rank = mpu.get_data_parallel_rank() + self.dp_group = mpu.get_data_parallel_group() + self.is_mp_rank_0 = mpu.get_model_parallel_rank() == 0 + + self._batch_size = batch_size or ( + neox_args.batch_size * self.dp_world_size + ) # default batch size to bs per gpu * dp size + # some utility functions: + # we need to patch tokenizer methods, because lm_eval uses them internally: + self.tokenizer.encode = self.tokenizer.tokenize + self.tokenizer.decode = self.tokenizer.detokenize + self._forward_step_fn = partial( + forward_step_fn, neox_args=neox_args, timers=None, return_logits=True + ) + self.generate = partial( + generate_samples_from_prompt, + neox_args=neox_args, + model=model, + ) + + @property + def vocab_size(self): + return self._vocab_size + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self._eot_token_id + + @property + def max_length(self): + return self._max_length + + @property + def max_gen_toks(self): + return self._max_gen_toks + + @property + def batch_size(self): + return self._batch_size + + @property + def device(self): + return self._device + + @property + def rank(self): + return 0 + + @property + def world_size(self): + return 1 + + def tok_encode(self, string: str, **kwargs): + return self.tokenizer.encode(string) + + def tok_decode(self, tokens, **kwargs): + return self.tokenizer.decode(tokens) + + def generate_until(self, requests): + """ + Generate until is lm_eval harness' way to say "do greedy generation" - necessary for some tasks. 
the eval harness dispatches requests to the model, and the model does argmax generation, the results of which
+        are returned to the eval harness to evaluate.
+
+        TODO: batched / data parallel generation
+
+        :param requests: A list of Instance objects, each carrying the context (prompt) and 'until' - a stop
+            string or list of stop strings.
+        """
+        self.model.module.inference_mode(use_cache=True)  # tell model to cache kv pairs
+        res = []
+
+        # get only the args from each Instance object
+        reqs = [req.args for req in requests]
+
+        def _collate(x):
+            toks = self.tokenizer.encode(x[0])
+            return (len(toks), x[0])
+
+        reord = utils.Reorderer(reqs, _collate)
+        for context, gen_kwargs in tqdm(
+            reord.get_reordered(), "Running greedy generation"
+        ):
+            if isinstance(gen_kwargs, dict):
+                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
+                if "until" in kwargs.keys():
+                    until = kwargs.pop("until")
+                    if isinstance(until, str):
+                        until = [until]
+                    elif not isinstance(until, list):
+                        raise ValueError(
+                            f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
+                        )
+                else:
+                    until = None
+            else:
+                raise ValueError(
+                    f"Expected `gen_kwargs` to be of type `dict` but got {gen_kwargs}"
+                )
+            if not until:
+                until = [self.tok_decode([self.eot_token_id])]
+            if "max_gen_toks" in kwargs.keys():
+                max_gen_toks = kwargs.pop("max_gen_toks")
+            else:
+                max_gen_toks = self.max_gen_toks
+
+            if "do_sample" in kwargs.keys():
+                kwargs.pop("do_sample")
+
+            stop_tokens = [self.tokenizer.encode(i) for i in until]
+            cont = self.generate(
+                text=context,
+                stop_tokens=stop_tokens,
+                recompute=self.neox_args.recompute,
+                maximum_tokens=max_gen_toks,
+                **kwargs,
+            )
+            if cont:
+                s = cont[0]["text"] or ""
+            else:
+                s = ""
+
+            for term in until:
+                s = s.split(term)[0]
+
+            # partial caching
+            self.cache_hook.add_partial("generate_until", (context, until), s)
+
+            res.append(s)
+
+        self.model.module.train_mode()  # set back to train mode
+        return reord.get_original(res)
+
+    def _loglikelihood_tokens(self, requests, disable_tqdm=False):
+        """
+        In this method, the model doesn't do any generation, but just returns the log likelihood
+        of the expected continuation, which eval harness uses to evaluate.
+
+        :param requests: A list of (cache_key, context_enc, continuation_enc) tuples, pairing each
+            context with its expected continuation.
+        :param disable_tqdm: If True, disable tqdm progress bar.
+        """
+        self.model.module.inference_mode(
+            use_cache=False
+        )  # tell model to gather parallel outputs, but not cache key-value pairs
+
+        disable_tqdm = disable_tqdm if self.is_main else True
+        res = []
+        res_len = 0  # storing the result length for later
+        with torch.no_grad():
+
+            def _collate(x):
+                toks = x[1] + x[2]
+                return (-len(toks), tuple(toks))
+
+            reord = utils.Reorderer(requests, _collate)
+            for chunk in utils.chunks(
+                tqdm(reord.get_reordered(), disable=disable_tqdm), self.batch_size
+            ):
+                inps, contlens, inplens, padding_length = [], [], [], None
+                for cache_key, context_enc, continuation_enc in chunk:
+                    # when too long to fit in context, truncate from the left
+                    inp = torch.tensor(
+                        (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
+                        dtype=torch.long,
+                    ).to(self.device)
+                    (inplen,) = inp.shape
+
+                    cont = continuation_enc
+
+                    # since in _collate we make sure length is descending, the longest is always the first one.
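+                    # (illustrative: if a chunk arrives with inplens [7, 5, 3],
+                    # padding_length is pinned to 7 by this first, longest item,
+                    # and the two shorter inputs are zero-padded up to 7 below)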
+                    padding_length = (
+                        padding_length if padding_length is not None else inplen
+                    )
+
+                    # pad to length
+                    inp = torch.cat(
+                        [
+                            inp,  # [seq]
+                            torch.zeros(padding_length - inplen, dtype=torch.long).to(
+                                inp.device
+                            ),  # [padding_length - seq]
+                        ],
+                        dim=0,
+                    )
+
+                    inps.append(inp.unsqueeze(0))
+                    contlens.append(cont)
+                    inplens.append(inplen)
+
+                logits = self._model_call(torch.cat(inps, dim=0))
+                res_len += len(chunk)
+
+                if logits is not None:
+                    multi_logits = F.log_softmax(logits, dim=-1)  # [batch, seq, vocab]
+                    for (cache_key, _, _), logits, inp, inplen, cont_toks in zip(
+                        chunk, multi_logits, inps, inplens, contlens
+                    ):
+                        contlen = len(cont_toks)
+                        logits = logits[inplen - contlen : inplen].unsqueeze(
+                            0
+                        )  # [1, seq, vocab]
+                        greedy_tokens = logits.argmax(dim=-1)
+                        # cont_toks :: [1, seq]
+                        cont_toks = (
+                            torch.tensor(cont_toks, dtype=torch.long)
+                            .unsqueeze(0)
+                            .to(multi_logits.device)
+                        )
+                        max_equal = (greedy_tokens == cont_toks).all()
+                        logits = torch.gather(
+                            logits, 2, cont_toks.unsqueeze(-1)
+                        ).squeeze(
+                            -1
+                        )  # [1, seq]
+                        answer = (float(logits.sum()), bool(max_equal))
+
+                        # partial caching
+                        if cache_key is not None:
+                            self.cache_hook.add_partial(
+                                "loglikelihood", cache_key, answer
+                            )
+
+                        res.append(answer)
+
+        # broadcast results to all ranks
+        if self.is_pipe_parallel:
+            src_rank = self.model.grid.stage_to_global(self.model.num_stages - 1)
+            if res:
+                logits_sums, max_equals = list(zip(*res))
+                logits_sums = torch.FloatTensor(logits_sums).cuda()
+                max_equals = torch.LongTensor(max_equals).cuda()
+            else:
+                logits_sums = torch.zeros(res_len, dtype=torch.float32).cuda()
+                max_equals = torch.zeros(res_len, dtype=torch.int64).cuda()
+            torch.distributed.broadcast(
+                tensor=logits_sums,
+                src=src_rank,
+                group=mpu.get_pipe_parallel_group(),
+            )
+            torch.distributed.broadcast(
+                tensor=max_equals, src=src_rank, group=mpu.get_pipe_parallel_group()
+            )
+            max_equals = [bool(i) for i in max_equals.tolist()]
+            logits_sums = logits_sums.tolist()
+            res = list(zip(logits_sums, max_equals))
+
+        self.model.module.train_mode()  # set back to train mode
+        return reord.get_original(res)
+
+    def _dp_scatter(self, inps):
+        """
+        Scatters the inputs to all data parallel ranks.
+        """
+
+        batch_size = inps.shape[0]
+        padded = False
+        if batch_size % self.dp_world_size != 0:
+            # The last batch could potentially not fill the full batch size (if the dataset size is not divisible by batch size)
+            # In this case we pad the batch
+            padded_size = self.dp_world_size - (batch_size % self.dp_world_size)
+
+            print_rank_0(
+                f"WARNING: Batch size ({batch_size}) is not divisible by dp world size ({self.dp_world_size}). Padding with {padded_size} extra input(s)."
+            )
+
+            inps = torch.cat(
+                [inps] + [inps[0:1, ...] for _ in range(padded_size)],
+                dim=0,
+            )  # pad with first inp item
+            padded = True
+
+        assert (
+            inps.shape[0] % self.dp_world_size == 0
+        ), f"batch size ({inps.shape[0]}) must be divisible by dp world size ({self.dp_world_size})"
+
+        # get a chunk for each data parallel rank
+        chunk_size = inps.shape[0] // self.dp_world_size
+        inps = inps[self.dp_rank * chunk_size : (self.dp_rank + 1) * chunk_size]
+        # make a dummy dataloader / iterator to pass to model
+        # we need to do this because deepspeed pipe parallel only takes an iterator
+        # in this format
+        return iter([{"text": F.pad(inps, pad=(0, 1))}]), padded
+
+    def _dp_gather(self, logits):
+        """
+        Gather logits from all data parallel ranks
+        """
+        if logits is not None:
+            tensor_list = [torch.zeros_like(logits) for _ in range(self.dp_world_size)]
+            torch.distributed.all_gather(
+                tensor_list, logits, group=mpu.get_data_parallel_group()
+            )
+            logits = torch.cat(tensor_list, dim=0)
+        return logits
+
+    def _model_call(self, inps):
+        batch_size = inps.shape[0]
+
+        # scatter inputs to all dp ranks:
+        inps, padded = self._dp_scatter(inps)
+
+        if self.neox_args.is_pipe_parallel:
+            # need these flags to stop deepspeed pipe parallel from hanging
+            self.model.first_output_send = True
+            self.model.pipe_recv_buf = None
+
+        _, logits = self._forward_step_fn(model=self.model, data_iterator=inps)
+
+        # gather outputs from all dp ranks:
+        logits = self._dp_gather(logits)
+
+        # if logits have been padded (normally just last item where batch size is unequal)
+        # restore to original shape
+        if padded and logits is not None:
+            logits = logits[:batch_size, ...]
+        return logits
+
+    def _model_generate(self, context, max_length, eos_token_id):
+        # Isn't used because we override `generate_until`.
+        raise NotImplementedError()
+
+    @torch.no_grad()
+    def run_eval(
+        self,
+        eval_tasks=None,
+        num_fewshot=0,
+        bootstrap_iters=2,
+        use_cache=True,
+        name="neox",
+        limit=None,
+    ):
+        was_training = self.model.training
+        self.model.eval()
+        in_micro_batches = (
+            self.model.micro_batches
+        )  # store input microbatches - we need to set to 1 during eval, but want to return to its original value after
+        self.model.micro_batches = 1
+        if eval_tasks is None:
+            eval_tasks = [
+                "lambada",
+                "piqa",
+                "hellaswag",
+                "winogrande",
+                "mathqa",
+                "pubmedqa",
+                "triviaqa",
+            ]
+
+        # register all the default tasks bundled with the lm-evaluation-harness repository
+        tasks.initialize_tasks()
+
+        # Returns a list containing all values of the task registry that
+        # match at least one of the patterns
+        import fnmatch
+
+        def pattern_match(patterns, source_list):
+            task_names = set()
+            for pattern in patterns:
+                for matching in fnmatch.filter(source_list, pattern):
+                    task_names.add(matching)
+            return list(task_names)
+
+        eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS)
+        print(f"Found tasks: {eval_tasks}")
+
+        assert len(eval_tasks) > 0, "Must run at least one task"
+
+        # **HACK INCOMING**:
+        # first get task dict on local main rank
+        # the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading.
+        # so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache.
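+        # (concretely: the barrier below holds every other rank until the local
+        # main rank has populated the download cache; the second get_task_dict
+        # call then runs on all ranks and should hit that cache)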
+        if self.is_local_main:
+            task_dict = tasks.get_task_dict(eval_tasks)
+        # torch barrier
+        if torch.distributed.is_initialized():
+            torch.distributed.barrier()
+        task_dict = tasks.get_task_dict(eval_tasks)
+
+        lm = self
+
+        if use_cache:
+            use_cache = (
+                "lm_cache/neox"
+                + "_dp_rank"
+                + str(self.dp_rank)
+                + "_dp_group"
+                + str(self.dp_group)
+                + ".db"
+            )
+            print(f"Using cache at {use_cache}...")
+            lm = api.model.CachingLM(
+                lm,
+                use_cache,
+                # each rank receives a different cache db.
+                # necessary to avoid multiple writes to cache at once
+                # TODO: Append a subset of `neox_args` to the cache database
+                # name arg to distinguish model runs that use different configurations.
+            )
+
+        # from simple_evaluate:
+        # override fewshot values for all tasks we can
+        for task_name in task_dict.keys():
+            task_obj = task_dict[task_name]
+            if type(task_obj) == tuple:
+                group, task_obj = task_obj
+                if task_obj is None:
+                    continue
+
+            config = task_obj._config
+
+            if num_fewshot is not None:
+                if config["num_fewshot"] == 0:
+                    utils.eval_logger.info(
+                        f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored."
+                    )
+                else:
+                    default_num_fewshot = config["num_fewshot"]
+                    if default_num_fewshot != num_fewshot:
+                        utils.eval_logger.warning(
+                            f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}"
+                        )
+
+                    task_obj._config["num_fewshot"] = num_fewshot
+
+        results = evaluator.evaluate(
+            lm=lm,
+            task_dict=task_dict,
+            limit=limit,
+            bootstrap_iters=bootstrap_iters,
+            log_samples=False,
+        )
+
+        results["config"] = {
+            "model": name,
+            "model_args": dataclasses.asdict(self.neox_args),
+            "batch_size": self.batch_size,
+            "device": str(self.device),
+            "use_cache": use_cache,
+            "limit": limit,
+            "bootstrap_iters": bootstrap_iters,
+        }
+        results["git_hash"] = utils.get_git_commit_hash()
+
+        for task_name in task_dict.keys():
+            if "alias" in results["results"][task_name]:
+                results["results"][task_name].pop("alias")
+
+        if was_training:
+            self.model.train()
+        self.model.micro_batches = in_micro_batches
+        return results
+
+
+def run_eval_harness(
+    model,
+    forward_step_fn,
+    neox_args,
+    batch_size=None,
+    eval_tasks=None,
+    num_fewshot=0,
+    bootstrap_iters=2,
+):
+    print_rank_0("Running evaluation harness...")
+    adapter = EvalHarnessAdapter(
+        model, forward_step_fn, neox_args, batch_size=batch_size
+    )
+    return adapter.run_eval(
+        eval_tasks=eval_tasks,
+        num_fewshot=num_fewshot,
+        bootstrap_iters=bootstrap_iters,
+        use_cache=False,
+    )
diff --git a/generate.py b/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19ef2e0e8453cf860af81ca36f9271ab4150acc
--- /dev/null
+++ b/generate.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Copyright (c) 2024 EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
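+
+# Typical invocation (config names are illustrative):
+#
+#   python ./deepy.py generate.py -d configs your_model.yml text_generation.yml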
+ +from megatron.utils import print_rank_0, setup_for_inference_or_eval + +from megatron.text_generation_utils import ( + generate_samples_input_from_file, + generate_samples_from_prompt, + generate_samples_unconditional, + generate_samples_interactive, + precompute_logits, +) + + +def main(input_args=None, overwrite_values=None): + """ + Generate text/sample model + """ + model, neox_args = setup_for_inference_or_eval( + use_cache=True, input_args=input_args, overwrite_values=overwrite_values + ) + if neox_args.recompute: + model.module.inference_mode( + use_cache=False + ) # don't use kv cache if recomputing + if neox_args.text_gen_type == "unconditional": + print_rank_0( + f"Generating samples unconditionally and saving results to {neox_args.sample_output_file}" + ) + generate_samples_unconditional( + neox_args=neox_args, + model=model, + number_of_samples=neox_args.num_samples, + output_file=neox_args.sample_output_file, + maximum_tokens=neox_args.maximum_tokens, + recompute=neox_args.recompute, + temperature=neox_args.temperature, + top_k=neox_args.top_k, + top_p=neox_args.top_p, + ) + + elif neox_args.text_gen_type == "input-file": + print_rank_0( + f"Generating samples from input file {neox_args.sample_input_file}" + ) + assert neox_args.sample_input_file is not None + generate_samples_input_from_file( + neox_args=neox_args, + model=model, + input_file=neox_args.sample_input_file, + output_file=neox_args.sample_output_file, + maximum_tokens=neox_args.maximum_tokens, + prompt_end=neox_args.prompt_end, + recompute=neox_args.recompute, + temperature=neox_args.temperature, + top_k=neox_args.top_k, + top_p=neox_args.top_p, + ) + + elif neox_args.text_gen_type == "interactive": + generate_samples_interactive( + neox_args=neox_args, + model=model, + recompute=neox_args.recompute, + temperature=neox_args.temperature, + maximum_tokens=neox_args.maximum_tokens, + prompt_end=neox_args.prompt_end, + top_k=neox_args.top_k, + top_p=neox_args.top_p, + ) + + elif neox_args.text_gen_type == "precompute": + precompute_logits(neox_args=neox_args, model=model) + else: + raise ValueError( + f"`text_gen_type` either not specified or not recognised: {neox_args.text_gen_type}" + ) + + +if __name__ == "__main__": + main() diff --git a/images/memory_profiling.png b/images/memory_profiling.png new file mode 100644 index 0000000000000000000000000000000000000000..a691e1cb63f3cfa60ef010b3ff65544e81090775 --- /dev/null +++ b/images/memory_profiling.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a119a54df4346aad128352aae728e4ec421dea9ca9ca546d58aefc77901629e +size 1089137 diff --git a/images/nsight_profiling.png b/images/nsight_profiling.png new file mode 100644 index 0000000000000000000000000000000000000000..36ecc6568e6afe93f84420467639dc5083378b5a Binary files /dev/null and b/images/nsight_profiling.png differ diff --git a/images/pytorch_profiling.png b/images/pytorch_profiling.png new file mode 100644 index 0000000000000000000000000000000000000000..e85324dc694d7c11e922a0cdc54f38d364f99d59 Binary files /dev/null and b/images/pytorch_profiling.png differ diff --git a/megatron/__init__.py b/megatron/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fc254fe3d1f5757e6fa4fa8b5743e7983ebbcb4b --- /dev/null +++ b/megatron/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + + +def print_rank_0(*message): + """If distributed is initialized print only on rank 0.""" + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == 0: + print(*message, flush=True) + else: + print(*message, flush=True) + + +from .neox_arguments import NeoXArgs diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..1b6909c9fb032bad662b65a31f6732bfaec0efe8 --- /dev/null +++ b/megatron/checkpointing.py @@ -0,0 +1,483 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Input/output checkpointing.""" + +import json +import math +import os +import re +import shutil +import time +import random +import sys +import numpy as np + +try: + import boto3 +except ModuleNotFoundError: + print( + "For s3 checkpointing, please install boto3 either using requirements/requirements-s3.txt or https://github.com/boto/boto3" + ) +try: + import hf_transfer +except ModuleNotFoundError: + print( + "For s3 checkpointing, please install hf_transfer either using requirements/requirements-s3.txt or https://github.com/huggingface/hf_transfer" + ) +import torch +from glob import glob + +from megatron import mpu +from megatron import print_rank_0 +from megatron.utils import natural_sort +from megatron.text_generation_utils import get_batch, forward_model +from pathlib import Path +from pprint import pformat + + +def check_checkpoint_args(neox_args, checkpoint_args): + """Ensure fixed arguments for a model are the same for the input + arguments and the one retrieved from checkpoint.""" + + assert isinstance(checkpoint_args, dict), "args stored in checkpoint is a dict" + for checkpoint_arg_name, checkpoint_arg_value in checkpoint_args.items(): + args_value = getattr(neox_args, checkpoint_arg_name) + error_message = "{} value from checkpoint ({}) is not equal to the currently set argument value ({}).".format( + checkpoint_arg_name, checkpoint_arg_value, args_value + ) + assert checkpoint_arg_value == args_value, error_message + + +def do_forward_pass(neox_args, model, inference=False): + + # set to eval mode + model_was_in_train = model.training + model.eval() + + # get context tokens + # always forward full batch size + context_tokens_tensor = ( + torch.arange(neox_args.seq_length + 1) + .repeat((neox_args.train_micro_batch_size_per_gpu, 1)) + .cuda() + ) + + # forward + if inference: + tokens, 
attention_mask, position_ids = get_batch(
+            neox_args, context_tokens_tensor[:, : neox_args.seq_length]
+        )
+        model_inputs = (
+            tokens,
+            position_ids,
+            attention_mask,
+            torch.Tensor(),
+        )
+        logits, _ = forward_model(neox_args, model, model_inputs)
+    elif neox_args.is_pipe_parallel:
+        data_iterator = iter([{"text": context_tokens_tensor}])
+        _, logits = model.eval_batch(data_iter=data_iterator, return_logits=True)
+    else:
+        tokens, attention_mask, position_ids = get_batch(
+            neox_args, context_tokens_tensor[:, : neox_args.seq_length]
+        )
+        logits = model((tokens, position_ids, attention_mask))
+
+    # reset to train mode, if model was in training before
+    if model_was_in_train:
+        model.train()
+
+    if logits is not None:
+        logits = logits.detach().cpu()[
+            0
+        ]  # just return first batch item (they are all equal)
+
+    return logits
+
+
+def check_forward_pass(neox_args, model, checkpoint_logits, inference):
+    # do forward pass with loaded checkpoint
+    logits = do_forward_pass(neox_args=neox_args, model=model, inference=inference)
+
+    # check
+    if (
+        logits is not None and checkpoint_logits is not None
+    ):  # this could be the case for non-final pipeline stages
+        if not (logits == checkpoint_logits).all().item():
+            if mpu.get_data_parallel_rank() == 0:
+                print(
+                    " > WARNING: validate_checkpoint_forward() forward after load of checkpoint does not yield exactly the same result"
+                )
+            assert (
+                torch.isclose(logits, checkpoint_logits).all().item()
+            ), "validate_checkpoint_forward() forward after load of checkpoint does not yield a close result"
+
+
+def ensure_directory_exists(filename):
+    """Build filename's path if it does not already exist."""
+    dirname = os.path.dirname(filename)
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+
+
+def get_checkpoint_name(checkpoints_path, iteration, release=False, mp_rank=None):
+    """A unified checkpoint name."""
+    if release:
+        directory = "release"
+    else:
+        directory = "iter_{:07d}".format(iteration)
+    return os.path.join(
+        checkpoints_path,
+        directory,
+        "mp_rank_{:02d}".format(
+            mpu.get_model_parallel_rank() if mp_rank is None else mp_rank
+        ),
+        "model_optim_rng.pt",
+    )
+
+
+def get_checkpoint_tag(iteration: int) -> str:
+    return f"global_step{iteration}"
+
+
+def delete_old_checkpoints(save_dir, n_to_keep):
+    if torch.distributed.get_rank() == 0:
+        ckpt_dir_regex = r"global_step[\d]*"
+        if save_dir.endswith("/"):
+            # rstrip (not strip) so absolute paths keep their leading "/"
+            save_dir = save_dir.rstrip("/")
+        all_ckpts = natural_sort(
+            [
+                i
+                for i in glob(f"{save_dir}/*")
+                if os.path.isdir(i) and re.search(ckpt_dir_regex, i)
+            ]
+        )
+        n_to_delete = len(all_ckpts) - n_to_keep
+        if n_to_delete > 0:
+            to_delete = all_ckpts[:n_to_delete]
+            print(f"WARNING: Deleting old checkpoints: \n\t{', '.join(to_delete)}")
+            for ckpt in to_delete:
+                try:
+                    shutil.rmtree(ckpt)
+                except FileNotFoundError:
+                    pass
+
+
+def save_ds_checkpoint(iteration, model, neox_args):
+    """Save a model checkpoint."""
+    sd = {
+        "iteration": iteration,
+        "args": {
+            "num_layers": neox_args.num_layers,
+            "hidden_size": neox_args.hidden_size,
+            "num_attention_heads": neox_args.num_attention_heads,
+            "max_position_embeddings": neox_args.max_position_embeddings,
+            "make_vocab_size_divisible_by": neox_args.make_vocab_size_divisible_by,
+            "padded_vocab_size": neox_args.padded_vocab_size,
+            "tokenizer_type": neox_args.tokenizer_type,
+            "model_parallel_size": neox_args.model_parallel_size,
+        },
+    }
+    # rng states.
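+    # (python, numpy, torch CPU and CUDA generator states, plus the
+    # model-parallel rng tracker, are all captured so that a resumed run can
+    # reproduce data ordering and dropout masks exactly)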
+    if not neox_args.no_save_rng:
+        sd["random_rng_state"] = random.getstate()
+        sd["np_rng_state"] = np.random.get_state()
+        sd["torch_rng_state"] = torch.get_rng_state()
+        sd["cuda_rng_state"] = torch.cuda.get_rng_state()
+        sd["rng_tracker_states"] = mpu.get_cuda_rng_tracker().get_states()
+
+    if neox_args.checkpoint_validation_with_forward_pass:
+        logits = do_forward_pass(neox_args=neox_args, model=model)
+        sd["checkpoint_validation_logits"] = logits
+
+    # checkpoint folder name
+    tag = get_checkpoint_tag(iteration)
+
+    # save checkpoint
+    model.save_checkpoint(neox_args.save, tag=tag, client_state=sd)
+
+    # save config files
+    if torch.distributed.get_rank() == 0 and neox_args.config_files is not None:
+        configs_directory = os.path.join(neox_args.save, tag, "configs")
+        os.makedirs(configs_directory, exist_ok=True)
+        for config_filename, config_data in neox_args.config_files.items():
+            with open(os.path.join(configs_directory, config_filename), "w") as f:
+                if isinstance(config_data, str):
+                    f.write(config_data)
+                else:
+                    json.dump(config_data, f)
+
+
+def multiprocessing_starmap(func, args, num_processes=None):
+    """Wrapper to allow for re-usable multiprocessing pools with `spawn` context handling
+    Args:
+        func (Callable): Function to call
+        args (Iterable): Iterable of arguments to pass to `func`
+        num_processes (int, optional): Number of processes to spawn. Defaults to `multiprocessing.cpu_count() - 1`
+    """
+    import multiprocessing
+
+    num_processes = num_processes or (multiprocessing.cpu_count() - 1)
+    with multiprocessing.get_context("spawn").Pool(
+        processes=num_processes
+    ) as process_pool:
+        process_pool.starmap(func, args)
+        process_pool.terminate()
+        process_pool.join()
+        del process_pool
+
+
+def _upload(
+    file_path: str,
+    s3_key: str,
+    chunk_size: int = 104_857_600,
+    max_files: int = 64,
+    parallel_failures: int = 63,
+    max_retries: int = 5,
+):
+    """Upload local file to S3 using `hf_transfer` library
+    Args:
+        file_path (str): Local filename to upload
+        s3_key (str): S3 key to upload to. E.g. `s3://bucket-name/path/to/file`
+        chunk_size (int, optional): Chunk size to use for multipart upload.
+            Defaults to 100MiB = 104_857_600
+        max_files (int, optional): Number of open file handles, which determines
+            the maximum number of parallel uploads. Defaults to 64
+        parallel_failures (int, optional): Number of maximum failures of different
+            chunks in parallel (cannot exceed max_files). Defaults to 63
+        max_retries (int, optional): Number of retries for each chunk. Defaults to 5
+    """
+    s3 = boto3.client("s3")
+    bucket = s3_key.split("s3://")[1].split("/")[0]
+    key = s3_key.split(bucket)[1].lstrip("/")
+
+    # 1. Init multipart upload and obtain unique upload identifier
+    upload = s3.create_multipart_upload(
+        ACL="bucket-owner-full-control",
+        Bucket=bucket,
+        Key=key,
+    )
+    upload_id = upload["UploadId"]
+
+    # 2. Generate presigned URLs for each part
+    file_size = os.stat(file_path).st_size
+    urls = []
+    nb_parts = math.ceil(file_size / chunk_size)
+    for part_number in range(1, nb_parts + 1):
+        params = {
+            "Bucket": bucket,
+            "Key": key,
+            "PartNumber": part_number,
+            "UploadId": upload_id,
+        }
+        urls.append(
+            s3.generate_presigned_url(
+                ClientMethod="upload_part", Params=params, ExpiresIn=86400
+            )
+        )
+
+    # 3. Upload parts in parallel
+    responses = hf_transfer.multipart_upload(
+        file_path=file_path,
+        parts_urls=urls,
+        chunk_size=chunk_size,
+        max_files=max_files,
+        parallel_failures=parallel_failures,
+        max_retries=max_retries,
+    )
+
+    # 4.
Complete multipart upload request with ETag values + etag_with_parts = [] + for part_number, header in enumerate(responses): + etag = header.get("etag") + etag_with_parts.append({"ETag": etag, "PartNumber": part_number + 1}) + parts = {"Parts": etag_with_parts} + s3.complete_multipart_upload( + Bucket=bucket, Key=key, MultipartUpload=parts, UploadId=upload_id + ) + + +def upload_checkpoint(iteration, neox_args): + local_checkpoint_path = os.path.join( + os.path.abspath(neox_args.save), get_checkpoint_tag(iteration) + ) + local_checkpoint_list = sorted( + filter( + lambda x: os.path.isfile(x), + [str(p) for p in Path(local_checkpoint_path).rglob("*")], + ) + ) + remote_checkpoint_path = os.path.join( + neox_args.s3_path, + os.path.basename(neox_args.save), + get_checkpoint_tag(iteration), + ) + remote_checkpoint_list = [ + os.path.join( + remote_checkpoint_path, + os.path.relpath(local_checkpoint, local_checkpoint_path), + ) + for local_checkpoint in local_checkpoint_list + ] + inputs = zip( + local_checkpoint_list, + remote_checkpoint_list, + [neox_args.s3_chunk_size] * len(local_checkpoint_list), + ) + + print_rank_0( + f"[RANK {torch.distributed.get_rank()}] Uploading checkpoint `{local_checkpoint_path}` to `{remote_checkpoint_path}`..." + ) + start = time.time() + multiprocessing_starmap(_upload, inputs) + total_time = time.time() - start + print_rank_0( + f"[RANK {torch.distributed.get_rank()}] Uploaded checkpoint `{local_checkpoint_path}` to `{remote_checkpoint_path}` in {total_time:.2f}s" + ) + + +def save_checkpoint(neox_args, iteration, model, optimizer, lr_scheduler): + """Save a model checkpoint.""" + + if neox_args.deepspeed: + save_ds_checkpoint(iteration, model, neox_args) + else: + raise ValueError("Must be using deepspeed to use neox") + + torch.distributed.barrier() + upload_to_s3 = torch.distributed.get_rank() == 0 and neox_args.s3_path is not None + if upload_to_s3: + upload_checkpoint(iteration, neox_args) + + # Wait so everyone is done (necessary) + torch.distributed.barrier() + if neox_args.keep_last_n_checkpoints is not None: + delete_old_checkpoints(neox_args.save, neox_args.keep_last_n_checkpoints) + + # Wait so everyone is done (not necessary) + torch.distributed.barrier() + + +def load_checkpoint( + neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None +): + """Load a model checkpoint and return the iteration.""" + if neox_args.deepspeed: + load_optim_and_scheduler = ( + not neox_args.no_load_optim + ) # TODO: These should be configured by separate args + if neox_args.finetune: + load_optim_and_scheduler = False + if iteration is not None: + tag = get_checkpoint_tag(iteration) + else: + tag = None + checkpoint_name, state_dict = model.load_checkpoint( + neox_args.load, + load_optimizer_states=load_optim_and_scheduler, + load_lr_scheduler_states=load_optim_and_scheduler, + load_module_only=not load_optim_and_scheduler, + tag=tag, + load_module_strict=neox_args.train_impl != "rm", + ) + + if checkpoint_name is None: + # if an iteration is specified, we want to raise an error here rather than + # continuing silently, since we are trying to load a specific checkpoint + if iteration is not None: + available_checkpoints = sorted( + [ + int(i.name.replace("global_step", "")) + for i in Path(neox_args.load).glob("global_step*") + ] + ) + raise ValueError( + f"Unable to load checkpoint for iteration {iteration}. 
\nAvailable iterations: {pformat(available_checkpoints)}"
+                )
+            if mpu.get_data_parallel_rank() == 0:
+                print("Unable to load checkpoint.")
+
+            return 0  # iteration 0, if no checkpoint loaded
+    else:
+        raise ValueError("Must be using deepspeed to use neox")
+
+    # Set iteration.
+    if neox_args.finetune:
+        iteration = 0
+    else:
+        if "iteration" in state_dict:
+            iteration = state_dict["iteration"]
+        else:
+            iteration = state_dict.get(
+                "total_iters"
+            )  # total_iters backward compatible with older checkpoints
+        if iteration is None:
+            raise ValueError(
+                f"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting"
+            )
+
+    # Check arguments.
+    if "args" in state_dict:
+        checkpoint_args = state_dict["args"]
+        check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)
+        print_rank_0(
+            " > validated currently set args with arguments in the checkpoint ..."
+        )
+    else:
+        print_rank_0(" > could not find arguments in the checkpoint for validation...")
+
+    # Check loaded checkpoint with forward pass
+    if neox_args.checkpoint_validation_with_forward_pass:
+        if "checkpoint_validation_logits" in state_dict:
+            check_forward_pass(
+                neox_args=neox_args,
+                model=model,
+                checkpoint_logits=state_dict["checkpoint_validation_logits"],
+                inference=inference,
+            )
+            print_rank_0(" > validated loaded checkpoint with forward pass ...")
+        else:
+            if mpu.get_data_parallel_rank() == 0:
+                print(
+                    " > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}".format(
+                        checkpoint_name
+                    )
+                )
+
+    # rng states.
+    if not neox_args.finetune and not neox_args.no_load_rng:
+        try:
+            random.setstate(state_dict["random_rng_state"])
+            np.random.set_state(state_dict["np_rng_state"])
+            torch.set_rng_state(state_dict["torch_rng_state"])
+            torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
+            mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
+        except KeyError:
+            print_rank_0(
+                "Unable to load rng state from checkpoint {}. "
+                "Specify --no-load-rng or --finetune to prevent "
+                "attempting to load the rng state, "
+                "exiting ...".format(checkpoint_name)
+            )
+            sys.exit()
+
+    torch.distributed.barrier()
+    if mpu.get_data_parallel_rank() == 0:
+        print(" successfully loaded {}".format(checkpoint_name))
+
+    return iteration
diff --git a/megatron/data/Makefile b/megatron/data/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..8f9db7686696fbea6c94b998db4b40ef426c748d
--- /dev/null
+++ b/megatron/data/Makefile
@@ -0,0 +1,9 @@
+CXXFLAGS += -O3 -Wall -shared -std=c++11 -fPIC -fdiagnostics-color
+CPPFLAGS += $(shell python3 -m pybind11 --includes)
+LIBNAME = helpers
+LIBEXT = $(shell python3-config --extension-suffix)
+
+default: $(LIBNAME)$(LIBEXT)
+
+%$(LIBEXT): %.cpp
+	$(CXX) $(CXXFLAGS) $(CPPFLAGS) $< -o $@
diff --git a/megatron/data/__init__.py b/megatron/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834
--- /dev/null
+++ b/megatron/data/__init__.py
@@ -0,0 +1 @@
+from . import *
diff --git a/megatron/data/blendable_dataset.py b/megatron/data/blendable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..0392841cb694f0e86f1fd30fda357f42ac84e105
--- /dev/null
+++ b/megatron/data/blendable_dataset.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Blendable dataset.""" + +import time + +import numpy as np +import torch + +from megatron import print_rank_0 +from megatron import mpu + + +class BlendableDataset(torch.utils.data.Dataset): + def __init__(self, datasets, weights): + self.datasets = datasets + num_datasets = len(datasets) + assert num_datasets == len(weights) + + self.size = 0 + for dataset in self.datasets: + self.size += len(dataset) + + # Normalize weights. + weights = np.array(weights, dtype=np.float64) + sum_weights = np.sum(weights) + assert sum_weights > 0.0 + weights /= sum_weights + + # Build indices. + start_time = time.time() + assert num_datasets < 255 + self.dataset_index = np.zeros(self.size, dtype=np.uint8) + self.dataset_sample_index = np.zeros(self.size, dtype=np.int64) + + from megatron.data import helpers + + helpers.build_blending_indices( + self.dataset_index, + self.dataset_sample_index, + weights, + num_datasets, + self.size, + torch.distributed.get_rank() == 0, + ) + + print( + "> RANK {} elapsed time for building blendable dataset indices: " + "{:.2f} (sec)".format( + torch.distributed.get_rank(), time.time() - start_time + ) + ) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + try: + dataset_idx = self.dataset_index[idx] + sample_idx = self.dataset_sample_index[idx] + return self.datasets[dataset_idx][sample_idx] + except IndexError: + new_idx = idx % len(self) + print( + f"WARNING: Got index out of bounds error with index {idx} - taking modulo of index instead ({new_idx})" + ) + return self[new_idx] diff --git a/megatron/data/data_utils.py b/megatron/data/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c08b601514ba9bbe0237f5d686070919f1ae9552 --- /dev/null +++ b/megatron/data/data_utils.py @@ -0,0 +1,766 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +import torch +import numpy as np +from typing import List, Tuple +from itertools import zip_longest, cycle +from functools import partial + +from megatron import mpu, print_rank_0 +from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset +from megatron.data.blendable_dataset import BlendableDataset +from megatron.data.gpt2_dataset import GPT2Dataset +from megatron.data.pairwise_dataset import PairwiseDataset +from megatron.data.samplers import DistributedBatchSampler + + +def make_data_loader(dataset, neox_args): + """Build dataloader given an input dataset.""" + if dataset is None: + return None + # Data parallel arguments. + world_size = mpu.get_data_parallel_world_size() + rank = mpu.get_data_parallel_rank() + global_batch_size = neox_args.batch_size * world_size + num_workers = neox_args.num_workers + + # Use a simple sampler with distributed batch sampler. + sampler = torch.utils.data.SequentialSampler(dataset) + batch_sampler = DistributedBatchSampler( + sampler=sampler, + batch_size=global_batch_size, + drop_last=True, + rank=rank, + world_size=world_size, + ) + # Torch dataloader. + return torch.utils.data.DataLoader( + dataset, batch_sampler=batch_sampler, num_workers=num_workers, pin_memory=True + ) + + +def build_the_dataset( + data_prefix, + pos_data_prefix, + neg_data_prefix, + name, + data_impl, + pack_impl, + dataset_impl, + allow_chopped, + num_samples, + num_epochs, + seq_length, + seed, + skip_warmup, + build_index_mappings=True, + label_prefix=None, + pos_label_prefix=None, + neg_label_prefix=None, + precompute_model_name=None, + reward_prefix=None, +): + """Build train/valid/test datasets.""" + if dataset_impl == "gpt2": + indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup) + if label_prefix is None: + label_dataset = None + else: + label_dataset = make_indexed_dataset(label_prefix, data_impl, skip_warmup) + if precompute_model_name is not None: + # If we have the name, assume it exists. If it doesn't, it will just be None which is fine. 
+ precompute_indexed_dataset = make_indexed_dataset( + data_prefix + "_" + precompute_model_name, data_impl, skip_warmup + ) + precompute_indexed_dataset = precompute_indexed_dataset + else: + precompute_indexed_dataset = None + if reward_prefix is not None: + reward_dataset = make_indexed_dataset(reward_prefix, data_impl, skip_warmup) + else: + reward_dataset = None + elif dataset_impl == "pairwise": + pos_indexed_dataset = make_indexed_dataset( + pos_data_prefix, data_impl, skip_warmup + ) + neg_indexed_dataset = make_indexed_dataset( + neg_data_prefix, data_impl, skip_warmup + ) + if pos_label_prefix is None: + pos_label_dataset = None + # Also do neg here since they both must be the same + assert neg_label_prefix is None + neg_label_dataset = None + else: + pos_label_dataset = make_indexed_dataset( + pos_label_prefix, data_impl, skip_warmup + ) + # Also do neg here since they both must be the same + assert neg_label_prefix is not None + neg_label_dataset = make_indexed_dataset( + neg_label_prefix, data_impl, skip_warmup + ) + if precompute_model_name is None: + pos_ref_dataset = None + neg_ref_dataset = None + else: + pos_ref_dataset = make_indexed_dataset( + pos_data_prefix + "_" + precompute_model_name, data_impl, skip_warmup + ) + neg_ref_dataset = make_indexed_dataset( + neg_data_prefix + "_" + precompute_model_name, data_impl, skip_warmup + ) + else: + raise NotImplementedError(f"dataset_impl={dataset_impl} not implemented") + + total_num_of_documents = ( + indexed_dataset.sizes.shape[0] + if dataset_impl == "gpt2" + else pos_indexed_dataset.sizes.shape[0] + ) + print_rank_0(" {}:".format(name)) + print_rank_0(" no. of documents:{}".format(total_num_of_documents)) + dataset = None + documents = np.arange(start=0, stop=total_num_of_documents, step=1, dtype=np.int32) + if dataset_impl == "gpt2": + dataset = GPT2Dataset( + name, + data_prefix, + documents, + indexed_dataset, + num_samples, + num_epochs, + seq_length, + seed, + pack_impl=pack_impl, + allow_chopped=allow_chopped, + build_index_mappings=build_index_mappings, + label_dataset=label_dataset, + reward_dataset=reward_dataset, + ref_dataset=precompute_indexed_dataset, + ) + elif dataset_impl == "pairwise": + dataset = PairwiseDataset( + name, + pos_data_prefix, + documents, + pos_indexed_dataset, + neg_indexed_dataset, + num_samples, + seq_length, + seed, + pack_impl=pack_impl, + allow_chopped=allow_chopped, + build_index_mappings=build_index_mappings, + pos_label_dataset=pos_label_dataset, + neg_label_dataset=neg_label_dataset, + pos_ref_dataset=pos_ref_dataset, + neg_ref_dataset=neg_ref_dataset, + ) + return dataset + + +def build_train_valid_test_datasets( + data_prefix, + use_shared_fs, + data_impl, + pack_impl, + allow_chopped, + splits_string, + train_valid_test_num_samples, + train_valid_test_epochs, + seq_length, + seed, + skip_warmup, +): + """Build train, valid, and test datasets.""" + + # Indexed dataset. + indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup) + + total_num_of_documents = indexed_dataset.sizes.shape[0] + splits = get_train_valid_test_split_(splits_string, total_num_of_documents) + + # Print stats about the splits. 
+ print_rank_0(" > dataset split:") + + def print_split_stats(name, index): + print_rank_0(" {}:".format(name)) + print_rank_0( + " document indices in [{}, {}) total of {} " + "documents".format( + splits[index], splits[index + 1], splits[index + 1] - splits[index] + ) + ) + + print_split_stats("train", 0) + print_split_stats("validation", 1) + print_split_stats("test", 2) + + def build_dataset(index, name): + dataset = None + if splits[index + 1] > splits[index]: + documents = np.arange( + start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32 + ) + dataset = GPT2Dataset( + name, + data_prefix, + documents, + indexed_dataset, + train_valid_test_num_samples[index], + train_valid_test_epochs[index], + seq_length, + seed, + pack_impl=pack_impl, + allow_chopped=allow_chopped, + use_shared_fs=use_shared_fs, + ) + return dataset + + train_dataset = build_dataset(0, "train") + valid_dataset = build_dataset(1, "valid") + test_dataset = build_dataset(2, "test") + + return train_dataset, valid_dataset, test_dataset + + +def get_train_valid_test_split_(splits_string, size): + """Get dataset splits from comma or '/' separated string list.""" + + splits = [] + if splits_string.find(",") != -1: + splits = [float(s) for s in splits_string.split(",")] + elif splits_string.find("/") != -1: + splits = [float(s) for s in splits_string.split("/")] + else: + splits = [float(splits_string)] + while len(splits) < 3: + splits.append(0.0) + splits = splits[:3] + splits_sum = sum(splits) + assert splits_sum > 0.0 + splits = [split / splits_sum for split in splits] + splits_index = [0] + for index, split in enumerate(splits): + splits_index.append(splits_index[index] + int(round(split * float(size)))) + diff = splits_index[-1] - size + for index in range(1, len(splits_index)): + splits_index[index] -= diff + assert len(splits_index) == 4 + assert splits_index[-1] == size + return splits_index + + +def get_normalized_weights_and_num_samples( + weights: List[float], num_samples: int +) -> Tuple[List[float], List[int]]: + # Normalize weights + weight_sum = sum(weights) + assert weight_sum > 0.0 + weights = [weight / weight_sum for weight in weights] + if num_samples is not None: + # Add 0.5% (the 1.005 factor) so in case the blending dataset does + # not uniformly distribute the number of samples, we still have + # samples left to feed to the network. 
+ weighted_num_samples = [] + for weight in weights: + weighted_num_samples.append(int(math.ceil(num_samples * weight * 1.005))) + else: + weighted_num_samples = [None for _ in weights] + return weights, weighted_num_samples + + +def build_weighted_datasets( + neox_args, + train_num_samples, + valid_num_samples, + test_num_samples, + train_epochs, + valid_epochs, + test_epochs, + build_index_mappings=True, +): + # build individual datasets + train_datasets, valid_datasets, test_datasets = [], [], [] + for i, ( + train_path, + train_label_path, + train_reward_path, + valid_path, + valid_label_path, + valid_reward_path, + test_path, + test_label_path, + test_reward_path, + pos_train_path, + neg_train_path, + pos_train_label_path, + neg_train_label_path, + pos_valid_path, + neg_valid_path, + pos_valid_label_path, + neg_valid_label_path, + pos_test_path, + neg_test_path, + pos_test_label_path, + neg_test_label_path, + ) in enumerate( + zip_longest( + neox_args.train_data_paths if neox_args.train_data_paths else [], + neox_args.train_label_data_paths + if neox_args.train_label_data_paths + else [], + neox_args.train_reward_data_paths + if neox_args.train_reward_data_paths + else [], + neox_args.valid_data_paths if neox_args.valid_data_paths else [], + neox_args.valid_label_data_paths + if neox_args.valid_label_data_paths + else [], + neox_args.valid_reward_data_paths + if neox_args.valid_reward_data_paths + else [], + neox_args.test_data_paths if neox_args.test_data_paths else [], + neox_args.test_label_data_paths if neox_args.test_label_data_paths else [], + neox_args.test_reward_data_paths + if neox_args.test_reward_data_paths + else [], + neox_args.pos_train_data_paths if neox_args.pos_train_data_paths else [], + neox_args.neg_train_data_paths if neox_args.neg_train_data_paths else [], + neox_args.pos_train_label_data_paths + if neox_args.pos_train_label_data_paths + else [], + neox_args.neg_train_label_data_paths + if neox_args.neg_train_label_data_paths + else [], + neox_args.pos_valid_data_paths if neox_args.pos_valid_data_paths else [], + neox_args.neg_valid_data_paths if neox_args.neg_valid_data_paths else [], + neox_args.pos_valid_label_data_paths + if neox_args.pos_valid_label_data_paths + else [], + neox_args.neg_valid_label_data_paths + if neox_args.neg_valid_label_data_paths + else [], + neox_args.pos_test_data_paths if neox_args.pos_test_data_paths else [], + neox_args.neg_test_data_paths if neox_args.neg_test_data_paths else [], + neox_args.pos_test_label_data_paths + if neox_args.pos_test_label_data_paths + else [], + neox_args.neg_test_label_data_paths + if neox_args.neg_test_label_data_paths + else [], + ) + ): + if train_path or pos_train_path: + train_datasets.append( + build_the_dataset( + data_prefix=train_path, + name=f"train_{i}", + data_impl=neox_args.data_impl, + pack_impl=neox_args.pack_impl, + allow_chopped=neox_args.allow_chopped, + num_samples=train_num_samples[i], + num_epochs=train_epochs, + seq_length=neox_args.seq_length, + seed=neox_args.seed, + skip_warmup=(not neox_args.mmap_warmup), + build_index_mappings=build_index_mappings, + label_prefix=train_label_path, + dataset_impl=neox_args.dataset_impl, + pos_data_prefix=pos_train_path, + neg_data_prefix=neg_train_path, + pos_label_prefix=pos_train_label_path, + neg_label_prefix=neg_train_label_path, + precompute_model_name=neox_args.precompute_model_name, + reward_prefix=train_reward_path, + ) + ) + + if valid_path or pos_valid_path: + valid_datasets.append( + build_the_dataset( + data_prefix=valid_path, + 
name=f"valid_{i}", + data_impl=neox_args.data_impl, + pack_impl=neox_args.pack_impl, + allow_chopped=neox_args.allow_chopped, + num_samples=valid_num_samples[i], + num_epochs=valid_epochs, + seq_length=neox_args.seq_length, + seed=neox_args.seed, + skip_warmup=(not neox_args.mmap_warmup), + build_index_mappings=build_index_mappings, + label_prefix=valid_label_path, + dataset_impl=neox_args.dataset_impl, + pos_data_prefix=pos_valid_path, + neg_data_prefix=neg_valid_path, + pos_label_prefix=pos_valid_label_path, + neg_label_prefix=neg_valid_label_path, + precompute_model_name=neox_args.precompute_model_name, + reward_prefix=valid_reward_path, + ) + ) + + if test_path or pos_test_path: + test_datasets.append( + build_the_dataset( + data_prefix=test_path, + name=f"test_{i}", + data_impl=neox_args.data_impl, + pack_impl=neox_args.pack_impl, + allow_chopped=neox_args.allow_chopped, + num_samples=test_num_samples[i], + num_epochs=test_epochs, + seq_length=neox_args.seq_length, + seed=neox_args.seed, + skip_warmup=(not neox_args.mmap_warmup), + build_index_mappings=build_index_mappings, + label_prefix=test_label_path, + dataset_impl=neox_args.dataset_impl, + pos_data_prefix=pos_test_path, + neg_data_prefix=neg_test_path, + pos_label_prefix=pos_test_label_path, + neg_label_prefix=neg_test_label_path, + precompute_model_name=neox_args.precompute_model_name, + reward_prefix=test_reward_path, + ) + ) + return train_datasets, valid_datasets, test_datasets + + +def weights_by_num_docs(l: list, alpha=0.3): + """ + Builds weights from a multinomial distribution over groups of data according to the number of + samples in each group. + + We sample from a group according to the probability p(L) ∝ |L| ** α, + where p(L) is the probability of sampling from a given group, + |L| is the number of examples in that datapoint, + and α is a coefficient that acts to upsample data from underrepresented groups + + Hence α (`alpha`) allows us to control how much to 'boost' the probability of training on low-resource groups. 
+
+    See https://arxiv.org/abs/1911.02116 for more details
+    """
+    if len(l) == 1:
+        return [1.0]
+
+    total_n_docs = sum(l)
+    unbiased_sample_probs = [i / total_n_docs for i in l]
+
+    probs = [i**alpha for i in unbiased_sample_probs]
+
+    # normalize
+    total = sum(probs)
+    probs = [i / total for i in probs]
+
+    # multiply by the inverse of each group's unbiased sampling probability,
+    # so that larger groups are downweighted further
+    unbiased_sample_probs_inverse = [1 - p for p in unbiased_sample_probs]
+    weights = [p * p2 for p, p2 in zip(probs, unbiased_sample_probs_inverse)]
+
+    # normalize
+    total = sum(weights)
+    weights = [i / total for i in weights]
+
+    return weights
+
+
+def validate_train_epochs(neox_args):
+    """Check for unsupported neox_args when using train_epochs instead of train_iters."""
+    if neox_args.train_epochs is None:
+        return
+
+    if neox_args.train_epochs and neox_args.train_iters:
+        raise ValueError(
+            "Cannot specify both train epochs and train iters simultaneously"
+        )
+
+    if neox_args.pack_impl != "packed":
+        raise ValueError(
+            "Packing implementations other than 'packed' are currently unsupported with train_epochs"
+        )
+
+    if neox_args.weight_by_num_documents:
+        raise ValueError(
+            "Weighting by number of documents is currently unsupported with train_epochs"
+        )
+
+    if neox_args.train_data_weights and (
+        not all(weight == 1.0 for weight in neox_args.train_data_weights)
+    ):
+        raise ValueError(
+            "train_data_weights != None is currently unsupported with train_epochs"
+        )
+
+    if neox_args.dataset_impl != "gpt2":
+        raise ValueError(
+            "Datasets other than 'gpt2' are currently unsupported with train_epochs"
+        )
+
+
+def build_train_valid_test_data_loaders(neox_args):
+    """Build train, valid, and test dataloaders."""
+
+    validate_train_epochs(neox_args)
+
+    (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
+
+    print_rank_0("> building train, validation, and test datasets ...")
+
+    # Ensure only the first/last pipeline stages have data loaders
+    if neox_args.is_pipe_parallel:
+        is_first_stage = mpu.get_pipe_parallel_rank() == 0
+        is_last_stage = (
+            mpu.get_pipe_parallel_rank() == mpu.get_pipe_parallel_world_size() - 1
+        )
+        pipe_load = is_first_stage or is_last_stage
+    else:
+        pipe_load = True
+
+    # Data loader only on rank 0 of each model parallel group.
+    if mpu.get_model_parallel_rank() == 0 and pipe_load:
+        # Number of train/valid/test samples.
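+        # e.g. train_iters=1000, eval_interval=100, eval_iters=10 provisions
+        # (1000 // 100 + 1) * 10 = 110 evaluation batches below.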
+ if neox_args.train_iters is not None: + train_iters = neox_args.train_iters + eval_iters = ( + train_iters // neox_args.eval_interval + 1 + ) * neox_args.eval_iters + test_iters = neox_args.eval_iters + train_val_test_num_samples = [ + train_iters * neox_args.train_batch_size, + eval_iters * neox_args.train_batch_size, + test_iters * neox_args.train_batch_size, + ] + train_val_test_epochs = [None, None, None] + elif neox_args.train_epochs is not None: + train_val_test_num_samples = [None, None, None] + train_val_test_epochs = [1, 1, 1] + + if (neox_args.train_data_paths) or (neox_args.pos_train_data_paths): + # when individual train / valid / test data paths are provided + # normalize weight values and get num samples for each dataset + train_weights, train_num_samples = get_normalized_weights_and_num_samples( + neox_args.train_data_weights, train_val_test_num_samples[0] + ) + valid_weights, valid_num_samples = get_normalized_weights_and_num_samples( + neox_args.valid_data_weights, train_val_test_num_samples[1] + ) + test_weights, test_num_samples = get_normalized_weights_and_num_samples( + neox_args.test_data_weights, train_val_test_num_samples[2] + ) + + # build individual datasets + train_datasets, valid_datasets, test_datasets = build_weighted_datasets( + neox_args, + train_num_samples, + valid_num_samples, + test_num_samples, + train_val_test_epochs[0], + train_val_test_epochs[1], + train_val_test_epochs[2], + build_index_mappings=not neox_args.weight_by_num_documents, + ) + + if neox_args.weight_by_num_documents: + # gets the number of documents in each datapath + get_num_docs_list = lambda datasets: [ + dataset.indexed_dataset.sizes.shape[0] for dataset in datasets + ] + train_num_docs, valid_num_docs, test_num_docs = ( + get_num_docs_list(train_datasets), + get_num_docs_list(valid_datasets), + get_num_docs_list(test_datasets), + ) + + # builds weights according to alpha + the number of docs + fn = partial( + weights_by_num_docs, alpha=neox_args.weighted_sampler_alpha + ) + train_weights, valid_weights, test_weights = ( + fn(train_num_docs), + fn(valid_num_docs), + fn(test_num_docs), + ) + ( + train_weights, + train_num_samples, + ) = get_normalized_weights_and_num_samples( + train_weights, train_val_test_num_samples[0] + ) + ( + valid_weights, + valid_num_samples, + ) = get_normalized_weights_and_num_samples( + valid_weights, train_val_test_num_samples[1] + ) + test_weights, test_num_samples = get_normalized_weights_and_num_samples( + test_weights, train_val_test_num_samples[2] + ) + + # rebuild datasets weighted according to new weights + train_datasets, valid_datasets, test_datasets = build_weighted_datasets( + neox_args, + train_num_samples, + valid_num_samples, + test_num_samples, + train_val_test_epochs[0], + train_val_test_epochs[1], + train_val_test_epochs[2], + ) + + if train_datasets: + train_ds = BlendableDataset(train_datasets, train_weights) + if valid_datasets: + valid_ds = BlendableDataset(valid_datasets, valid_weights) + if test_datasets: + test_ds = BlendableDataset(test_datasets, test_weights) + else: + # when just data_path is provided + # split dataset into train, valid and test from data_path + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=neox_args.data_path, + use_shared_fs=neox_args.use_shared_fs, + data_impl=neox_args.data_impl, + splits_string=neox_args.split, + train_valid_test_num_samples=train_val_test_num_samples, + train_valid_test_epochs=train_val_test_epochs, + seq_length=neox_args.seq_length, + seed=neox_args.seed, + 
skip_warmup=(not neox_args.mmap_warmup), + pack_impl=neox_args.pack_impl, + allow_chopped=neox_args.allow_chopped, + ) + + # Build dataloders. + train_dataloader = make_data_loader(train_ds, neox_args=neox_args) + valid_dataloader = make_data_loader(valid_ds, neox_args=neox_args) + test_dataloader = make_data_loader(test_ds, neox_args=neox_args) + + # Flags to know if we need to do training/validation/testing. + if neox_args.train_epochs: + do_train = train_dataloader is not None + do_valid = valid_dataloader is not None + do_test = test_dataloader is not None + else: + do_train = train_dataloader is not None and neox_args.train_iters > 0 + do_valid = valid_dataloader is not None and neox_args.eval_iters > 0 + do_test = test_dataloader is not None and neox_args.eval_iters > 0 + + # Need to broadcast num_tokens and num_type_tokens. + flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)]) + else: + flags = torch.cuda.LongTensor([0, 0, 0]) + + # Broadcast num tokens. + if neox_args.is_pipe_parallel: + # Only first/last pipeline stages have data loaders, so pipeline parallelism should + # broadcast globally instead of just the model parallel group. + torch.distributed.broadcast(flags, src=0) + else: + torch.distributed.broadcast( + flags, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + neox_args.do_train = flags[0].item() + neox_args.do_valid = flags[1].item() + neox_args.do_test = flags[2].item() + data_loaders = { + "train": train_dataloader, + "valid": valid_dataloader, + "test": test_dataloader, + } + return data_loaders + + +def shift_and_wrap_data_loaders(neox_args, data_loaders, loop=True): + """Shift start iteration and wrap data_loaders in iterators""" + train_dataloader = data_loaders["train"] + valid_dataloader = data_loaders["valid"] + test_dataloader = data_loaders["test"] + + # Shift the start iterations. + if train_dataloader is not None: + train_dataloader.batch_sampler.start_iter = ( + neox_args.iteration * neox_args.gradient_accumulation_steps + ) % len(train_dataloader) + print_rank_0( + "setting training data start iteration to {}".format( + train_dataloader.batch_sampler.start_iter + ) + ) + if valid_dataloader is not None: + start_iter_val = ( + (neox_args.iteration * neox_args.gradient_accumulation_steps) + // neox_args.eval_interval + ) * neox_args.eval_iters + valid_dataloader.batch_sampler.start_iter = start_iter_val % len( + valid_dataloader + ) + print_rank_0( + "setting validation data start iteration to {}".format( + valid_dataloader.batch_sampler.start_iter + ) + ) + + def loop_iterator(data_loader): + while True: + for x in data_loader: + yield x + data_loader.start_iter = 0 + + # Build iterators. + if train_dataloader is not None: + if loop: + train_data_iterator = cycle(train_dataloader) + else: + train_data_iterator = iter(train_dataloader) + else: + train_data_iterator = None + + if valid_dataloader is not None: + if loop: + valid_data_iterator = cycle(valid_dataloader) + else: + valid_data_iterator = iter(valid_dataloader) + else: + valid_data_iterator = None + + if test_dataloader is not None: + if loop: + test_data_iterator = cycle(test_dataloader) + else: + test_data_iterator = iter(test_dataloader) + else: + test_data_iterator = None + + return train_data_iterator, valid_data_iterator, test_data_iterator + + +def compile_helper(): + """Compile helper function at runtime. 
Make sure this + is invoked on a single process.""" + import os + import subprocess + + path = os.path.abspath(os.path.dirname(__file__)) + ret = subprocess.run(["make", "-C", path]) + if ret.returncode != 0: + print("Making C++ dataset helpers module failed, exiting.") + import sys + + sys.exit(1) diff --git a/megatron/data/gpt2_dataset.py b/megatron/data/gpt2_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..73c21bebd540a67680860f501764998b99ee19b9 --- /dev/null +++ b/megatron/data/gpt2_dataset.py @@ -0,0 +1,482 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GPT2 style dataset.""" + +import os +import time + +import numpy as np +import torch + +from megatron import mpu, print_rank_0 + + +class GPT2Dataset(torch.utils.data.Dataset): + def __init__( + self, + name, + data_prefix, + documents, + indexed_dataset, + num_samples, + num_epochs, + seq_length, + seed, + pack_impl="packed", + allow_chopped=True, + build_index_mappings=True, + use_shared_fs=True, + label_dataset=None, + reward_dataset=None, + ref_dataset=None, + ): + + self.name = name + self.pack_impl = pack_impl + self.allow_chopped = allow_chopped + self.indexed_dataset = indexed_dataset + self.label_dataset = label_dataset + self.reward_dataset = reward_dataset + self.ref_dataset = ref_dataset + self.seq_length = seq_length + + # Checks + assert self.reward_dataset is None or ( + pack_impl == "unpacked" + ), "Reward dataset only supported with unpacked data." + assert np.min(documents) >= 0 + assert np.max(documents) < indexed_dataset.sizes.shape[0] + + if build_index_mappings: + # Build index mappings. + self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings( + self.name, + data_prefix, + documents, + self.indexed_dataset.sizes, + self.label_dataset, + num_samples, + num_epochs, + seq_length, + seed, + self.pack_impl, + use_shared_fs=use_shared_fs, + allow_chopped=self.allow_chopped, + ) + self.shuffle_idx_len = self.shuffle_idx.shape[0] - 1 + self.sample_idx_len = self.sample_idx.shape[0] - 1 + + if self.shuffle_idx_len != self.sample_idx_len - 1: + print( + f"WARNING: shuffle index length ({self.shuffle_idx_len}) is not equal to sample index length ({self.sample_idx_len})" + ) + + def __len__(self): + return min(self.shuffle_idx_len, self.sample_idx_len) + + def __getitem__(self, idx): + try: + # Get the shuffled index. + idx = self.shuffle_idx[idx] + # Start and end documents and offsets. + doc_index_f = self.sample_idx[idx][0] + doc_index_l = self.sample_idx[idx + 1][0] + offset_f = self.sample_idx[idx][1] + offset_l = self.sample_idx[idx + 1][1] + # Labels and texts are supposed to be fully in sync. 
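+            # The datasets list built below is ordered [text, label?, reward?, ref?];
+            # rw_indx records the position of the reward dataset (or -1 if absent).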
+ datasets = [self.indexed_dataset] + rw_indx = 1 + if self.label_dataset is not None: + rw_indx += 1 + datasets.append(self.label_dataset) + if self.reward_dataset is not None: + datasets.append(self.reward_dataset) + else: + rw_indx = -1 + if self.ref_dataset is not None: + datasets.append(self.ref_dataset) + samples = [] + sample_lengths = [] + # If we are within the same document, just extract the chunk. + for n, dataset in enumerate(datasets): + if doc_index_f == doc_index_l: + if rw_indx == n: + # If we are in the reward dataset, we only need the last token. + rw = dataset.get(self.doc_idx[doc_index_f]) + samples.append( + np.array([rw[0] for _ in range(len(samples[-1]))]) + ) + else: + samples.append( + dataset.get( + self.doc_idx[doc_index_f], + offset=offset_f, + length=offset_l - offset_f + 1, + ) + ) + else: + if n != rw_indx: + # reset + sample_lengths = [] + # Otherwise, get the rest of the initial document. + if n == rw_indx: + rw = dataset.get(self.doc_idx[doc_index_f]) + sample_list = [ + np.array([rw[0] for _ in range(sample_lengths.pop(0))]) + ] + else: + sample_list = [ + dataset.get(self.doc_idx[doc_index_f], offset=offset_f) + ] + sample_lengths.append(len(sample_list[-1])) + # Loop over all in between documents and add the entire document. + for i in range(doc_index_f + 1, doc_index_l): + if n == rw_indx: + rw = dataset.get(self.doc_idx[i]) + sample_list.append( + np.array([rw[0] for _ in range(sample_lengths.pop(0))]) + ) + else: + sample_list.append(dataset.get(self.doc_idx[i])) + sample_lengths.append(len(sample_list[-1])) + # And finally add the relevant portion of last document. + if n == rw_indx: + rw = dataset.get(self.doc_idx[doc_index_l]) + sample_list.append( + np.array([rw[0] for _ in range(sample_lengths.pop(0))]) + ) + else: + sample_list.append( + dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1) + ) + sample_lengths.append(len(sample_list[-1])) + samples.append(np.concatenate(sample_list)) + for i in range(len(samples)): + mask = (self.label_dataset is not None) and (i == 1) + if len(samples[i]) < (self.seq_length + 1): + # Pad + samples[i] = np.pad( + samples[i], + (0, (self.seq_length + 1) - len(samples[i])), + mode="constant", + constant_values=-100 if mask else 0, + ) + elif len(samples[i]) > (self.seq_length + 1): + # Truncate + samples[i] = samples[i][: (self.seq_length + 1)] + ret = {"text": np.array(samples[0], dtype=np.int64)} + next_idx = 1 + if self.label_dataset is not None: + ret["label"] = np.array(samples[next_idx], dtype=np.int64) + next_idx += 1 + if self.reward_dataset is not None: + ret["reward"] = np.array(samples[next_idx], dtype=np.float32) + next_idx += 1 + if self.ref_dataset is not None: + ret["ref"] = np.array(samples[next_idx], dtype=np.float32) + return ret + except IndexError as err: + new_idx = idx % len(self) + print( + f"WARNING: Got index out of bounds error with index {idx} - taking modulo of index instead ({new_idx}), error: {err}" + ) + return self[new_idx] + + +def _build_index_mappings( + name, + data_prefix, + documents, + sizes, + label_dataset, + num_samples, + num_epochs, + seq_length, + seed, + packing_impl, + use_shared_fs=True, + allow_chopped=True, +): + """Build doc-idx, sample-idx, and shuffle-idx. + doc-idx: is an array (ordered) of documents to be used in training. + sample-idx: is the start document index and document offset for each + training sample. + shuffle-idx: maps the sample index into a random index into sample-idx. 
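+    All three arrays are cached as .npy files whose names encode the sample count,
+    sequence length, seed, and packing settings, so later runs can simply mmap them.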
+ """ + # Number of tokens in each epoch and number of required epochs. + tokens_per_epoch = _num_tokens(documents, sizes) + if not num_epochs: + num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples) + # rng state + np_rng = np.random.RandomState(seed=seed) + + # Filename of the index mappings. + _filename = data_prefix + _filename += "_{}_indexmap".format(name) + _filename += "_{}ns".format(num_samples) + _filename += "_{}sl".format(seq_length) + _filename += "_{}s".format(seed) + _filename += "_{}pi".format(packing_impl) + if allow_chopped: + _filename += "_ac" + doc_idx_filename = _filename + "_doc_idx.npy" + sample_idx_filename = _filename + "_sample_idx.npy" + shuffle_idx_filename = _filename + "_shuffle_idx.npy" + + if not use_shared_fs: + should_process_dataset = int(os.environ["LOCAL_RANK"]) == 0 + else: + should_process_dataset = torch.distributed.get_rank() == 0 + + # Build the indexed mapping if not exist. + if should_process_dataset: + if ( + (not os.path.isfile(doc_idx_filename)) + or (not os.path.isfile(sample_idx_filename)) + or (not os.path.isfile(shuffle_idx_filename)) + ): + print_rank_0( + " > WARNING: could not find index map files, building " + "the indices on rank 0 ..." + ) + # doc-idx. + start_time = time.time() + if packing_impl == "packed": + doc_idx = _build_doc_idx(documents, num_epochs, np_rng) + np.save(doc_idx_filename, doc_idx, allow_pickle=True) + print_rank_0( + " > elapsed time to build and save doc-idx mapping " + "(seconds): {:4f}".format(time.time() - start_time) + ) + # sample-idx. + start_time = time.time() + # Use C++ implementation for speed. + from megatron.data import helpers + + assert doc_idx.dtype == np.int32 + assert sizes.dtype == np.int32 + + num_samples = (num_epochs * tokens_per_epoch - 1) / seq_length + if 2 * (num_samples + 1) < np.iinfo(np.int32).max: + sample_idx = helpers.build_sample_idx_int32( + sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch + ) + else: + sample_idx = helpers.build_sample_idx_int64( + sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch + ) + np.save(sample_idx_filename, sample_idx, allow_pickle=True) + print_rank_0( + " > elapsed time to build and save sample-idx mapping " + "(seconds): {:4f}".format(time.time() - start_time) + ) + # shuffle-idx. + start_time = time.time() + # -1 is due to data structure used to retrieve the index: + # sample i --> [sample_idx[i], sample_idx[i+1]) + shuffle_idx = _build_shuffle_idx(sample_idx.shape[0] - 1, np_rng) + np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True) + print_rank_0( + " > elapsed time to build and save shuffle-idx mapping" + " (seconds): {:4f}".format(time.time() - start_time) + ) + elif packing_impl == "pack_until_overflow": + # Naively pack data until it overflows, then roll it over to a new one instead. + shuffle_idx = np.arange(num_samples) # Shuffle index around epochs + np_rng.shuffle(shuffle_idx) + sample_idx = [] + doc_idx = [] + # Iterate over files until we have enough samples. + temp_shuffle_idx = np.arange(len(documents)) + np_rng.shuffle(temp_shuffle_idx) + running_length = 0 + curr_shuffle_idx = 0 + while len(sample_idx) < num_samples: + if not allow_chopped: + # +1 since we shift left/right by 1 + if sizes[temp_shuffle_idx[curr_shuffle_idx]] > seq_length + 1: + curr_shuffle_idx += 1 + continue + # First, check if we need to skip this item... 
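+                    # Documents whose labels are entirely -100 (fully masked) are
+                    # skipped, since they would contribute nothing to the loss.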
+                    if label_dataset is not None:
+                        if np.all(
+                            label_dataset.get(temp_shuffle_idx[curr_shuffle_idx])[
+                                : seq_length + 1
+                            ]
+                            == -100
+                        ):
+                            curr_shuffle_idx += 1
+                            continue
+                    doc_length = sizes[temp_shuffle_idx[curr_shuffle_idx]]
+                    if running_length == 0:
+                        sample_idx.append(np.array([len(doc_idx), 0]))
+                        doc_idx.append(temp_shuffle_idx[curr_shuffle_idx])
+                        running_length += doc_length
+                    else:
+                        if running_length + doc_length > (seq_length + 1):
+                            running_length = doc_length
+                            sample_idx.append(np.array([len(doc_idx), 0]))
+                        else:
+                            running_length += doc_length
+                        doc_idx.append(temp_shuffle_idx[curr_shuffle_idx])
+                    curr_shuffle_idx += 1
+                    if curr_shuffle_idx == len(documents):
+                        curr_shuffle_idx = 0
+                        np_rng.shuffle(temp_shuffle_idx)
+                sample_idx.append(np.array([len(doc_idx), 0]))
+                np.save(doc_idx_filename, doc_idx, allow_pickle=True)
+                np.save(sample_idx_filename, sample_idx, allow_pickle=True)
+                np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
+            elif packing_impl == "unpacked":
+                # Unpacked data, one sample per document.
+                shuffle_idx = np.arange(num_samples)  # Shuffle index around epochs
+                np_rng.shuffle(shuffle_idx)
+                sample_idx = np.zeros((num_samples + 1, 2), dtype=np.int64)
+                sample_idx[:, 0] = np.array([i for i in range(num_samples + 1)])
+                sample_idx[:, 1] = 0
+                doc_idx = list()
+                doc_i = 0
+                while len(doc_idx) <= num_samples:
+                    if not allow_chopped:
+                        # +1 since we shift left/right by 1
+                        if sizes[doc_i] > seq_length + 1:
+                            doc_i = (doc_i + 1) % len(documents)
+                            continue
+                    # Skip fully-masked documents, in case we have bad data in the loop...
+                    if label_dataset is not None and np.all(
+                        label_dataset.get(doc_i)[:seq_length] == -100
+                    ):
+                        doc_i = (doc_i + 1) % len(documents)
+                        continue
+                    doc_idx.append(doc_i)
+                    doc_i = (doc_i + 1) % len(documents)
+                np.save(doc_idx_filename, doc_idx, allow_pickle=True)
+                np.save(sample_idx_filename, sample_idx, allow_pickle=True)
+                np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
+
+    # This should be a barrier, but the NCCL barrier assumes
+    # device_index == rank, which does not hold in the
+    # model-parallel case.
+    counts = torch.cuda.LongTensor([1])
+    torch.distributed.all_reduce(counts, group=mpu.get_io_parallel_group())
+    assert counts[0].item() == torch.distributed.get_world_size(
+        group=mpu.get_io_parallel_group()
+    )
+
+    # Load mappings.
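+    # Every rank (not just the building rank) then reads the cached mappings
+    # back with mmap_mode="r", which keeps host memory usage low.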
+ start_time = time.time() + print_rank_0(" > loading doc-idx mapping from {}".format(doc_idx_filename)) + doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0(" > loading sample-idx mapping from {}".format(sample_idx_filename)) + sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0(" > loading shuffle-idx mapping from {}".format(shuffle_idx_filename)) + shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0( + " loaded indexed file in {:3.3f} seconds".format(time.time() - start_time) + ) + print_rank_0(" total number of samples: {}".format(sample_idx.shape[0])) + print_rank_0(" total number of epochs: {}".format(num_epochs)) + + return doc_idx, sample_idx, shuffle_idx + + +def _num_tokens(documents, sizes): + """Total number of tokens in the dataset.""" + return np.sum(sizes[documents]) + + +def _num_epochs(tokens_per_epoch, seq_length, num_samples): + """Based on number of samples and sequence length, calculate how many + epochs will be needed.""" + num_epochs = 0 + total_tokens = 0 + while True: + num_epochs += 1 + total_tokens += tokens_per_epoch + # -1 is because we need to retrieve seq_length + 1 token each time + # but the last token will overlap with the first token of the next + # sample except for the last sample. + if ((total_tokens - 1) // seq_length) >= num_samples: + return num_epochs + + +def _build_doc_idx(documents, num_epochs, np_rng): + """Build an array with length = number-of-epochs * number-of-documents. + Each index is mapped to a corresponding document.""" + doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1] + doc_idx[:] = documents + doc_idx = doc_idx.reshape(-1) + doc_idx = doc_idx.astype(np.int32) + np_rng.shuffle(doc_idx) + return doc_idx + + +def _build_sample_idx(sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch): + """Sample index mapping is a 2D array with sizes + [number-of-samples + 1, 2] where [..., 0] contains + the index into `doc_idx` and [..., 1] is the + starting offset in that document.""" + + # Total number of samples. For -1 see comments in `_num_epochs`. + num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length + sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int64) + + # Index into sample_idx. + sample_index = 0 + # Index into doc_idx. + doc_idx_index = 0 + # Beginning offset for each document. + doc_offset = 0 + # Start with first document and no offset. + sample_idx[sample_index][0] = doc_idx_index + sample_idx[sample_index][1] = doc_offset + sample_index += 1 + while sample_index <= num_samples: + # Start with a fresh sequence. + remaining_seq_length = seq_length + 1 + while remaining_seq_length != 0: + # Get the document length. + doc_id = doc_idx[doc_idx_index] + doc_length = sizes[doc_id] - doc_offset + # And add it to the current sequence. + remaining_seq_length -= doc_length + # If we have more than a full sequence, adjust offset and set + # remaining length to zero so we return from the while loop. + # Note that -1 here is for the same reason we have -1 in + # `_num_epochs` calculations. + if remaining_seq_length <= 0: + doc_offset += remaining_seq_length + doc_length - 1 + remaining_seq_length = 0 + else: + # Otherwise, start from the beginning of the next document. + doc_idx_index += 1 + doc_offset = 0 + # Record the sequence. 
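+        # (doc_idx_index, doc_offset) now mark where the next sample begins.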
+        sample_idx[sample_index][0] = doc_idx_index
+        sample_idx[sample_index][1] = doc_offset
+        sample_index += 1
+
+    return sample_idx
+
+
+def _build_shuffle_idx(size, np_rng):
+    """Build the range [0, size) and shuffle."""
+    dtype_ = np.uint32
+    if size >= (np.iinfo(np.uint32).max - 1):
+        dtype_ = np.int64
+    shuffle_idx = np.arange(start=0, stop=size, step=1, dtype=dtype_)
+    np_rng.shuffle(shuffle_idx)
+    return shuffle_idx
diff --git a/megatron/data/helpers.cpp b/megatron/data/helpers.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aca2908549d99f90309810d91e3057031cf7d3be
--- /dev/null
+++ b/megatron/data/helpers.cpp
@@ -0,0 +1,756 @@
+/*
+ coding=utf-8
+ Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+/* Helper methods for fast index mapping builds */
+
+#include <algorithm>
+#include <iostream>
+#include <limits>
+#include <math.h>
+#include <stdexcept>
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+#include <random>
+
+namespace py = pybind11;
+using namespace std;
+
+const int32_t LONG_SENTENCE_LEN = 512;
+
+void build_blending_indices(py::array_t<uint8_t>& dataset_index,
+                            py::array_t<int64_t>& dataset_sample_index,
+                            const py::array_t<double>& weights,
+                            const int32_t num_datasets,
+                            const int64_t size,
+                            const bool verbose)
+{
+    /* Given multiple datasets and a weighting array, build samples
+     such that it follows those weights.*/
+
+    if (verbose) { std::cout << "> building indices for blendable datasets ..." << std::endl; }
+
+    // Get the pointer access without the checks.
+    auto dataset_index_ptr = dataset_index.mutable_unchecked<1>();
+    auto dataset_sample_index_ptr = dataset_sample_index.mutable_unchecked<1>();
+    auto weights_ptr = weights.unchecked<1>();
+
+    // Initialize buffer for number of samples used for each dataset.
+    int64_t current_samples[num_datasets];
+    for (int64_t i = 0; i < num_datasets; ++i) { current_samples[i] = 0; }
+
+    // For each sample:
+    for (int64_t sample_idx = 0; sample_idx < size; ++sample_idx) {
+        // Determine where the max error in sampling is happening.
+        double sample_idx_double = std::max(static_cast<double>(sample_idx), 1.0);
+        int64_t max_error_index = 0;
+        double max_error =
+            weights_ptr[0] * sample_idx_double - static_cast<double>(current_samples[0]);
+        for (int64_t dataset_idx = 1; dataset_idx < num_datasets; ++dataset_idx) {
+            double error = weights_ptr[dataset_idx] * sample_idx_double -
+                           static_cast<double>(current_samples[dataset_idx]);
+            if (error > max_error) {
+                max_error = error;
+                max_error_index = dataset_idx;
+            }
+        }
+
+        // Populate the indices.
+        dataset_index_ptr[sample_idx] = static_cast<uint8_t>(max_error_index);
+        dataset_sample_index_ptr[sample_idx] = current_samples[max_error_index];
+
+        // Update the total samples.
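+        // (This greedy pick of the dataset with the largest accumulated error keeps
+        // each dataset's realized share close to its target weight at every step.)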
+        current_samples[max_error_index] += 1;
+    }
+
+    // print info
+    if (verbose) {
+        std::cout << " > sample ratios:" << std::endl;
+        for (int64_t dataset_idx = 0; dataset_idx < num_datasets; ++dataset_idx) {
+            auto ratio =
+                static_cast<double>(current_samples[dataset_idx]) / static_cast<double>(size);
+            std::cout << "   dataset " << dataset_idx << ", input: " << weights_ptr[dataset_idx]
+                      << ", achieved: " << ratio << std::endl;
+        }
+    }
+}
+
+py::array build_sample_idx_int32(const py::array_t<int32_t>& sizes_,
+                                 const py::array_t<int32_t>& doc_idx_,
+                                 const int32_t seq_length,
+                                 const int32_t num_epochs,
+                                 const int64_t tokens_per_epoch)
+{
+    /* Sample index (sample_idx) is used for gpt2 like dataset for which
+     the documents are flattened and the samples are built based on this
+     1-D flatten array. It is a 2D array with sizes [number-of-samples + 1, 2]
+     where [..., 0] contains the index into `doc_idx` and [..., 1] is the
+     starting offset in that document.*/
+
+    // Consistency checks.
+    assert(seq_length > 1);
+    assert(num_epochs > 0);
+    assert(tokens_per_epoch > 1);
+
+    // Remove bound checks.
+    auto sizes = sizes_.unchecked<1>();
+    auto doc_idx = doc_idx_.unchecked<1>();
+
+    // Mapping and its length (1D).
+    int64_t num_samples = (num_epochs * tokens_per_epoch - 1) / seq_length;
+    int32_t* sample_idx = new int32_t[2 * (num_samples + 1)];
+
+    cout << "    using:" << endl << std::flush;
+    cout << "     number of documents:     " << doc_idx_.shape(0) / num_epochs << endl
+         << std::flush;
+    cout << "     number of epochs:        " << num_epochs << endl << std::flush;
+    cout << "     sequence length:         " << seq_length << endl << std::flush;
+    cout << "     total number of samples: " << num_samples << endl << std::flush;
+
+    // Index into sample_idx.
+    int64_t sample_index = 0;
+    // Index into doc_idx.
+    int64_t doc_idx_index = 0;
+    // Beginning offset for each document.
+    int32_t doc_offset = 0;
+    // Start with first document and no offset.
+    sample_idx[2 * sample_index] = doc_idx_index;
+    sample_idx[2 * sample_index + 1] = doc_offset;
+    ++sample_index;
+
+    while (sample_index <= num_samples) {
+        // Start with a fresh sequence.
+        int32_t remaining_seq_length = seq_length + 1;
+        while (remaining_seq_length != 0) {
+            // Get the document length.
+            auto doc_id = doc_idx[doc_idx_index];
+            auto doc_length = sizes[doc_id] - doc_offset;
+            // And add it to the current sequence.
+            remaining_seq_length -= doc_length;
+            // If we have more than a full sequence, adjust offset and set
+            // remaining length to zero so we return from the while loop.
+            // Note that -1 here is for the same reason we have -1 in
+            // `_num_epochs` calculations.
+            if (remaining_seq_length <= 0) {
+                doc_offset += (remaining_seq_length + doc_length - 1);
+                remaining_seq_length = 0;
+            } else {
+                // Otherwise, start from the beginning of the next document.
+                ++doc_idx_index;
+                doc_offset = 0;
+            }
+        }
+        // Record the sequence.
+        sample_idx[2 * sample_index] = doc_idx_index;
+        sample_idx[2 * sample_index + 1] = doc_offset;
+        ++sample_index;
+    }
+
+    // Method to deallocate memory.
+    py::capsule free_when_done(sample_idx, [](void* mem_) {
+        int32_t* mem = reinterpret_cast<int32_t*>(mem_);
+        delete[] mem;
+    });
+
+    // Return the numpy array.
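+    // The capsule hands ownership of the raw buffer to numpy; the deleter above
+    // runs when the returned array is garbage collected, so no copy is made.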
+    const auto byte_size = sizeof(int32_t);
+    return py::array(std::vector<int64_t>{num_samples + 1, 2},  // shape
+                     {2 * byte_size, byte_size},                // C-style contiguous strides
+                     sample_idx,                                // the data pointer
+                     free_when_done);                           // numpy array references
+}
+
+py::array build_sample_idx_int64(const py::array_t<int32_t>& sizes_,
+                                 const py::array_t<int32_t>& doc_idx_,
+                                 const int32_t seq_length,
+                                 const int32_t num_epochs,
+                                 const int64_t tokens_per_epoch)
+{
+    /* Sample index (sample_idx) is used for gpt2 like dataset for which
+     the documents are flattened and the samples are built based on this
+     1-D flatten array. It is a 2D array with sizes [number-of-samples + 1, 2]
+     where [..., 0] contains the index into `doc_idx` and [..., 1] is the
+     starting offset in that document.*/
+
+    // Consistency checks.
+    assert(seq_length > 1);
+    assert(num_epochs > 0);
+    assert(tokens_per_epoch > 1);
+
+    // Remove bound checks.
+    auto sizes = sizes_.unchecked<1>();
+    auto doc_idx = doc_idx_.unchecked<1>();
+
+    // Mapping and its length (1D).
+    int64_t num_samples = (num_epochs * tokens_per_epoch - 1) / seq_length;
+    int64_t* sample_idx = new int64_t[2 * (num_samples + 1)];
+
+    cout << "    using:" << endl << std::flush;
+    cout << "     number of documents:     " << doc_idx_.shape(0) / num_epochs << endl
+         << std::flush;
+    cout << "     number of epochs:        " << num_epochs << endl << std::flush;
+    cout << "     sequence length:         " << seq_length << endl << std::flush;
+    cout << "     total number of samples: " << num_samples << endl << std::flush;
+
+    // Index into sample_idx.
+    int64_t sample_index = 0;
+    // Index into doc_idx.
+    int64_t doc_idx_index = 0;
+    // Beginning offset for each document.
+    int32_t doc_offset = 0;
+    // Start with first document and no offset.
+    sample_idx[2 * sample_index] = doc_idx_index;
+    sample_idx[2 * sample_index + 1] = doc_offset;
+    ++sample_index;
+
+    while (sample_index <= num_samples) {
+        // Start with a fresh sequence.
+        int32_t remaining_seq_length = seq_length + 1;
+        while (remaining_seq_length != 0) {
+            // Get the document length.
+            auto doc_id = doc_idx[doc_idx_index];
+            auto doc_length = sizes[doc_id] - doc_offset;
+            // And add it to the current sequence.
+            remaining_seq_length -= doc_length;
+            // If we have more than a full sequence, adjust offset and set
+            // remaining length to zero so we return from the while loop.
+            // Note that -1 here is for the same reason we have -1 in
+            // `_num_epochs` calculations.
+            if (remaining_seq_length <= 0) {
+                doc_offset += (remaining_seq_length + doc_length - 1);
+                remaining_seq_length = 0;
+            } else {
+                // Otherwise, start from the beginning of the next document.
+                ++doc_idx_index;
+                doc_offset = 0;
+            }
+        }
+        // Record the sequence.
+        sample_idx[2 * sample_index] = doc_idx_index;
+        sample_idx[2 * sample_index + 1] = doc_offset;
+        ++sample_index;
+    }
+
+    // Method to deallocate memory.
+    py::capsule free_when_done(sample_idx, [](void* mem_) {
+        int64_t* mem = reinterpret_cast<int64_t*>(mem_);
+        delete[] mem;
+    });
+
+    // Return the numpy array.
+    const auto byte_size = sizeof(int64_t);
+    return py::array(std::vector<int64_t>{num_samples + 1, 2},  // shape
+                     {2 * byte_size, byte_size},                // C-style contiguous strides
+                     sample_idx,                                // the data pointer
+                     free_when_done);                           // numpy array references
+}
+
+inline int32_t get_target_sample_len(const int32_t short_seq_ratio,
+                                     const int32_t max_length,
+                                     std::mt19937& rand32_gen)
+{
+    /* Training sample length.
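+       With probability roughly 1/short_seq_ratio a short length in [2, max_length]
+       is drawn; otherwise the full max_length is used.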
+     */
+    const auto random_number = rand32_gen();
+    if ((random_number % short_seq_ratio) == 0) { return 2 + random_number % (max_length - 1); }
+    return max_length;
+}
+
+template <typename DocIdx>
+py::array build_mapping_impl(const py::array_t<int64_t>& docs_,
+                             const py::array_t<int32_t>& sizes_,
+                             const int32_t num_epochs,
+                             const uint64_t max_num_samples,
+                             const int32_t max_seq_length,
+                             const double short_seq_prob,
+                             const int32_t seed,
+                             const bool verbose)
+{
+    /* Build a mapping of (start-index, end-index, sequence-length) where
+       start and end index are the indices of the sentences in the sample
+       and sequence-length is the target sequence length.
+    */
+
+    // Consistency checks.
+    assert(num_epochs > 0);
+    assert(max_seq_length > 1);
+    assert(short_seq_prob > 0.0);
+    assert(short_seq_prob <= 1.0);
+    assert(seed > 0);
+
+    // Remove bound checks.
+    auto docs = docs_.unchecked<1>();
+    auto sizes = sizes_.unchecked<1>();
+
+    // For efficiency, convert probability to ratio. Note: rand() generates int.
+    const auto short_seq_ratio = static_cast<int32_t>(round(1.0 / short_seq_prob));
+
+    if (verbose) {
+        const auto sent_start_index = docs[0];
+        const auto sent_end_index = docs[docs_.shape(0) - 1];
+        const auto num_sentences = sent_end_index - sent_start_index;
+        cout << "    using:" << endl << std::flush;
+        cout << "     number of documents:           " << docs_.shape(0) - 1 << endl << std::flush;
+        cout << "     sentences range:               [" << sent_start_index << ", "
+             << sent_end_index << ")" << endl
+             << std::flush;
+        cout << "     total number of sentences:     " << num_sentences << endl << std::flush;
+        cout << "     number of epochs:              " << num_epochs << endl << std::flush;
+        cout << "     maximum number of samples:     " << max_num_samples << endl << std::flush;
+        cout << "     maximum sequence length:       " << max_seq_length << endl << std::flush;
+        cout << "     short sequence probability:    " << short_seq_prob << endl << std::flush;
+        cout << "     short sequence ratio (1/prob): " << short_seq_ratio << endl << std::flush;
+        cout << "     seed:                          " << seed << endl << std::flush;
+    }
+
+    // Mapping and its length (1D).
+    int64_t num_samples = -1;
+    DocIdx* maps = NULL;
+
+    // Perform two iterations, in the first iteration get the size
+    // and allocate memory and in the second iteration populate the map.
+    bool second = false;
+    for (int32_t iteration = 0; iteration < 2; ++iteration) {
+        // Set the seed so both iterations produce the same results.
+        std::mt19937 rand32_gen(seed);
+
+        // Set the flag on second iteration.
+        second = (iteration == 1);
+
+        // Counters:
+        uint64_t empty_docs = 0;
+        uint64_t one_sent_docs = 0;
+        uint64_t long_sent_docs = 0;
+
+        // Current map index.
+        uint64_t map_index = 0;
+
+        // For each epoch:
+        for (int32_t epoch = 0; epoch < num_epochs; ++epoch) {
+            if (map_index >= max_num_samples) {
+                if (verbose && (!second)) {
+                    cout << "    reached " << max_num_samples << " samples after " << epoch
+                         << " epochs ..." << endl
+                         << std::flush;
+                }
+                break;
+            }
+            // For each document:
+            for (int32_t doc = 0; doc < (docs.shape(0) - 1); ++doc) {
+                // Document sentences are in [sent_index_first, sent_index_last)
+                const auto sent_index_first = docs[doc];
+                const auto sent_index_last = docs[doc + 1];
+
+                // At the beginning of the document previous index is the
+                // start index.
+                auto prev_start_index = sent_index_first;
+
+                // Remaining sentences in the document.
+                auto num_remain_sent = sent_index_last - sent_index_first;
+
+                // Some bookkeeping
+                if ((epoch == 0) && (!second)) {
+                    if (num_remain_sent == 0) { ++empty_docs; }
+                    if (num_remain_sent == 1) { ++one_sent_docs; }
+                }
+
+                // Detect documents with long sentences.
+                bool contains_long_sentence = false;
+                if (num_remain_sent > 1) {
+                    for (auto sent_index = sent_index_first; sent_index < sent_index_last;
+                         ++sent_index) {
+                        if (sizes[sent_index] > LONG_SENTENCE_LEN) {
+                            if ((epoch == 0) && (!second)) { ++long_sent_docs; }
+                            contains_long_sentence = true;
+                            break;
+                        }
+                    }
+                }
+
+                // If we have more than two sentences.
+                if ((num_remain_sent > 1) && (!contains_long_sentence)) {
+                    // Set values.
+                    auto seq_len = int32_t{0};
+                    auto num_sent = int32_t{0};
+                    auto target_seq_len =
+                        get_target_sample_len(short_seq_ratio, max_seq_length, rand32_gen);
+
+                    // Loop through sentences.
+                    for (auto sent_index = sent_index_first; sent_index < sent_index_last;
+                         ++sent_index) {
+                        // Add the size and number of sentences.
+                        seq_len += sizes[sent_index];
+                        ++num_sent;
+                        --num_remain_sent;
+
+                        // If we have reached the target length.
+                        // and if not only one sentence is left in the document.
+                        // and if we have at least two sentences.
+                        // and if we have reached end of the document.
+                        if (((seq_len >= target_seq_len) && (num_remain_sent > 1) &&
+                             (num_sent > 1)) ||
+                            (num_remain_sent == 0)) {
+                            // Check for overflow.
+                            if ((3 * map_index + 2) > std::numeric_limits<int64_t>::max()) {
+                                cout << "number of samples exceeded maximum "
+                                     << "allowed by type int64: "
+                                     << std::numeric_limits<int64_t>::max() << endl;
+                                throw std::overflow_error("Number of samples");
+                            }
+
+                            // Populate the map.
+                            if (second) {
+                                const auto map_index_0 = 3 * map_index;
+                                maps[map_index_0] = static_cast<DocIdx>(prev_start_index);
+                                maps[map_index_0 + 1] = static_cast<DocIdx>(sent_index + 1);
+                                maps[map_index_0 + 2] = static_cast<DocIdx>(target_seq_len);
+                            }
+
+                            // Update indices / counters.
+                            ++map_index;
+                            prev_start_index = sent_index + 1;
+                            target_seq_len =
+                                get_target_sample_len(short_seq_ratio, max_seq_length, rand32_gen);
+                            seq_len = 0;
+                            num_sent = 0;
+                        }
+
+                    }  // for (auto sent_index=sent_index_first; ...
+                }      // if (num_remain_sent > 1) {
+            }          // for (int doc=0; doc < num_docs; ++doc) {
+        }              // for (int epoch=0; epoch < num_epochs; ++epoch) {
+
+        if (!second) {
+            if (verbose) {
+                cout << "   number of empty documents: " << empty_docs << endl << std::flush;
+                cout << "   number of documents with one sentence: " << one_sent_docs << endl
+                     << std::flush;
+                cout << "   number of documents with long sentences: " << long_sent_docs << endl
+                     << std::flush;
+                cout << "   will create mapping for " << map_index << " samples" << endl
+                     << std::flush;
+            }
+            assert(maps == NULL);
+            assert(num_samples < 0);
+            maps = new DocIdx[3 * map_index];
+            num_samples = static_cast<int64_t>(map_index);
+        }
+
+    }  // for (int iteration=0; iteration < 2; ++iteration) {
+
+    // Shuffle.
+    // We need a 64 bit random number generator as we might have more
+    // than 2 billion samples.
+    std::mt19937_64 rand64_gen(seed + 1);
+    for (auto i = (num_samples - 1); i > 0; --i) {
+        const auto j = static_cast<int64_t>(rand64_gen() % (i + 1));
+        const auto i0 = 3 * i;
+        const auto j0 = 3 * j;
+        // Swap values.
+        swap(maps[i0], maps[j0]);
+        swap(maps[i0 + 1], maps[j0 + 1]);
+        swap(maps[i0 + 2], maps[j0 + 2]);
+    }
+
+    // Method to deallocate memory.
+    py::capsule free_when_done(maps, [](void* mem_) {
+        DocIdx* mem = reinterpret_cast<DocIdx*>(mem_);
+        delete[] mem;
+    });
+
+    // Return the numpy array.
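+    // Each row of the (num_samples, 3) result is [start sentence index,
+    // end sentence index, target sequence length] for one sample.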
+    const auto byte_size = sizeof(DocIdx);
+    return py::array(std::vector<int64_t>{num_samples, 3},  // shape
+                     {3 * byte_size, byte_size},            // C-style contiguous strides
+                     maps,                                  // the data pointer
+                     free_when_done);                       // numpy array references
+}
+
+py::array build_mapping(const py::array_t<int64_t>& docs_,
+                        const py::array_t<int32_t>& sizes_,
+                        const int num_epochs,
+                        const uint64_t max_num_samples,
+                        const int max_seq_length,
+                        const double short_seq_prob,
+                        const int seed,
+                        const bool verbose)
+{
+    if (sizes_.size() > std::numeric_limits<uint32_t>::max()) {
+        if (verbose) { cout << "    using uint64 for data mapping..." << endl << std::flush; }
+        return build_mapping_impl<uint64_t>(docs_,
+                                            sizes_,
+                                            num_epochs,
+                                            max_num_samples,
+                                            max_seq_length,
+                                            short_seq_prob,
+                                            seed,
+                                            verbose);
+    } else {
+        if (verbose) { cout << "    using uint32 for data mapping..." << endl << std::flush; }
+        return build_mapping_impl<uint32_t>(docs_,
+                                            sizes_,
+                                            num_epochs,
+                                            max_num_samples,
+                                            max_seq_length,
+                                            short_seq_prob,
+                                            seed,
+                                            verbose);
+    }
+}
+
+template <typename DocIdx>
+py::array build_blocks_mapping_impl(const py::array_t<int64_t>& docs_,
+                                    const py::array_t<int32_t>& sizes_,
+                                    const py::array_t<int32_t>& titles_sizes_,
+                                    const int32_t num_epochs,
+                                    const uint64_t max_num_samples,
+                                    const int32_t max_seq_length,
+                                    const int32_t seed,
+                                    const bool verbose,
+                                    const bool use_one_sent_blocks)
+{
+    /* Build a mapping of (start-index, end-index, sequence-length) where
+       start and end index are the indices of the sentences in the sample
+       and sequence-length is the target sequence length.
+    */
+
+    // Consistency checks.
+    assert(num_epochs > 0);
+    assert(max_seq_length > 1);
+    assert(seed > 0);
+
+    // Remove bound checks.
+    auto docs = docs_.unchecked<1>();
+    auto sizes = sizes_.unchecked<1>();
+    auto titles_sizes = titles_sizes_.unchecked<1>();
+
+    if (verbose) {
+        const auto sent_start_index = docs[0];
+        const auto sent_end_index = docs[docs_.shape(0) - 1];
+        const auto num_sentences = sent_end_index - sent_start_index;
+        cout << "    using:" << endl << std::flush;
+        cout << "     number of documents:           " << docs_.shape(0) - 1 << endl << std::flush;
+        cout << "     sentences range:               [" << sent_start_index << ", "
+             << sent_end_index << ")" << endl
+             << std::flush;
+        cout << "     total number of sentences:     " << num_sentences << endl << std::flush;
+        cout << "     number of epochs:              " << num_epochs << endl << std::flush;
+        cout << "     maximum number of samples:     " << max_num_samples << endl << std::flush;
+        cout << "     maximum sequence length:       " << max_seq_length << endl << std::flush;
+        cout << "     seed:                          " << seed << endl << std::flush;
+    }
+
+    // Mapping and its length (1D).
+    int64_t num_samples = -1;
+    DocIdx* maps = NULL;
+
+    // Acceptable number of sentences per block.
+    int min_num_sent = 2;
+    if (use_one_sent_blocks) { min_num_sent = 1; }
+
+    // Perform two iterations, in the first iteration get the size
+    // and allocate memory and in the second iteration populate the map.
+    bool second = false;
+    for (int32_t iteration = 0; iteration < 2; ++iteration) {
+        // Set the flag on second iteration.
+        second = (iteration == 1);
+
+        // Current map index.
+        uint64_t map_index = 0;
+
+        uint64_t empty_docs = 0;
+        uint64_t one_sent_docs = 0;
+        uint64_t long_sent_docs = 0;
+        // For each epoch:
+        for (int32_t epoch = 0; epoch < num_epochs; ++epoch) {
+            // assign every block a unique id
+            int32_t block_id = 0;
+
+            if (map_index >= max_num_samples) {
+                if (verbose && (!second)) {
+                    cout << "    reached " << max_num_samples << " samples after " << epoch
+                         << " epochs ..."
+                         << endl
+                         << std::flush;
+                }
+                break;
+            }
+            // For each document:
+            for (int32_t doc = 0; doc < (docs.shape(0) - 1); ++doc) {
+                // Document sentences are in [sent_index_first, sent_index_last)
+                const auto sent_index_first = docs[doc];
+                const auto sent_index_last = docs[doc + 1];
+                const auto target_seq_len = max_seq_length - titles_sizes[doc];
+
+                // At the beginning of the document the previous index is the
+                // start index.
+                auto prev_start_index = sent_index_first;
+
+                // Remaining sentences.
+                auto num_remain_sent = sent_index_last - sent_index_first;
+
+                // Some bookkeeping
+                if ((epoch == 0) && (!second)) {
+                    if (num_remain_sent == 0) { ++empty_docs; }
+                    if (num_remain_sent == 1) { ++one_sent_docs; }
+                }
+                // Detect documents with long sentences.
+                bool contains_long_sentence = false;
+                if (num_remain_sent >= min_num_sent) {
+                    for (auto sent_index = sent_index_first; sent_index < sent_index_last;
+                         ++sent_index) {
+                        if (sizes[sent_index] > LONG_SENTENCE_LEN) {
+                            if ((epoch == 0) && (!second)) { ++long_sent_docs; }
+                            contains_long_sentence = true;
+                            break;
+                        }
+                    }
+                }
+                // If we have enough sentences and no long sentences.
+                if ((num_remain_sent >= min_num_sent) && (!contains_long_sentence)) {
+                    // Set values.
+                    auto seq_len = int32_t{0};
+                    auto num_sent = int32_t{0};
+
+                    // Loop through sentences.
+                    for (auto sent_index = sent_index_first; sent_index < sent_index_last;
+                         ++sent_index) {
+                        // Add the size and number of sentences.
+                        seq_len += sizes[sent_index];
+                        ++num_sent;
+                        --num_remain_sent;
+
+                        // Emit a block once we have reached the target length and
+                        // both the block and the remainder of the document hold an
+                        // acceptable number of sentences -- or once we have reached
+                        // the end of the document.
+                        if (((seq_len >= target_seq_len) && (num_remain_sent >= min_num_sent) &&
+                             (num_sent >= min_num_sent)) ||
+                            (num_remain_sent == 0)) {
+                            // Populate the map.
+                            if (second) {
+                                const auto map_index_0 = 4 * map_index;
+                                // Each sample has 4 items: the starting sentence index, the
+                                // ending sentence index, the index of the document from which
+                                // the block comes (used for fetching titles), and the unique
+                                // id of the block (used for creating block indexes)
+
+                                maps[map_index_0] = static_cast<DocIdx>(prev_start_index);
+                                maps[map_index_0 + 1] = static_cast<DocIdx>(sent_index + 1);
+                                maps[map_index_0 + 2] = static_cast<DocIdx>(doc);
+                                maps[map_index_0 + 3] = static_cast<DocIdx>(block_id);
+                            }
+
+                            // Update indices / counters.
+                            ++map_index;
+                            ++block_id;
+                            prev_start_index = sent_index + 1;
+                            seq_len = 0;
+                            num_sent = 0;
+                        }
+                    }  // for (auto sent_index=sent_index_first; ...
+                }      // if (num_remain_sent >= min_num_sent) {
+            }          // for (int doc=0; doc < num_docs; ++doc) {
+        }              // for (int epoch=0; epoch < num_epochs; ++epoch) {
+
+        if (!second) {
+            if (verbose) {
+                cout << " number of empty documents: " << empty_docs << endl << std::flush;
+                cout << " number of documents with one sentence: " << one_sent_docs << endl
+                     << std::flush;
+                cout << " number of documents with long sentences: " << long_sent_docs << endl
+                     << std::flush;
+                cout << " will create mapping for " << map_index << " samples" << endl
+                     << std::flush;
+            }
+            assert(maps == NULL);
+            assert(num_samples < 0);
+            maps = new DocIdx[4 * map_index];
+            num_samples = static_cast<int64_t>(map_index);
+        }
+
+    }  // for (int iteration=0; iteration < 2; ++iteration) {
+
+    // Shuffle.
+    // We need a 64 bit random number generator as we might have more
+    // than 2 billion samples.
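+    // (This is an in-place Fisher-Yates shuffle over the rows of the flat
+    // [num_samples x 4] map: row i is swapped with a uniformly drawn row
+    // j <= i. The modulo bias of rand64_gen() % (i + 1) is negligible for a
+    // 64-bit generator at these sample counts.)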
+    std::mt19937_64 rand64_gen(seed + 1);
+    for (auto i = (num_samples - 1); i > 0; --i) {
+        const auto j = static_cast<int64_t>(rand64_gen() % (i + 1));
+        const auto i0 = 4 * i;
+        const auto j0 = 4 * j;
+        // Swap values.
+        swap(maps[i0], maps[j0]);
+        swap(maps[i0 + 1], maps[j0 + 1]);
+        swap(maps[i0 + 2], maps[j0 + 2]);
+        swap(maps[i0 + 3], maps[j0 + 3]);
+    }
+
+    // Method to deallocate memory.
+    py::capsule free_when_done(maps, [](void* mem_) {
+        DocIdx* mem = reinterpret_cast<DocIdx*>(mem_);
+        delete[] mem;
+    });
+
+    // Return the numpy array.
+    const auto byte_size = sizeof(DocIdx);
+    return py::array(std::vector<int64_t>{num_samples, 4},  // shape
+                     {4 * byte_size, byte_size},            // C-style contiguous strides
+                     maps,                                  // the data pointer
+                     free_when_done);                       // numpy array references
+}
+
+py::array build_blocks_mapping(const py::array_t<int64_t>& docs_,
+                               const py::array_t<int32_t>& sizes_,
+                               const py::array_t<int32_t>& titles_sizes_,
+                               const int num_epochs,
+                               const uint64_t max_num_samples,
+                               const int max_seq_length,
+                               const int seed,
+                               const bool verbose,
+                               const bool use_one_sent_blocks)
+{
+    if (sizes_.size() > std::numeric_limits<uint32_t>::max()) {
+        if (verbose) { cout << " using uint64 for data mapping..." << endl << std::flush; }
+        return build_blocks_mapping_impl<uint64_t>(docs_,
+                                                   sizes_,
+                                                   titles_sizes_,
+                                                   num_epochs,
+                                                   max_num_samples,
+                                                   max_seq_length,
+                                                   seed,
+                                                   verbose,
+                                                   use_one_sent_blocks);
+    } else {
+        if (verbose) { cout << " using uint32 for data mapping..." << endl << std::flush; }
+        return build_blocks_mapping_impl<uint32_t>(docs_,
+                                                   sizes_,
+                                                   titles_sizes_,
+                                                   num_epochs,
+                                                   max_num_samples,
+                                                   max_seq_length,
+                                                   seed,
+                                                   verbose,
+                                                   use_one_sent_blocks);
+    }
+}
+
+PYBIND11_MODULE(helpers, m)
+{
+    m.def("build_mapping", &build_mapping);
+    m.def("build_blocks_mapping", &build_blocks_mapping);
+    m.def("build_sample_idx_int32", &build_sample_idx_int32);
+    m.def("build_sample_idx_int64", &build_sample_idx_int64);
+    m.def("build_blending_indices", &build_blending_indices);
+}
diff --git a/megatron/data/indexed_dataset.py b/megatron/data/indexed_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..8165205b9d3fb7e05561b8cf18deef7a06a56604
--- /dev/null
+++ b/megatron/data/indexed_dataset.py
@@ -0,0 +1,595 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+# copied from fairseq/fairseq/data/indexed_dataset.py
+# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
+# other slight modifications to remove fairseq dependencies
+# Added document index to index file and made it accessible.
+# An empty sentence no longer separates documents.
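+#
+# Rough usage sketch (file paths are hypothetical; `make_builder` and
+# `make_dataset` are defined below -- note `make_dataset` expects the basename,
+# with ".bin"/".idx" appended internally):
+#
+#   builder = make_builder("/tmp/toy.bin", impl="cached")      # IndexedDatasetBuilder
+#   builder.add_item(np.array([10, 11, 12], dtype=np.int32))   # one sentence
+#   builder.end_document()
+#   builder.finalize("/tmp/toy.idx")
+#   ds = make_dataset("/tmp/toy", impl="cached")
+#   tokens = ds[0]   # -> array([10, 11, 12], dtype=int32)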
+
+import os
+import shutil
+import struct
+from functools import lru_cache
+from itertools import accumulate
+
+import numpy as np
+import torch
+
+from megatron import print_rank_0
+
+
+def __best_fitting_dtype(vocab_size=None):
+    if vocab_size is not None and vocab_size < 65500:
+        return np.uint16
+    else:
+        return np.int32
+
+
+def infer_dataset_impl(path):
+    if IndexedDataset.exists(path):
+        with open(index_file_path(path), "rb") as f:
+            magic = f.read(8)
+            if magic == IndexedDataset._HDR_MAGIC:
+                return "cached"
+            elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
+                return "mmap"
+            else:
+                return None
+    else:
+        print(f"Dataset does not exist: {path}")
+        print(
+            "Path should be a basename that both .idx and .bin can be appended to get full filenames."
+        )
+        return None
+
+
+def make_builder(out_file, impl, vocab_size=None):
+    if impl == "mmap":
+        return MMapIndexedDatasetBuilder(
+            out_file, dtype=__best_fitting_dtype(vocab_size)
+        )
+    else:
+        return IndexedDatasetBuilder(out_file)
+
+
+def make_dataset(path, impl, skip_warmup=False):
+    if not IndexedDataset.exists(path):
+        print(f"Dataset does not exist: {path}")
+        print(
+            "Path should be a basename that both .idx and .bin can be appended to get full filenames."
+        )
+        return None
+    if impl == "infer":
+        impl = infer_dataset_impl(path)
+    # Note: this must be a separate `if`, not `elif`, so that an inferred
+    # impl still dispatches to the branches below.
+    if impl == "cached" and IndexedDataset.exists(path):
+        return IndexedCachedDataset(path)
+    elif impl == "mmap" and MMapIndexedDataset.exists(path):
+        return MMapIndexedDataset(path, skip_warmup)
+    print(f"Unknown dataset implementation: {impl}")
+    return None
+
+
+def dataset_exists(path, impl):
+    if impl == "mmap":
+        return MMapIndexedDataset.exists(path)
+    else:
+        return IndexedDataset.exists(path)
+
+
+def read_longs(f, n):
+    a = np.empty(n, dtype=np.int64)
+    f.readinto(a)
+    return a
+
+
+def write_longs(f, a):
+    f.write(np.array(a, dtype=np.int64))
+
+
+dtypes = {
+    1: np.uint8,
+    2: np.int8,
+    3: np.int16,
+    4: np.int32,
+    5: np.int64,
+    6: np.float32,
+    7: np.float64,
+    8: np.uint16,
+}
+
+
+def code(dtype):
+    for k in dtypes.keys():
+        if dtypes[k] == dtype:
+            return k
+    raise ValueError(dtype)
+
+
+def index_file_path(prefix_path):
+    return prefix_path + ".idx"
+
+
+def data_file_path(prefix_path):
+    return prefix_path + ".bin"
+
+
+def create_doc_idx(sizes):
+    doc_idx = [0]
+    for i, s in enumerate(sizes):
+        if s == 0:
+            doc_idx.append(i + 1)
+    return doc_idx
+
+
+class IndexedDataset(torch.utils.data.Dataset):
+    """Loader for IndexedDataset"""
+
+    _HDR_MAGIC = b"TNTIDX\x00\x00"
+
+    def __init__(self, path):
+        super().__init__()
+        self.path = path
+        self.data_file = None
+        self.read_index(path)
+
+    def read_index(self, path):
+        with open(index_file_path(path), "rb") as f:
+            magic = f.read(8)
+            assert magic == self._HDR_MAGIC, (
+                "Index file doesn't match expected format. "
+                "Make sure that --dataset-impl is configured properly."
+            )
+            version = f.read(8)
+            assert struct.unpack("<Q", version) == (1,)
+            code, self.element_size = struct.unpack("<QQ", f.read(16))
+            self.dtype = dtypes[code]
+            self._len, self.s = struct.unpack("<QQ", f.read(16))
+            self.doc_count = struct.unpack("<Q", f.read(8))
+            self.dim_offsets = read_longs(f, self._len + 1)
+            self.data_offsets = read_longs(f, self._len + 1)
+            self.sizes = read_longs(f, self.s)
+            self.doc_idx = read_longs(f, self.doc_count)
+
+    def read_data(self, path):
+        self.data_file = open(data_file_path(path), "rb", buffering=0)
+
+    def check_index(self, i):
+        if i < 0 or i >= self._len:
+            raise IndexError("index out of range")
+
+    def __del__(self):
+        if self.data_file:
+            self.data_file.close()
+
+    # @lru_cache(maxsize=8)
+    def __getitem__(self, idx):
+        if not self.data_file:
+            self.read_data(self.path)
+        if isinstance(idx, int):
+            i = idx
+            self.check_index(i)
+            tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
+            a = np.empty(tensor_size, dtype=self.dtype)
+            self.data_file.seek(self.data_offsets[i] * self.element_size)
+            self.data_file.readinto(a)
+            return a
+        elif isinstance(idx, slice):
+            start, stop, step = idx.indices(len(self))
+            if step != 1:
+                raise ValueError("Slices into indexed_dataset must be contiguous")
+            sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
+            size = sum(sizes)
+            a = np.empty(size, dtype=self.dtype)
+            self.data_file.seek(self.data_offsets[start] * self.element_size)
+            self.data_file.readinto(a)
+            offsets = list(accumulate(sizes))
+            sents = np.split(a, offsets[:-1])
+            return sents
+
+    def __len__(self):
+        return self._len
+
+    def num_tokens(self, index):
+        return self.sizes[index]
+
+    def size(self, index):
+        return self.sizes[index]
+
+    @staticmethod
+    def exists(path):
+        return os.path.exists(index_file_path(path)) and os.path.exists(
+            data_file_path(path)
+        )
+
+    @property
+    def supports_prefetch(self):
+        return False  # avoid prefetching to save memory
+
+
+class IndexedCachedDataset(IndexedDataset):
+    def __init__(self, path):
+        super().__init__(path)
+        self.cache = None
+        self.cache_index = {}
+
+    @property
+    def supports_prefetch(self):
+        return True
+
+    def prefetch(self, indices):
+        if all(i in self.cache_index for i in indices):
+            return
+        if not self.data_file:
+            self.read_data(self.path)
+        indices = sorted(set(indices))
+        total_size = 0
+        for i in indices:
+            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
+        self.cache = np.empty(total_size, dtype=self.dtype)
+        ptx = 0
+        self.cache_index.clear()
+        for i in indices:
+            self.cache_index[i] = ptx
+            size = self.data_offsets[i + 1] - self.data_offsets[i]
+            a = self.cache[ptx : ptx + size]
+            self.data_file.seek(self.data_offsets[i] * self.element_size)
+            self.data_file.readinto(a)
+            ptx += size
+        if self.data_file:
+            # close and delete data file after prefetch so we can pickle
+            self.data_file.close()
+            self.data_file = None
+
+    # @lru_cache(maxsize=8)
+    def __getitem__(self, idx):
+        if isinstance(idx, int):
+            i = idx
+            self.check_index(i)
+            tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
+            a = np.empty(tensor_size, dtype=self.dtype)
+            ptx = self.cache_index[i]
+            np.copyto(a, self.cache[ptx : ptx + a.size])
+            return a
+        elif isinstance(idx, slice):
+            # Hack just to make this work; can optimize later if necessary.
+            sents = []
+            for i in range(*idx.indices(len(self))):
+                sents.append(self[i])
+            return sents
+
+
+class IndexedDatasetBuilder(object):
+    element_sizes = {
+        np.uint8: 1,
+        np.int8: 1,
+        np.int16: 2,
+        np.int32: 4,
+        np.int64: 8,
+        np.float32: 4,
+        np.float64: 8,
+    }
+
+    def __init__(self, out_file, dtype=np.int32):
+        self.out_file = open(out_file, "wb")
+        self.dtype = dtype
+        self.data_offsets = [0]
+        self.dim_offsets = [0]
+        self.sizes = []
+        self.element_size = self.element_sizes[self.dtype]
+        self.doc_idx = [0]
+
+    def add_item(self, np_array):
+        assert isinstance(np_array, np.ndarray) and np_array.dtype == self.dtype
+        bytes = self.out_file.write(np_array)
+        self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
+        for s in np_array.shape:
+            self.sizes.append(s)
+        self.dim_offsets.append(self.dim_offsets[-1] + len(np_array.shape))
+
+    def end_document(self):
+        self.doc_idx.append(len(self.sizes))
+
+    def merge_file_(self, another_file):
+        index = IndexedDataset(another_file)
+        assert index.dtype == self.dtype
+
+        begin = self.data_offsets[-1]
+        for offset in index.data_offsets[1:]:
+            self.data_offsets.append(begin + offset)
+        self.sizes.extend(index.sizes)
+        begin = self.dim_offsets[-1]
+        for dim_offset in index.dim_offsets[1:]:
+            self.dim_offsets.append(begin + dim_offset)
+
+        with open(data_file_path(another_file), "rb") as f:
+            while True:
+                data = f.read(1024)
+                if data:
+                    self.out_file.write(data)
+                else:
+                    break
+
+    def finalize(self, index_file):
+        self.out_file.close()
+        index = open(index_file, "wb")
+        index.write(b"TNTIDX\x00\x00")
+        index.write(struct.pack("<Q", 1))
+        index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
+        index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
+        index.write(struct.pack("<Q", len(self.doc_idx)))
+        write_longs(index, self.dim_offsets)
+        write_longs(index, self.data_offsets)
+        write_longs(index, self.sizes)
+        write_longs(index, self.doc_idx)
+        index.close()
+
+        assert np.min(documents) >= 0
+        assert (neg_label_dataset is not None and pos_label_dataset is not None) or (
+            neg_label_dataset is None and pos_label_dataset is None
+        ), "Label datasets must be both None or both not None"
+        assert np.max(documents) < pos_indexed_dataset.sizes.shape[0]
+        assert pos_indexed_dataset.sizes.shape[0] == neg_indexed_dataset.sizes.shape[0]
+        assert (
+            pack_impl != "packed"
+        ), "Packed implementation not supported for pairwise dataset"
+
+        if build_index_mappings:
+            # Build index mappings.
+            self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
+                self.name,
+                pos_data_prefix,
+                documents,
+                self.pos_indexed_dataset.sizes,
+                self.neg_indexed_dataset.sizes,
+                self.pos_label_dataset,
+                self.neg_label_dataset,
+                num_samples,
+                seq_length,
+                seed,
+                pack_impl,
+                use_shared_fs=use_shared_fs,
+                allow_chopped=allow_chopped,
+            )
+            self.shuffle_idx_len = self.shuffle_idx.shape[0] - 1
+            self.sample_idx_len = self.sample_idx.shape[0] - 1
+
+            if self.shuffle_idx_len != self.sample_idx_len - 1:
+                print(
+                    f"WARNING: shuffle index length ({self.shuffle_idx_len}) is not equal to sample index length ({self.sample_idx_len})"
+                )
+
+    def __len__(self):
+        return min(self.shuffle_idx_len, self.sample_idx_len)
+
+    def __getitem__(self, idx):
+        try:
+            # Get the shuffled index.
+            idx = self.shuffle_idx[idx]
+            # Start and end documents and offsets.
+            doc_index_f = self.sample_idx[idx][0]
+            doc_index_l = self.sample_idx[idx + 1][0]
+            offset_f = self.sample_idx[idx][1]
+            offset_l = self.sample_idx[idx + 1][1]
+            # Labels and texts are supposed to be fully in sync.
+            datasets = [self.pos_indexed_dataset, self.neg_indexed_dataset]
+
+            if self.pos_label_dataset is not None:
+                datasets += [
+                    self.pos_label_dataset,
+                    self.neg_label_dataset,
+                ]
+            if self.pos_ref_dataset is not None:
+                datasets += [
+                    self.pos_ref_dataset,
+                    self.neg_ref_dataset,
+                ]
+            samples = []
+            pos_ref_samples = []
+            neg_ref_samples = []
+            # If we are within the same document, just extract the chunk.
+            for n, dataset in enumerate(datasets):
+                if doc_index_f == doc_index_l:
+                    samples.append(
+                        dataset.get(
+                            self.doc_idx[doc_index_f],
+                            offset=offset_f,
+                            length=offset_l - offset_f + 1,
+                        )
+                    )
+                else:
+                    # Otherwise, get the rest of the initial document.
+                    sample_list = [
+                        dataset.get(self.doc_idx[doc_index_f], offset=offset_f)
+                    ]
+                    # Loop over all in between documents and add the entire document.
+                    for i in range(doc_index_f + 1, doc_index_l):
+                        sample_list.append(dataset.get(self.doc_idx[i]))
+                    # And finally add the relevant portion of last document.
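+                    # (offset_l is the inclusive end offset inside the last
+                    # document, hence length=offset_l + 1 below.)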
+ sample_list.append( + dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1) + ) + samples.append(np.concatenate(sample_list)) + for i in range(len(samples)): + if len(samples[i]) < (self.seq_length + 1): + if ((i == 2) or (i == 3)) and self.pos_label_dataset is not None: + # Labels... So pad with -100 + samples[i] = np.pad( + samples[i], + (0, (self.seq_length + 1) - len(samples[i])), + mode="constant", + constant_values=-100, + ) + else: + # Pad with 0s, can use any number since it's masked. + samples[i] = np.pad( + samples[i], + (0, (self.seq_length + 1) - len(samples[i])), + mode="constant", + constant_values=0, + ) + elif len(samples[i]) > (self.seq_length + 1): + # Check for overflow and truncate. + samples[i] = samples[i][: (self.seq_length + 1)] + ret = {} + ret["pos"] = np.array(samples[0], dtype=np.int64) + ret["neg"] = np.array(samples[1], dtype=np.int64) + if self.pos_label_dataset is not None: + ret["pos_label"] = np.array(samples[2], dtype=np.int64) + ret["neg_label"] = np.array(samples[3], dtype=np.int64) + if self.pos_ref_dataset is not None: + ret["pos_ref"] = np.array(samples[4], dtype=np.float32) + ret["neg_ref"] = np.array(samples[5], dtype=np.float32) + elif self.pos_ref_dataset is not None: + # Don't have labels... + ret["pos_ref"] = np.array(samples[2], dtype=np.float32) + ret["neg_ref"] = np.array(samples[3], dtype=np.float32) + return ret + except IndexError: + new_idx = idx % len(self) + print( + f"WARNING: Got index out of bounds error with index {idx} - taking modulo of index instead ({new_idx})" + ) + return self[new_idx] + + +def _build_index_mappings( + name, + pos_data_prefix, + documents, + pos_sizes, + neg_sizes, + pos_label_dataset, + neg_label_dataset, + num_samples, + seq_length, + seed, + packing_impl, + use_shared_fs=True, + allow_chopped=True, +): + """Build doc-idx, sample-idx, and shuffle-idx. + doc-idx: is an array (ordered) of documents to be used in training. + sample-idx: is the start document index and document offset for each + training sample. + shuffle-idx: maps the sample index into a random index into sample-idx. + """ + # Number of tokens in each epoch and number of required epochs. + tokens_per_epoch = _num_tokens(documents, pos_sizes) + num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples) + # rng state + np_rng = np.random.RandomState(seed=seed) + + # Filename of the index mappings. + _filename = pos_data_prefix + _filename += "_{}_indexmap".format(name) + _filename += "_{}ns".format(num_samples) + _filename += "_{}sl".format(seq_length) + _filename += "_{}s".format(seed) + _filename += "_{}pi".format(packing_impl) + doc_idx_filename = _filename + "_doc_idx.npy" + sample_idx_filename = _filename + "_sample_idx.npy" + shuffle_idx_filename = _filename + "_shuffle_idx.npy" + + if not use_shared_fs: + should_process_dataset = int(os.environ["LOCAL_RANK"]) == 0 + else: + should_process_dataset = torch.distributed.get_rank() == 0 + + # Build the indexed mapping if not exist. + if should_process_dataset: + if ( + (not os.path.isfile(doc_idx_filename)) + or (not os.path.isfile(sample_idx_filename)) + or (not os.path.isfile(shuffle_idx_filename)) + ): + print_rank_0( + " > WARNING: could not find index map files, building " + "the indices on rank 0 ..." + ) + # doc-idx. + start_time = time.time() + if packing_impl == "pack_until_overflow": + # Naively pack data until it overflows, then roll it over to a new one instead. 
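+                # Worked example (hypothetical sizes, seq_length + 1 == 8):
+                # documents of length 3, 4, and 5 become two samples -- 3 + 4 = 7
+                # fits in the first sample, then 5 would overflow the budget, so
+                # it starts the second one. Each sample_idx entry below is
+                # [index into doc_idx, offset 0].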
+ shuffle_idx = np.arange(num_samples) # Shuffle index around epochs + np_rng.shuffle(shuffle_idx) + sample_idx = [] + doc_idx = [] + # Iterate over files until we have enough samples. + temp_shuffle_idx = np.arange(len(documents)) + np_rng.shuffle(temp_shuffle_idx) + running_length = 0 + curr_shuffle_idx = 0 + while len(sample_idx) < num_samples: + # If not allow_chopped, skip this item if it's chopped. + if not allow_chopped: + if ( + pos_sizes[temp_shuffle_idx[curr_shuffle_idx]] + < seq_length + 1 + ): + curr_shuffle_idx += 1 + continue + if ( + neg_sizes[temp_shuffle_idx[curr_shuffle_idx]] + < seq_length + 1 + ): + curr_shuffle_idx += 1 + continue + # Then, check if we need to skip this item... + if pos_label_dataset is not None: + if np.all( + pos_label_dataset.get(temp_shuffle_idx[curr_shuffle_idx])[ + : seq_length + 1 + ] + == -100 + ): + curr_shuffle_idx += 1 + continue + if np.all( + neg_label_dataset.get(temp_shuffle_idx[curr_shuffle_idx])[ + : seq_length + 1 + ] + == -100 + ): + curr_shuffle_idx += 1 + continue + doc_length = max( + pos_sizes[temp_shuffle_idx[curr_shuffle_idx]], + neg_sizes[temp_shuffle_idx[curr_shuffle_idx]], + ) + if running_length == 0: + sample_idx.append(np.array([len(doc_idx), 0])) + doc_idx.append(temp_shuffle_idx[curr_shuffle_idx]) + running_length += doc_length + else: + if running_length + doc_length > (seq_length + 1): + running_length = doc_length + sample_idx.append(np.array([len(doc_idx), 0])) + else: + running_length += doc_length + doc_idx.append(temp_shuffle_idx[curr_shuffle_idx]) + curr_shuffle_idx += 1 + if curr_shuffle_idx == len(documents): + curr_shuffle_idx = 0 + np_rng.shuffle(temp_shuffle_idx) + sample_idx.append(np.array([len(doc_idx), 0])) + np.save(doc_idx_filename, doc_idx, allow_pickle=True) + np.save(sample_idx_filename, sample_idx, allow_pickle=True) + np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True) + elif packing_impl == "unpacked": + # Unpacked data, one sample per document. + shuffle_idx = np.array([i % len(documents) for i in range(num_samples)]) + np_rng.shuffle(shuffle_idx) + sample_idx = np.zeros((num_samples + 1, 2), dtype=np.int64) + sample_idx[:, 0] = np.array([i for i in range(num_samples + 1)]) + sample_idx[:, 1] = 0 + doc_idx = list() + doc_i = 0 + while len(doc_idx) <= num_samples: + # Check if we need to skip this item... 
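+                # (-100 is the ignore_index convention for cross-entropy loss: a
+                # document whose visible labels are all -100 would contribute no
+                # loss signal, so it is skipped rather than trained on.)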
+ if not allow_chopped: + # +1 since we shift left/right by 1 + if pos_sizes[doc_i] > seq_length + 1: + doc_i = (doc_i + 1) % len(documents) + continue + if neg_sizes[doc_i] > seq_length + 1: + doc_i = (doc_i + 1) % len(documents) + continue + # In theory if we don't allow chopped we should be able to skip it, but the warm fuzzies I get + # from this are worth the extra bool check + if np.all(pos_label_dataset.get(doc_i)[:seq_length] == -100): + doc_i = (doc_i + 1) % len(documents) + continue + if np.all(neg_label_dataset.get(doc_i)[:seq_length] == -100): + doc_i = (doc_i + 1) % len(documents) + continue + doc_idx.append(doc_i) + doc_i = (doc_i + 1) % len(documents) + np.save(doc_idx_filename, doc_idx, allow_pickle=True) + np.save(sample_idx_filename, sample_idx, allow_pickle=True) + np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True) + + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = torch.cuda.LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_io_parallel_group()) + assert counts[0].item() == torch.distributed.get_world_size( + group=mpu.get_io_parallel_group() + ) + + # Load mappings. + start_time = time.time() + print_rank_0(" > loading doc-idx mapping from {}".format(doc_idx_filename)) + doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0(" > loading sample-idx mapping from {}".format(sample_idx_filename)) + sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0(" > loading shuffle-idx mapping from {}".format(shuffle_idx_filename)) + shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode="r") + print_rank_0( + " loaded indexed file in {:3.3f} seconds".format(time.time() - start_time) + ) + print_rank_0(" total number of samples: {}".format(sample_idx.shape[0])) + print_rank_0(" total number of epochs: {}".format(num_epochs)) + + return doc_idx, sample_idx, shuffle_idx + + +def _num_tokens(documents, sizes): + """Total number of tokens in the dataset.""" + return np.sum(sizes[documents]) + + +def _num_epochs(tokens_per_epoch, seq_length, num_samples): + """Based on number of samples and sequence length, calculate how many + epochs will be needed.""" + num_epochs = 0 + total_tokens = 0 + while True: + num_epochs += 1 + total_tokens += tokens_per_epoch + # -1 is because we need to retrieve seq_length + 1 token each time + # but the last token will overlap with the first token of the next + # sample except for the last sample. + if ((total_tokens - 1) // seq_length) >= num_samples: + return num_epochs + + +def _build_doc_idx(documents, num_epochs, np_rng): + """Build an array with length = number-of-epochs * number-of-documents. + Each index is mapped to a corresponding document.""" + doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1] + doc_idx[:] = documents + doc_idx = doc_idx.reshape(-1) + doc_idx = doc_idx.astype(np.int32) + np_rng.shuffle(doc_idx) + return doc_idx + + +def _build_sample_idx(sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch): + """Sample index mapping is a 2D array with sizes + [number-of-samples + 1, 2] where [..., 0] contains + the index into `doc_idx` and [..., 1] is the + starting offset in that document.""" + + # Total number of samples. For -1 see comments in `_num_epochs`. + num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length + sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int64) + + # Index into sample_idx. 
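+    # (Row k of sample_idx is [position in doc_idx, token offset within that
+    # document] marking where sample k starts; row k + 1 marks where it ends.)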
+    sample_index = 0
+    # Index into doc_idx.
+    doc_idx_index = 0
+    # Beginning offset for each document.
+    doc_offset = 0
+    # Start with first document and no offset.
+    sample_idx[sample_index][0] = doc_idx_index
+    sample_idx[sample_index][1] = doc_offset
+    sample_index += 1
+    while sample_index <= num_samples:
+        # Start with a fresh sequence.
+        remaining_seq_length = seq_length + 1
+        while remaining_seq_length != 0:
+            # Get the document length.
+            doc_id = doc_idx[doc_idx_index]
+            doc_length = sizes[doc_id] - doc_offset
+            # And add it to the current sequence.
+            remaining_seq_length -= doc_length
+            # If we have more than a full sequence, adjust offset and set
+            # remaining length to zero so we return from the while loop.
+            # Note that -1 here is for the same reason we have -1 in
+            # `_num_epochs` calculations.
+            if remaining_seq_length <= 0:
+                doc_offset += remaining_seq_length + doc_length - 1
+                remaining_seq_length = 0
+            else:
+                # Otherwise, start from the beginning of the next document.
+                doc_idx_index += 1
+                doc_offset = 0
+        # Record the sequence.
+        sample_idx[sample_index][0] = doc_idx_index
+        sample_idx[sample_index][1] = doc_offset
+        sample_index += 1
+
+    return sample_idx
+
+
+def _build_shuffle_idx(size, np_rng):
+    """Build the range [0, size) and shuffle."""
+    dtype_ = np.uint32
+    if size >= (np.iinfo(np.uint32).max - 1):
+        dtype_ = np.int64
+    shuffle_idx = np.arange(start=0, stop=size, step=1, dtype=dtype_)
+    np_rng.shuffle(shuffle_idx)
+    return shuffle_idx
diff --git a/megatron/data/samplers.py b/megatron/data/samplers.py
new file mode 100644
index 0000000000000000000000000000000000000000..5206636d843f80c8a2c6287e7ad9076d67156bab
--- /dev/null
+++ b/megatron/data/samplers.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Batch samplers that work with either random or sequential data samplers."""
+
+import torch
+from torch.utils import data
+
+
+class RandomSampler(data.sampler.Sampler):
+    """Based off of pytorch RandomSampler and DistributedSampler. Essentially
+    a RandomSampler, but this class lets the user set an epoch like
+    DistributedSampler. Samples elements randomly. If without replacement, then
+    sample from a shuffled dataset. If with replacement, then user can
+    specify ``num_samples`` to draw.
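+
+    A hypothetical usage sketch:
+
+        >>> sampler = RandomSampler(data_source=dataset)
+        >>> sampler.set_epoch(3)   # re-seeds the permutation for epoch 3
+        >>> epoch_indices = list(iter(sampler))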
+    Arguments:
+        data_source (Dataset): dataset to sample from
+        num_samples (int): number of samples to draw, default=len(dataset)
+        replacement (bool): samples are drawn with replacement if ``True``,
+            default=False
+    """
+
+    def __init__(self, data_source, replacement=False, num_samples=None):
+        self.data_source = data_source
+        self.replacement = replacement
+        self._num_samples = num_samples
+        self.epoch = -1
+
+        if self._num_samples is not None and replacement is False:
+            raise ValueError(
+                "With replacement=False, num_samples should not "
+                "be specified, since a random permute will be "
+                "performed."
+            )
+
+        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
+            raise ValueError(
+                "num_samples should be a positive integer "
+                "value, but got num_samples={}".format(self.num_samples)
+            )
+        if not isinstance(self.replacement, bool):
+            raise ValueError(
+                "replacement should be a boolean value, but got "
+                "replacement={}".format(self.replacement)
+            )
+
+    @property
+    def num_samples(self):
+        # dataset size might change at runtime
+        if self._num_samples is None:
+            return len(self.data_source)
+        return self._num_samples
+
+    def __iter__(self):
+        n = len(self.data_source)
+        g = torch.Generator()
+        if self.epoch >= 0:
+            g.manual_seed(self.epoch)
+        if self.replacement:
+            return iter(
+                torch.randint(
+                    high=n, size=(self.num_samples,), dtype=torch.int64, generator=g
+                ).tolist()
+            )
+        return iter(torch.randperm(n, generator=g).tolist())
+
+    def __len__(self):
+        return self.num_samples
+
+    def set_epoch(self, epoch):
+        self.epoch = epoch
+
+
+class DistributedBatchSampler(data.sampler.BatchSampler):
+    """Similar to normal implementation of distributed sampler, except
+    implementation is at the batch sampler level, instead of just the
+    sampler level. This allows wrapping of arbitrary data samplers
+    (sequential, random, WeightedRandomSampler, etc.) with this batch
+    sampler.
+
+    The `interleave` argument specifies how to distribute a batch. A value
+    of True combined with the above random sampler is equivalent to pytorch's
+    torch.utils.data.distributed.DistributedSampler.
+
+    For the following batch [0,1,2,3,4,5,6,7] and data parallelism of 2,
+    specifying True will result in the following samples for each gpu:
+    GPU0: [0,2,4,6] GPU1: [1,3,5,7]
+    specifying False will result in the following samples:
+    GPU0: [0,1,2,3] GPU1: [4,5,6,7]
+
+    The `wrap_last` argument lets the leftover samples of a final partial
+    batch wrap around into the next pass over the dataset, so batches can be
+    yielded indefinitely, restarting from the beginning of the dataset when
+    all samples have been iterated over.
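+
+    A hypothetical usage sketch with two data-parallel ranks:
+
+        >>> batch_sampler = DistributedBatchSampler(
+        ...     RandomSampler(dataset), batch_size=8, drop_last=True,
+        ...     rank=0, world_size=2
+        ... )
+        >>> # each yielded batch holds 8 // 2 = 4 indices for this rank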
+ """ + + def __init__( + self, + sampler, + batch_size, + drop_last, + rank=-1, + world_size=2, + wrap_last=False, + interleave=False, + ): + super(DistributedBatchSampler, self).__init__(sampler, batch_size, drop_last) + if rank == -1: + assert False, "should not be here" + rank = torch.distributed.get_rank() + self.rank = rank + self.world_size = world_size + self.sampler.wrap_around = 0 + self.wrap_around = 0 + self.wrap_last = wrap_last + self.start_iter = 0 + self.interleave = interleave + + def __iter__(self): + batch = [] + i = 0 + for idx in self.data_iterator(self.sampler, wrap_around=False): + batch.append(idx) + if len(batch) == self.batch_size: + tbatch = self._batch(batch) + if i >= self.start_iter: + yield tbatch + self.start_iter = 0 + i += 1 + batch = [] + batch_len = len(batch) + if batch_len > 0 and not self.drop_last: + if self.wrap_last: + self.sampler.wrap_around -= self.batch_size + self.wrap_around += len(batch) + self.wrap_around %= self.batch_size + yield self._batch(batch) + if self.wrap_last: + self.sampler.wrap_around += self.batch_size + + def data_iterator(self, _iter, wrap_around=False): + """iterates through data and handles wrap around""" + for i, idx in enumerate(_iter): + if i < self.wrap_around % self.batch_size: + continue + if wrap_around: + self.wrap_around += 1 + self.wrap_around %= self.batch_size + yield idx + + def _batch(self, batch): + """extracts samples only pertaining to this worker's batch""" + if self.interleave: + return batch[self.rank : self.batch_size : self.world_size] + start = self.rank * self.batch_size // self.world_size + end = (self.rank + 1) * self.batch_size // self.world_size + return batch[start:end] diff --git a/megatron/devutil.py b/megatron/devutil.py new file mode 100644 index 0000000000000000000000000000000000000000..7563d7dcf76d8f2cb150372c9921b9e57a66adc7 --- /dev/null +++ b/megatron/devutil.py @@ -0,0 +1,51 @@ +import torch.cuda + + +class Metric: + """ + Dumb utility to collect and report average wall-time metrics. + """ + + def __init__(self, label): + self.label = label + self.measurements = [] + + def collect(self, measurement): + self.measurements.append(measurement) + + def get_measurements(self): + return self.measurements[:] + + def report(self): + print( + self.label, + torch.quantile(torch.tensor(self.measurements), torch.arange(10) / 10.0), + ) + + +def monitor_method_cuda_wall_times(metric, obj, methodname): + """ + Measure timings for a method on an object or class. + + For instance: + + >>> metric = Metric('!LNORM') + >>> monitor_method_wall_times(metric, LayerNorm, 'forward') + """ + oldmeth = getattr(obj, methodname) + + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + + def newmeth(*args, **kw): + start_event.record() + try: + return oldmeth(*args, **kw) + finally: + end_event.record() + torch.cuda.synchronize() + elapsed = start_event.elapsed_time(end_event) + metric.collect(elapsed) + metric.report() + + setattr(obj, methodname, newmeth) diff --git a/megatron/fused_kernels/__init__.py b/megatron/fused_kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3694e964b764693600c53a9a820b7fc21d2b2cbd --- /dev/null +++ b/megatron/fused_kernels/__init__.py @@ -0,0 +1,181 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This file has been modified from its original version +# + +import os +import pathlib +import subprocess +import torch +from torch.utils import cpp_extension + +# Setting this param to a list has a problem of generating different +# compilation commands (with different order of architectures) and +# leading to recompilation of fused kernels. Set it to empty string +# to avoid recompilation and assign arch flags explicitly in +# extra_cuda_cflags below +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(neox_args=None): + + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + if torch.version.hip is None: + _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version( + cpp_extension.CUDA_HOME + ) + if int(bare_metal_major) >= 11: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + if int(bare_metal_minor) >= 1: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_86,code=sm_86") + if int(bare_metal_minor) >= 4: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_87,code=sm_87") + if int(bare_metal_minor) >= 8: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_89,code=sm_89") + if int(bare_metal_major) >= 12: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_90,code=sm_90") + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / "build" + _create_build_dir(buildpath) + + # Determine verbosity + verbose = True if neox_args is None else (neox_args.rank == 0) + + # Helper function to build the kernels. + def _cpp_extention_load_helper( + name, sources, extra_cuda_flags, extra_include_paths + ): + if torch.version.hip is not None: + extra_cuda_cflags = ["-O3"] + extra_cuda_flags + cc_flag + else: + extra_cuda_cflags = ( + ["-O3", "-gencode", "arch=compute_70,code=sm_70", "--use_fast_math"] + + extra_cuda_flags + + cc_flag + ) + + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=extra_cuda_cflags, + extra_include_paths=extra_include_paths, + verbose=verbose, + ) + + # ============== + # Fused softmax. + # ============== + + if torch.version.hip is not None: + extra_include_paths = [os.path.abspath(srcpath)] + else: + extra_include_paths = [] + + if torch.version.hip is not None: + extra_cuda_flags = [ + "-D__HIP_NO_HALF_OPERATORS__=1", + "-D__HIP_NO_HALF_CONVERSIONS__=1", + ] + else: + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + # Upper triangular softmax. + sources = [ + srcpath / "scaled_upper_triang_masked_softmax.cpp", + srcpath / "scaled_upper_triang_masked_softmax_cuda.cu", + ] + scaled_upper_triang_masked_softmax_cuda = _cpp_extention_load_helper( + "scaled_upper_triang_masked_softmax_cuda", + sources, + extra_cuda_flags, + extra_include_paths, + ) + # Masked softmax. 
+    sources = [
+        srcpath / "scaled_masked_softmax.cpp",
+        srcpath / "scaled_masked_softmax_cuda.cu",
+    ]
+    scaled_masked_softmax_cuda = _cpp_extention_load_helper(
+        "scaled_masked_softmax_cuda", sources, extra_cuda_flags, extra_include_paths
+    )
+    # fused rope
+    sources = [
+        srcpath / "fused_rotary_positional_embedding.cpp",
+        srcpath / "fused_rotary_positional_embedding_cuda.cu",
+    ]
+    fused_rotary_positional_embedding = _cpp_extention_load_helper(
+        "fused_rotary_positional_embedding",
+        sources,
+        extra_cuda_flags,
+        extra_include_paths,
+    )
+
+
+def _get_cuda_bare_metal_version(cuda_dir):
+    raw_output = subprocess.check_output(
+        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
+    )
+    output = raw_output.split()
+    release_idx = output.index("release") + 1
+    release = output[release_idx].split(".")
+    bare_metal_major = release[0]
+    bare_metal_minor = release[1][0]
+
+    return raw_output, bare_metal_major, bare_metal_minor
+
+
+def _create_build_dir(buildpath):
+    try:
+        os.mkdir(buildpath)
+    except OSError:
+        if not os.path.isdir(buildpath):
+            print(f"Creation of the build directory {buildpath} failed")
+
+
+def load_fused_kernels():
+    try:
+        import scaled_upper_triang_masked_softmax_cuda
+        import scaled_masked_softmax_cuda
+        import fused_rotary_positional_embedding
+    except (ImportError, ModuleNotFoundError) as e:
+        print("\n")
+        print(e)
+        print("=" * 100)
+        print(
+            "ERROR: Fused kernels configured but not properly installed. Please run "
+            "`from megatron.fused_kernels import load` and then call `load()` to build them."
+        )
+        print("=" * 100)
+        exit()
+    return
diff --git a/megatron/fused_kernels/compat.h b/megatron/fused_kernels/compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..88867dc7eed60daa1f61a6a3ce303d2f4959f986
--- /dev/null
+++ b/megatron/fused_kernels/compat.h
@@ -0,0 +1,29 @@
+/* coding=utf-8
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*This code is copied from NVIDIA apex:
+ *     https://github.com/NVIDIA/apex
+ *     with minor changes. */
+
+#ifndef TORCH_CHECK
+#define TORCH_CHECK AT_CHECK
+#endif
+
+#ifdef VERSION_GE_1_3
+#define DATA_PTR data_ptr
+#else
+#define DATA_PTR data
+#endif
diff --git a/megatron/fused_kernels/fused_rotary_positional_embedding.cpp b/megatron/fused_kernels/fused_rotary_positional_embedding.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e1a77de2beeea0fa5aec698cc43f2f1925b468fb
--- /dev/null
+++ b/megatron/fused_kernels/fused_rotary_positional_embedding.cpp
@@ -0,0 +1,139 @@
+/* coding=utf-8
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <torch/extension.h>
+
+namespace fused_rope {
+
+torch::Tensor fwd_cuda(const torch::Tensor& input,
+                       const torch::Tensor& freqs,
+                       const bool transpose_output);
+
+torch::Tensor bwd_cuda(const torch::Tensor& output_grads,
+                       const torch::Tensor& freqs,
+                       const bool transpose_output);
+
+torch::Tensor fwd_cached_cuda(const torch::Tensor& input,
+                              const torch::Tensor& cos,
+                              const torch::Tensor& sin,
+                              const bool transpose_output);
+
+torch::Tensor bwd_cached_cuda(const torch::Tensor& output_grads,
+                              const torch::Tensor& cos,
+                              const torch::Tensor& sin,
+                              const bool transpose_output);
+
+torch::Tensor fwd(const at::Tensor& input, const at::Tensor& freqs, const bool transpose_output)
+{
+    TORCH_CHECK(input.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(freqs.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(input.size(0) == freqs.size(0),
+                "expected input and freqs tensor have the same sequence length");
+    TORCH_CHECK(freqs.size(1) == 1 && freqs.size(2) == 1,
+                "expected the second and third dims of the freqs tensor equal 1");
+    TORCH_CHECK(input.size(3) >= freqs.size(3),
+                "expected the last dim of the input tensor equals or is "
+                "greater than the freqs tensor");
+    TORCH_CHECK(freqs.scalar_type() == at::ScalarType::Float,
+                "Dtype of the freqs tensor must be float");
+
+    return fwd_cuda(input, freqs, transpose_output);
+}
+
+torch::Tensor bwd(const torch::Tensor& output_grads,
+                  const at::Tensor& freqs,
+                  const bool transpose_output)
+{
+    TORCH_CHECK(output_grads.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(freqs.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(output_grads.size(0) == freqs.size(0),
+                "expected output_grads and freqs tensor have the same sequence length");
+    TORCH_CHECK(freqs.size(1) == 1 && freqs.size(2) == 1,
+                "expected the second and third dims of the freqs tensor equal 1");
+    TORCH_CHECK(output_grads.size(3) >= freqs.size(3),
+                "expected the last dim of the output_grads tensor equals or is "
+                "greater than the freqs tensor");
+    TORCH_CHECK(freqs.scalar_type() == at::ScalarType::Float,
+                "Dtype of the freqs tensor must be float");
+
+    return bwd_cuda(output_grads, freqs, transpose_output);
+}
+
+torch::Tensor fwd_cached(const at::Tensor& input,
+                         const at::Tensor& cos,
+                         const at::Tensor& sin,
+                         const bool transpose_output)
+{
+    TORCH_CHECK(input.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(cos.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(sin.dim() == 4, "expected 4D tensor");
+    TORCH_CHECK(input.size(0) == cos.size(0),
+                "expected input and cos tensor have the same sequence length");
+    TORCH_CHECK(input.size(0) == sin.size(0),
+                "expected input and sin tensor have the same sequence length");
+    TORCH_CHECK(cos.size(1) == 1 && cos.size(2) == 1,
+                "expected the second and third dims of the cos tensor equal 1");
+    TORCH_CHECK(sin.size(1) == 1 && sin.size(2) == 1,
+                "expected the second and third dims of the sin tensor equal 1");
+    TORCH_CHECK(cos.size(3) == sin.size(3), "expected cos and sin tensor have the same last dim");
+    TORCH_CHECK(input.size(3) >= cos.size(3),
+                "expected the last dim of the input tensor equals or is "
+                "greater than the cos tensor");
the cos tensor"); + TORCH_CHECK(cos.scalar_type() == sin.scalar_type(), + "expected cos and sin tensor have the same dtype"); + + return fwd_cached_cuda(input, cos, sin, transpose_output); +} + +torch::Tensor bwd_cached(const torch::Tensor& output_grads, + const at::Tensor& cos, + const at::Tensor& sin, + const bool transpose_output) +{ + TORCH_CHECK(output_grads.dim() == 4, "expected 4D tensor"); + TORCH_CHECK(cos.dim() == 4, "expected 4D tensor"); + TORCH_CHECK(sin.dim() == 4, "expected 4D tensor"); + TORCH_CHECK(output_grads.size(0) == cos.size(0), + "expected output_grads and cos tensor have the same sequence length"); + TORCH_CHECK(output_grads.size(0) == sin.size(0), + "expected output_grads and sin tensor have the same sequence length"); + TORCH_CHECK(cos.size(1) == 1 && cos.size(2) == 1, + "expected the second and third dims of the cos tensor equal 1"); + TORCH_CHECK(sin.size(1) == 1 && sin.size(2) == 1, + "expected the second and third dims of the sin tensor equal 1"); + TORCH_CHECK(cos.size(3) == sin.size(3), "expected cos and sin tensor have the same last dim"); + TORCH_CHECK(output_grads.size(3) >= cos.size(3), + "expected the last dim of the output_grads tensor equals or is " + "greater than the cos tensor"); + TORCH_CHECK(cos.scalar_type() == sin.scalar_type(), + "expected cos and sin tensor have the same dtype"); + + return bwd_cached_cuda(output_grads, cos, sin, transpose_output); +} + +} // end namespace fused_rope + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("forward", &fused_rope::fwd, "Fused Rotary Positional Embedding -- Forward."); + m.def("backward", &fused_rope::bwd, "Fused Rotary Positional Embedding -- Backward."); + m.def("forward_cached", + &fused_rope::fwd_cached, + "Fused Rotary Positional Embedding Cached -- Forward."); + m.def("backward_cached", + &fused_rope::bwd_cached, + "Fused Rotary Positional Embedding Cached -- Backward."); +} diff --git a/megatron/fused_kernels/fused_rotary_positional_embedding.h b/megatron/fused_kernels/fused_rotary_positional_embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..aafd5104d2bcd1198e2e566ddd56e849f5a25137 --- /dev/null +++ b/megatron/fused_kernels/fused_rotary_positional_embedding.h @@ -0,0 +1,395 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#pragma once
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAException.h>
+#include <c10/macros/Macros.h>
+#include <cuda_runtime.h>
+
+namespace {
+
+template <typename scalar_t>
+__global__ void fused_rope_forward(const int h,
+                                   const int d,
+                                   const int d2,
+                                   const int stride_s,
+                                   const int stride_b,
+                                   const int stride_h,
+                                   const int stride_d,
+                                   const int o_stride_s,
+                                   const int o_stride_b,
+                                   const int o_stride_h,
+                                   const int o_stride_d,
+                                   const scalar_t* src,
+                                   const float* freqs,
+                                   scalar_t* dst)
+{
+    int s_id = blockIdx.x, b_id = blockIdx.y;
+    int offset_block = s_id * stride_s + b_id * stride_b;
+    int offset_block_dst = s_id * o_stride_s + b_id * o_stride_b;
+#pragma unroll
+    for (int d_id = threadIdx.x; d_id < d2; d_id += blockDim.x) {
+        float v_cos, v_sin;
+        sincosf(freqs[s_id * d2 + d_id], &v_sin, &v_cos);
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_src = offset_block + h_id * stride_h + d_id * stride_d;
+            int offset_dst = offset_block_dst + h_id * o_stride_h + d_id * o_stride_d;
+            scalar_t v_src = src[offset_src];
+            scalar_t v_src_rotate = (d_id + d2 / 2 < d2)
+                                        ? -src[offset_src + (d2 / 2) * stride_d]
+                                        : src[offset_src + (d2 / 2 - d2) * stride_d];
+            dst[offset_dst] = v_src * (scalar_t)v_cos + v_src_rotate * (scalar_t)v_sin;
+        }
+    }
+
+    // copy the rest
+    if (d > d2) {
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_head = offset_block + h_id * stride_h;
+            int offset_head_dst = offset_block_dst + h_id * o_stride_h;
+#pragma unroll
+            for (int d_id = d2 + threadIdx.x; d_id < d; d_id += blockDim.x) {
+                dst[offset_head_dst + d_id * o_stride_d] = src[offset_head + d_id * stride_d];
+            }
+        }
+    }
+}
+
+template <typename scalar_t>
+__global__ void fused_rope_backward(const int h,
+                                    const int d,
+                                    const int d2,
+                                    const int stride_s,
+                                    const int stride_b,
+                                    const int stride_h,
+                                    const int stride_d,
+                                    const int o_stride_s,
+                                    const int o_stride_b,
+                                    const int o_stride_h,
+                                    const int o_stride_d,
+                                    const scalar_t* src,
+                                    const float* freqs,
+                                    scalar_t* dst)
+{
+    int s_id = blockIdx.x, b_id = blockIdx.y;
+    int offset_block = s_id * stride_s + b_id * stride_b;
+    int offset_block_dst = s_id * o_stride_s + b_id * o_stride_b;
+#pragma unroll
+    for (int d_id = threadIdx.x; d_id < d2; d_id += blockDim.x) {
+        scalar_t v_cos = cosf(freqs[s_id * d2 + d_id]);
+        scalar_t v_sin = (d_id + d2 / 2 < d2) ? sinf(freqs[s_id * d2 + d_id + d2 / 2])
+                                              : -sinf(freqs[s_id * d2 + d_id + d2 / 2 - d2]);
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_src = offset_block + h_id * stride_h + d_id * stride_d;
+            int offset_dst = offset_block_dst + h_id * o_stride_h + d_id * o_stride_d;
+            scalar_t v_src = src[offset_src];
+            scalar_t v_src_rotate = (d_id + d2 / 2 < d2)
+                                        ? src[offset_src + (d2 / 2) * stride_d]
+                                        : src[offset_src + (d2 / 2 - d2) * stride_d];
+            dst[offset_dst] = v_src * v_cos + v_src_rotate * v_sin;
+        }
+    }
+
+    // handle the tail
+    if (d > d2) {
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_head = offset_block + h_id * stride_h;
+            int offset_head_dst = offset_block_dst + h_id * o_stride_h;
+#pragma unroll
+            for (int d_id = d2 + threadIdx.x; d_id < d; d_id += blockDim.x) {
+                dst[offset_head_dst + d_id * o_stride_d] = src[offset_head + d_id * stride_d];
+            }
+        }
+    }
+}
+
+template <typename scalar_t_0, typename scalar_t_1>
+__global__ void fused_rope_cached_forward(const int h,
+                                          const int d,
+                                          const int d2,
+                                          const int stride_s,
+                                          const int stride_b,
+                                          const int stride_h,
+                                          const int stride_d,
+                                          const int o_stride_s,
+                                          const int o_stride_b,
+                                          const int o_stride_h,
+                                          const int o_stride_d,
+                                          const scalar_t_0* src,
+                                          const scalar_t_1* cos,
+                                          const scalar_t_1* sin,
+                                          scalar_t_0* dst)
+{
+    int s_id = blockIdx.x, b_id = blockIdx.y;
+    int offset_block = s_id * stride_s + b_id * stride_b;
+    int offset_block_dst = s_id * o_stride_s + b_id * o_stride_b;
+#pragma unroll
+    for (int d_id = threadIdx.x; d_id < d2; d_id += blockDim.x) {
+        scalar_t_0 v_cos = cos[s_id * d2 + d_id];
+        scalar_t_0 v_sin = sin[s_id * d2 + d_id];
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_src = offset_block + h_id * stride_h + d_id * stride_d;
+            int offset_dst = offset_block_dst + h_id * o_stride_h + d_id * o_stride_d;
+            scalar_t_0 v_src = src[offset_src];
+            scalar_t_0 v_src_rotate = (d_id + d2 / 2 < d2)
+                                          ? -src[offset_src + (d2 / 2) * stride_d]
+                                          : src[offset_src + (d2 / 2 - d2) * stride_d];
+            dst[offset_dst] = v_src * v_cos + v_src_rotate * v_sin;
+        }
+    }
+
+    // copy the rest
+    if (d > d2) {
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_head = offset_block + h_id * stride_h;
+            int offset_head_dst = offset_block_dst + h_id * o_stride_h;
+#pragma unroll
+            for (int d_id = d2 + threadIdx.x; d_id < d; d_id += blockDim.x) {
+                dst[offset_head_dst + d_id * o_stride_d] = src[offset_head + d_id * stride_d];
+            }
+        }
+    }
+}
+
+template <typename scalar_t_0, typename scalar_t_1>
+__global__ void fused_rope_cached_backward(const int h,
+                                           const int d,
+                                           const int d2,
+                                           const int stride_s,
+                                           const int stride_b,
+                                           const int stride_h,
+                                           const int stride_d,
+                                           const int o_stride_s,
+                                           const int o_stride_b,
+                                           const int o_stride_h,
+                                           const int o_stride_d,
+                                           const scalar_t_0* src,
+                                           const scalar_t_1* cos,
+                                           const scalar_t_1* sin,
+                                           scalar_t_0* dst)
+{
+    int s_id = blockIdx.x, b_id = blockIdx.y;
+    int offset_block = s_id * stride_s + b_id * stride_b;
+    int offset_block_dst = s_id * o_stride_s + b_id * o_stride_b;
+#pragma unroll
+    for (int d_id = threadIdx.x; d_id < d2; d_id += blockDim.x) {
+        scalar_t_0 v_cos = cos[s_id * d2 + d_id];
+        scalar_t_0 v_sin = (d_id + d2 / 2 < d2) ? sin[s_id * d2 + d_id + d2 / 2]
+                                                : -sin[s_id * d2 + d_id + d2 / 2 - d2];
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_src = offset_block + h_id * stride_h + d_id * stride_d;
+            int offset_dst = offset_block_dst + h_id * o_stride_h + d_id * o_stride_d;
+            scalar_t_0 v_src = src[offset_src];
+            scalar_t_0 v_src_rotate = (d_id + d2 / 2 < d2)
+                                          ? src[offset_src + (d2 / 2) * stride_d]
+                                          : src[offset_src + (d2 / 2 - d2) * stride_d];
+            dst[offset_dst] = v_src * v_cos + v_src_rotate * v_sin;
+        }
+    }
+
+    // handle the tail
+    if (d > d2) {
+#pragma unroll
+        for (int h_id = threadIdx.y; h_id < h; h_id += blockDim.y) {
+            int offset_head = offset_block + h_id * stride_h;
+            int offset_head_dst = offset_block_dst + h_id * o_stride_h;
+#pragma unroll
+            for (int d_id = d2 + threadIdx.x; d_id < d; d_id += blockDim.x) {
+                dst[offset_head_dst + d_id * o_stride_d] = src[offset_head + d_id * stride_d];
+            }
+        }
+    }
+}
+
+}  // end of anonymous namespace
+
+template <typename scalar_t>
+void dispatch_fused_rope_forward(const int s,
+                                 const int b,
+                                 const int h,
+                                 const int d,
+                                 const int d2,
+                                 const int stride_s,
+                                 const int stride_b,
+                                 const int stride_h,
+                                 const int stride_d,
+                                 const int o_stride_s,
+                                 const int o_stride_b,
+                                 const int o_stride_h,
+                                 const int o_stride_d,
+                                 const scalar_t* input,
+                                 const float* freqs,
+                                 scalar_t* output)
+{
+    auto stream = at::cuda::getCurrentCUDAStream();
+
+    int warps_per_block = h < 16 ? 4 : 8;
+    dim3 blocks(s, b);
+    dim3 threads(C10_WARP_SIZE, warps_per_block);
+
+    fused_rope_forward<<<blocks, threads, 0, stream>>>(h,
+                                                       d,
+                                                       d2,
+                                                       stride_s,
+                                                       stride_b,
+                                                       stride_h,
+                                                       stride_d,
+                                                       o_stride_s,
+                                                       o_stride_b,
+                                                       o_stride_h,
+                                                       o_stride_d,
+                                                       input,
+                                                       freqs,
+                                                       output);
+    C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+template <typename scalar_t>
+void dispatch_fused_rope_backward(const int s,
+                                  const int b,
+                                  const int h,
+                                  const int d,
+                                  const int d2,
+                                  const int stride_s,
+                                  const int stride_b,
+                                  const int stride_h,
+                                  const int stride_d,
+                                  const int o_stride_s,
+                                  const int o_stride_b,
+                                  const int o_stride_h,
+                                  const int o_stride_d,
+                                  const scalar_t* output_grads,
+                                  const float* freqs,
+                                  scalar_t* input_grads)
+{
+    auto stream = at::cuda::getCurrentCUDAStream();
+
+    int warps_per_block = h < 16 ? 4 : 8;
+    dim3 blocks(s, b);
+    dim3 threads(C10_WARP_SIZE, warps_per_block);
+
+    fused_rope_backward<<<blocks, threads, 0, stream>>>(h,
+                                                        d,
+                                                        d2,
+                                                        stride_s,
+                                                        stride_b,
+                                                        stride_h,
+                                                        stride_d,
+                                                        o_stride_s,
+                                                        o_stride_b,
+                                                        o_stride_h,
+                                                        o_stride_d,
+                                                        output_grads,
+                                                        freqs,
+                                                        input_grads);
+    C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+template <typename scalar_t_0, typename scalar_t_1>
+void dispatch_fused_rope_cached_forward(const int s,
+                                        const int b,
+                                        const int h,
+                                        const int d,
+                                        const int d2,
+                                        const int stride_s,
+                                        const int stride_b,
+                                        const int stride_h,
+                                        const int stride_d,
+                                        const int o_stride_s,
+                                        const int o_stride_b,
+                                        const int o_stride_h,
+                                        const int o_stride_d,
+                                        const scalar_t_0* input,
+                                        const scalar_t_1* cos,
+                                        const scalar_t_1* sin,
+                                        scalar_t_0* output)
+{
+    auto stream = at::cuda::getCurrentCUDAStream();
+
+    int warps_per_block = h < 16 ? 4 : 8;
+    dim3 blocks(s, b);
+    dim3 threads(C10_WARP_SIZE, warps_per_block);
+
+    fused_rope_cached_forward<<<blocks, threads, 0, stream>>>(h,
+                                                              d,
+                                                              d2,
+                                                              stride_s,
+                                                              stride_b,
+                                                              stride_h,
+                                                              stride_d,
+                                                              o_stride_s,
+                                                              o_stride_b,
+                                                              o_stride_h,
+                                                              o_stride_d,
+                                                              input,
+                                                              cos,
+                                                              sin,
+                                                              output);
+    C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+template <typename scalar_t_0, typename scalar_t_1>
+void dispatch_fused_rope_cached_backward(const int s,
+                                         const int b,
+                                         const int h,
+                                         const int d,
+                                         const int d2,
+                                         const int stride_s,
+                                         const int stride_b,
+                                         const int stride_h,
+                                         const int stride_d,
+                                         const int o_stride_s,
+                                         const int o_stride_b,
+                                         const int o_stride_h,
+                                         const int o_stride_d,
+                                         const scalar_t_0* output_grads,
+                                         const scalar_t_1* cos,
+                                         const scalar_t_1* sin,
+                                         scalar_t_0* input_grads)
+{
+    auto stream = at::cuda::getCurrentCUDAStream();
+
+    int warps_per_block = h < 16 ? 4 : 8;
4 : 8; + dim3 blocks(s, b); + dim3 threads(C10_WARP_SIZE, warps_per_block); + + fused_rope_cached_backward<<>>(h, + d, + d2, + stride_s, + stride_b, + stride_h, + stride_d, + o_stride_s, + o_stride_b, + o_stride_h, + o_stride_d, + output_grads, + cos, + sin, + input_grads); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} diff --git a/megatron/fused_kernels/fused_rotary_positional_embedding_cuda.cu b/megatron/fused_kernels/fused_rotary_positional_embedding_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..6b54662bcb0cba1f5b1a2695df9015a9c32a334b --- /dev/null +++ b/megatron/fused_kernels/fused_rotary_positional_embedding_cuda.cu @@ -0,0 +1,336 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "fused_rotary_positional_embedding.h" +#include "type_shim.h" + +namespace fused_rope { + +torch::Tensor fwd_cuda(const torch::Tensor& input, + const torch::Tensor& freqs, + const bool transpose_output) +{ + // input sizes: (s, b, h, d) + // s: sequence length + // b: batch size + // h: head num + // d: dim of each head + const int s = input.size(0); + const int b = input.size(1); + const int h = input.size(2); + const int d = input.size(3); + // input strides + const int stride_s = input.stride(0); + const int stride_b = input.stride(1); + const int stride_h = input.stride(2); + const int stride_d = input.stride(3); + // freqs' shape is always (s, 1, 1, d2), so the strides are same under + // different memory formats + const int d2 = freqs.size(3); + + // output + auto act_options = input.options().requires_grad(false); + torch::Tensor output; + if (transpose_output) { + output = torch::empty({b, s, h, d}, act_options).transpose(0, 1); + } else { + output = torch::empty({s, b, h, d}, act_options); + } + // output strides + const int o_stride_s = output.stride(0); + const int o_stride_b = output.stride(1); + const int o_stride_h = output.stride(2); + const int o_stride_d = output.stride(3); + + DISPATCH_FLOAT_HALF_AND_BFLOAT(input.scalar_type(), + 0, + "dispatch_fused_rope_forward", + dispatch_fused_rope_forward(s, + b, + h, + d, + d2, + stride_s, + stride_b, + stride_h, + stride_d, + o_stride_s, + o_stride_b, + o_stride_h, + o_stride_d, + input.data_ptr(), + freqs.data_ptr(), + output.data_ptr());); + return output; +} + +torch::Tensor bwd_cuda(const torch::Tensor& output_grads, + const torch::Tensor& freqs, + const bool transpose_output) +{ + // output_grads sizes: (s, b, h, d) + // s: sequence length + // b: batch size + // h: head num + // d: dim of each head + const int s = output_grads.size(0); + const int b = output_grads.size(1); + const int h = output_grads.size(2); + const int d = output_grads.size(3); + // output_grads strides + const int stride_s = output_grads.stride(0); + const int stride_b = output_grads.stride(1); + const int stride_h = output_grads.stride(2); + const int stride_d = output_grads.stride(3); + // freqs' shape is always (s, 1, 1, d2), so 
the strides are same under + // different memory formats + const int d2 = freqs.size(3); + + auto act_options = output_grads.options().requires_grad(false); + torch::Tensor input_grads; + if (transpose_output) { + input_grads = torch::empty({b, s, h, d}, act_options).transpose(0, 1); + } else { + input_grads = torch::empty({s, b, h, d}, act_options); + } + const int o_stride_s = input_grads.stride(0); + const int o_stride_b = input_grads.stride(1); + const int o_stride_h = input_grads.stride(2); + const int o_stride_d = input_grads.stride(3); + + DISPATCH_FLOAT_HALF_AND_BFLOAT( + output_grads.scalar_type(), + 0, + "dispatch_fused_rope_backward", + dispatch_fused_rope_backward(s, + b, + h, + d, + d2, + stride_s, + stride_b, + stride_h, + stride_d, + o_stride_s, + o_stride_b, + o_stride_h, + o_stride_d, + output_grads.data_ptr(), + freqs.data_ptr(), + input_grads.data_ptr());); + return input_grads; +} + +#define DISPATCH_FUSED_ROPE_TYPES(TYPE1, TYPE2, NAME, ...) \ + switch (TYPE1) { \ + case at::ScalarType::Float: { \ + using scalar_t_0 = float; \ + switch (TYPE2) { \ + case at::ScalarType::Float: { \ + using scalar_t_1 = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + TORCH_CHECK(false, \ + #NAME, \ + " not supported for '", \ + toString(TYPE1), \ + "' with '", \ + toString(TYPE2), \ + "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_0 = at::Half; \ + switch (TYPE2) { \ + case at::ScalarType::Float: { \ + using scalar_t_1 = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_1 = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + TORCH_CHECK(false, \ + #NAME, \ + " not supported for '", \ + toString(TYPE1), \ + "' with '", \ + toString(TYPE2), \ + "'"); \ + } \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_0 = at::BFloat16; \ + switch (TYPE2) { \ + case at::ScalarType::Float: { \ + using scalar_t_1 = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_1 = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + TORCH_CHECK(false, \ + #NAME, \ + " not supported for '", \ + toString(TYPE1), \ + "' with '", \ + toString(TYPE2), \ + "'"); \ + } \ + break; \ + } \ + default: \ + TORCH_CHECK(false, \ + #NAME, \ + " not supported for '", \ + toString(TYPE1), \ + "' with '", \ + toString(TYPE2), \ + "'"); \ + } + +torch::Tensor fwd_cached_cuda(const torch::Tensor& input, + const torch::Tensor& cos, + const torch::Tensor& sin, + const bool transpose_output) +{ + // input sizes: (s, b, h, d) + // s: sequence length + // b: batch size + // h: head num + // d: dim of each head + const int s = input.size(0); + const int b = input.size(1); + const int h = input.size(2); + const int d = input.size(3); + // input strides + const int stride_s = input.stride(0); + const int stride_b = input.stride(1); + const int stride_h = input.stride(2); + const int stride_d = input.stride(3); + // cos/sin's shape is always (s, 1, 1, d2), so the strides are same under + // different memory formats + const int d2 = cos.size(3); + + // output + auto act_options = input.options().requires_grad(false); + torch::Tensor output; + if (transpose_output) { + output = torch::empty({b, s, h, d}, act_options).transpose(0, 1); + } else { + output = torch::empty({s, b, h, d}, act_options); + } + // output strides + const int o_stride_s = output.stride(0); + const int o_stride_b = output.stride(1); + const int o_stride_h = output.stride(2); + const int o_stride_d = 
output.stride(3); + + DISPATCH_FUSED_ROPE_TYPES(input.scalar_type(), + cos.scalar_type(), + "dispatch_fused_rope_cached_forward", + dispatch_fused_rope_cached_forward(s, + b, + h, + d, + d2, + stride_s, + stride_b, + stride_h, + stride_d, + o_stride_s, + o_stride_b, + o_stride_h, + o_stride_d, + input.data_ptr(), + cos.data_ptr(), + sin.data_ptr(), + output.data_ptr());); + return output; +} + +torch::Tensor bwd_cached_cuda(const torch::Tensor& output_grads, + const torch::Tensor& cos, + const torch::Tensor& sin, + const bool transpose_output) +{ + // output_grads sizes: (s, b, h, d) + // s: sequence length + // b: batch size + // h: head num + // d: dim of each head + const int s = output_grads.size(0); + const int b = output_grads.size(1); + const int h = output_grads.size(2); + const int d = output_grads.size(3); + // output_grads strides + const int stride_s = output_grads.stride(0); + const int stride_b = output_grads.stride(1); + const int stride_h = output_grads.stride(2); + const int stride_d = output_grads.stride(3); + // cos/sin's shape is always (s, 1, 1, d2), so the strides are same under + // different memory formats + const int d2 = cos.size(3); + + auto act_options = output_grads.options().requires_grad(false); + torch::Tensor input_grads; + if (transpose_output) { + input_grads = torch::empty({b, s, h, d}, act_options).transpose(0, 1); + } else { + input_grads = torch::empty({s, b, h, d}, act_options); + } + const int o_stride_s = input_grads.stride(0); + const int o_stride_b = input_grads.stride(1); + const int o_stride_h = input_grads.stride(2); + const int o_stride_d = input_grads.stride(3); + + DISPATCH_FUSED_ROPE_TYPES( + output_grads.scalar_type(), + cos.scalar_type(), + "dispatch_fused_rope_cached_backward", + dispatch_fused_rope_cached_backward(s, + b, + h, + d, + d2, + stride_s, + stride_b, + stride_h, + stride_d, + o_stride_s, + o_stride_b, + o_stride_h, + o_stride_d, + output_grads.data_ptr(), + cos.data_ptr(), + sin.data_ptr(), + input_grads.data_ptr());); + return input_grads; +} +} // end namespace fused_rope diff --git a/megatron/fused_kernels/scaled_masked_softmax.cpp b/megatron/fused_kernels/scaled_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6a210cc373bc9bbd839334e6e8bd90f7a1dbee0f --- /dev/null +++ b/megatron/fused_kernels/scaled_masked_softmax.cpp @@ -0,0 +1,83 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
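The cached wrappers above complete the RoPE extension: the first d2 dimensions of every head are rotated and the tail is copied through. As a sanity check for the kernels, here is a minimal CPU sketch of that rotation for one head vector, assuming the non-cached kernel derives cos/sin from its `freqs` argument; the function name and float-only types are illustrative, not part of the extension.

```cpp
#include <cmath>

// CPU reference for one head vector at a fixed position: rotate the first d2
// dims (rotate_half convention, matching the indexing in fused_rope_forward),
// copy the remaining d - d2 dims unchanged.
void rope_forward_reference(const float* src, const float* freqs, float* dst,
                            int d, int d2)
{
    for (int i = 0; i < d2; ++i) {
        float c = std::cos(freqs[i]);
        float s = std::sin(freqs[i]);
        // partner element lives d2/2 away; negated for the first half
        float rot = (i + d2 / 2 < d2) ? -src[i + d2 / 2] : src[i + d2 / 2 - d2];
        dst[i] = src[i] * c + rot * s;
    }
    for (int i = d2; i < d; ++i) dst[i] = src[i];  // tail is a plain copy
}
```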
+ */
+
+#include <cuda_fp16.h>
+#include <torch/extension.h>
+#include <vector>
+
+namespace multihead_attn {
+namespace fused_softmax {
+namespace scaled_masked_softmax {
+
+torch::Tensor fwd_cuda(torch::Tensor const& input, torch::Tensor const& mask, float scale_factor);
+
+torch::Tensor bwd_cuda(torch::Tensor const& output_grads,
+                       torch::Tensor const& softmax_results,
+                       float scale_factor);
+
+int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads);
+
+torch::Tensor fwd(torch::Tensor const& input, torch::Tensor const& mask, float scale_factor)
+{
+    AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
+    AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
+                   (input.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+    AT_ASSERTM(mask.dim() == 4, "expected 4D tensor");
+
+    return fwd_cuda(input, mask, scale_factor);
+}
+
+torch::Tensor bwd(torch::Tensor const& output_grads,
+                  torch::Tensor const& softmax_results,
+                  float scale_factor)
+{
+    AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
+    AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");
+
+    AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
+                   (output_grads.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+    AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
+                   (softmax_results.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+
+    return bwd_cuda(output_grads, softmax_results, scale_factor);
+}
+
+int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads)
+{
+    return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads);
+}
+
+} // end namespace scaled_masked_softmax
+} // end namespace fused_softmax
+} // end namespace multihead_attn
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+{
+    m.def("forward",
+          &multihead_attn::fused_softmax::scaled_masked_softmax::fwd,
+          "Self Multihead Attention scaled, time masked softmax -- Forward.");
+
+    m.def("backward",
+          &multihead_attn::fused_softmax::scaled_masked_softmax::bwd,
+          "Self Multihead Attention scaled, time masked softmax -- Backward.");
+
+    m.def("get_batch_per_block",
+          &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block,
+          "Return Batch per block size.");
+}
diff --git a/megatron/fused_kernels/scaled_masked_softmax.h b/megatron/fused_kernels/scaled_masked_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..a594a13c1c692f2eb6b30974c28e53e746abe9f7
--- /dev/null
+++ b/megatron/fused_kernels/scaled_masked_softmax.h
@@ -0,0 +1,550 @@
+/* coding=utf-8
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
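The bindings above fix the shape contract: a 4D score tensor and a 4D byte mask. A hypothetical caller, sketched below in the torch C++ API, shows the shapes and dtypes `fwd` accepts; the tensor sizes and head dim are assumptions, and mask entries equal to 1 mark the positions the kernel suppresses (they are set to -10000 before the softmax).

```cpp
#include <cmath>
#include <torch/torch.h>

// Illustrative only: input [b, np, sq, sk] in fp16/bf16, mask [1 or b, 1, sq, sk]
// as uint8. Requires linking against the extension that defines fwd().
torch::Tensor masked_softmax_example()
{
    auto scores = torch::randn({4, 16, 128, 128},
                               torch::dtype(torch::kHalf).device(torch::kCUDA));
    auto mask = torch::zeros({4, 1, 128, 128},
                             torch::dtype(torch::kUInt8).device(torch::kCUDA));
    float scale = 1.0f / std::sqrt(64.0f);  // 1/sqrt(head_dim); head_dim 64 assumed
    return multihead_attn::fused_softmax::scaled_masked_softmax::fwd(scores, mask, scale);
}
```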
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype* dst, const Datatype* src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16* dst, + const c10::BFloat16* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16* dst, + const c10::BFloat16* src) +{ + *((float2*)dst) = *((float2*)src); +} + +template <> +__device__ __inline__ void copy_vector(c10::Half* dst, const c10::Half* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(c10::Half* dst, const c10::Half* src) +{ + *((float2*)dst) = *((float2*)src); +} + +template <> +__device__ __inline__ void copy_vector(uint8_t* dst, const uint8_t* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(uint8_t* dst, const uint8_t* src) +{ + *((half2*)dst) = *((half2*)src); +} + +int log2_ceil(int value) +{ + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } +}; + +template +__device__ __forceinline__ T +WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) +{ + ReduceOp r; +#pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Explicit masking + */ +template +__global__ void scaled_masked_softmax_warp_forward(output_t* dst, + const input_t* src, + const uint8_t* mask, + const acc_t scale, + int micro_batch_size, + int element_count, + int pad_batches) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two + : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = + (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z)) + + threadIdx.y) * + WARP_BATCH; + int pad_first_batch = 0; + if (pad_batches != 1) { // bert style + pad_first_batch = + (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) * WARP_BATCH; + } else { // gpt2 style + pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + } + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. 
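+    /* worked example, assuming key_seq_len rounds up to 128 (log2_elements == 7):
+       WARP_SIZE = 32, WARP_ITERATIONS = 128 / 32 = 4, WARP_BATCH = 2 rows per
+       warp, ELEMENTS_PER_LDG_STG = 4-wide vectorized loads/stores */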
+ int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + uint8_t temp_mask[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + int itr_idx = i * element_count + it * WARP_SIZE; + copy_vector(temp_data, src + itr_idx); + copy_vector(temp_mask, mask + itr_idx); + +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (temp_mask[element] != 1) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -10000.0; + } + } + } else { +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; +#pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH]{0.0f}; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) break; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = elements[i][it + element] / sum[i]; + } + copy_vector( + dst + i * element_count + it * WARP_SIZE, out); + } else { + break; + } + } + } +} + +template +__global__ void scaled_masked_softmax_warp_backward(output_t* gradInput, + input_t* grad, + const input_t* output, + acc_t scale, + int micro_batch_size, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two + : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 
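+                                        /* vectorize 4-wide only when each thread owns at least four
+                                           elements of the row; otherwise fall back to scalar copies */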
1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f}; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f}; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector( + temp_grad, grad + i * element_count + it * WARP_SIZE); + copy_vector( + temp_output, output + i * element_count + it * WARP_SIZE); + +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + grad_reg[i][it + element] = + (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + + acc_t sum[WARP_BATCH]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; +#pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += grad_reg[i][it]; } + } + warp_reduce(sum); + +// store result +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) break; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - + output_reg[i][it + element] * sum[i])); + } + copy_vector( + gradInput + i * element_count + it * WARP_SIZE, out); + } + } + } +} +} // end of anonymous namespace + +int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads) +{ + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + + int batch_count = batches * attn_heads * query_seq_len; + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + constexpr int threads_per_block = 128; + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + + return batches_per_block; +} + +template +void dispatch_scaled_masked_softmax_forward(output_t* dst, + const input_t* src, + const uint8_t* mask, + const input_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads, + int pad_batches) +{ + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside + // softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + dim3 blocks(query_seq_len / batches_per_block, attn_heads, batches); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 1: // 2 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 2: // 4 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 3: // 8 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 4: // 16 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 5: // 32 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 6: // 64 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 7: // 128 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 8: // 256 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 9: // 512 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 10: // 1024 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 11: // 2048 + scaled_masked_softmax_warp_forward + <<>>( + dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + default: break; + } + } +} + +template +void dispatch_scaled_masked_softmax_backward(output_t* grad_input, + input_t* grad, + const input_t* output, + const acc_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) +{ + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE 
constexpr value computed inside + // softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside + // softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = batch_count / batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 1: // 2 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 2: // 4 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 3: // 8 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 4: // 16 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 5: // 32 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 6: // 64 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 7: // 128 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 8: // 256 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 9: // 512 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 10: // 1024 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 11: // 2048 + scaled_masked_softmax_warp_backward + <<>>( + grad_input, grad, output, scale, batch_count, key_seq_len); + break; + default: break; + } + } +} diff --git a/megatron/fused_kernels/scaled_masked_softmax_cuda.cu b/megatron/fused_kernels/scaled_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..7479713ec406774ec3b26b3e396a5f41add9f9f3 --- /dev/null +++ b/megatron/fused_kernels/scaled_masked_softmax_cuda.cu @@ -0,0 +1,111 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
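The backward dispatcher above feeds the warp kernel, which for each row computes dx_j = scale * y_j * (dy_j - sum_k dy_k * y_k). A minimal CPU sketch of that row-wise gradient, useful for cross-checking the kernel; the function name is illustrative and not part of the extension.

```cpp
#include <cstddef>
#include <vector>

// y: softmax output row, dy: incoming gradient row.
std::vector<float> softmax_backward_reference(const std::vector<float>& y,
                                              const std::vector<float>& dy,
                                              float scale)
{
    float dot = 0.0f;  // sum_k dy_k * y_k, the warp-reduced `sum` in the kernel
    for (std::size_t k = 0; k < y.size(); ++k) dot += dy[k] * y[k];

    std::vector<float> dx(y.size());
    for (std::size_t j = 0; j < y.size(); ++j)
        dx[j] = scale * (dy[j] * y[j] - y[j] * dot);  // = scale * y_j * (dy_j - dot)
    return dx;
}
```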
+ */ + +#include +#include +#include +#include +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include +#include +#include "scaled_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_masked_softmax { + +int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads) +{ + return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads); +} + +torch::Tensor fwd_cuda(torch::Tensor const& input, torch::Tensor const& mask, float scale_factor) +{ + // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = input.size(0); + const int pad_batches = mask.size(0); + const int attn_heads = input.size(1); + const int query_seq_len = input.size(2); + const int key_seq_len = input.size(3); + TORCH_INTERNAL_ASSERT(key_seq_len <= 2048); + TORCH_INTERNAL_ASSERT(query_seq_len > 1); + TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches); + TORCH_INTERNAL_ASSERT(mask.size(1) == 1); + TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len); + TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* mask_ptr = static_cast(mask.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT(input.scalar_type(), + "dispatch_scaled_masked_softmax_forward", + dispatch_scaled_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(mask_ptr), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads, + pad_batches);); + return softmax_results; +} + +torch::Tensor bwd_cuda(torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) +{ + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + // output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = output_grads.size(0); + const int attn_heads = output_grads.size(1); + const int query_seq_len = output_grads.size(2); + const int key_seq_len = output_grads.size(3); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + // Softmax Grad + DISPATCH_HALF_AND_BFLOAT(output_grads_.scalar_type(), + "dispatch_scaled_masked_softmax_backward", + dispatch_scaled_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads);); + + // backward pass is completely in-place + return output_grads; +} +} // namespace scaled_masked_softmax +} // namespace fused_softmax +} // namespace multihead_attn diff --git a/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp b/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cedd649a2ce08ce1cd0e362f8c6b72473cf3fa0d --- /dev/null +++ b/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp @@ -0,0 +1,70 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
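The host wrappers above size the grid as (sq / batches_per_block, np, b). A stand-alone rehearsal of that launch-geometry arithmetic, with sample sizes that are assumptions and a 32-thread warp assumed:

```cpp
#include <cstdio>

// Mirrors the arithmetic in dispatch_scaled_masked_softmax_forward.
int main()
{
    int batches = 8, attn_heads = 16, query_seq_len = 1024, key_seq_len = 1024;

    int log2_elements = 0;
    while ((1 << log2_elements) < key_seq_len) ++log2_elements;        // 10
    int next_power_of_two = 1 << log2_elements;                        // 1024

    int warp_size = next_power_of_two < 32 ? next_power_of_two : 32;   // 32
    int batches_per_warp = next_power_of_two <= 128 ? 2 : 1;           // 1
    int warps_per_block = 128 / warp_size;                             // 4
    int batches_per_block = warps_per_block * batches_per_warp;        // 4

    std::printf("grid (%d, %d, %d), block (%d, %d, 1)\n",
                query_seq_len / batches_per_block, attn_heads, batches,
                warp_size, warps_per_block);
    return 0;
}
```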
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cuda_fp16.h>
+#include <torch/extension.h>
+#include <vector>
+
+namespace multihead_attn {
+namespace fused_softmax {
+namespace scaled_upper_triang_masked_softmax {
+
+torch::Tensor fwd_cuda(torch::Tensor const& input, float scale_factor);
+
+torch::Tensor bwd_cuda(torch::Tensor const& output_grads,
+                       torch::Tensor const& softmax_results,
+                       float scale_factor);
+
+torch::Tensor fwd(torch::Tensor const& input, float scale_factor)
+{
+    AT_ASSERTM(input.dim() == 3, "expected 3D tensor");
+    AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
+                   (input.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+
+    return fwd_cuda(input, scale_factor);
+}
+
+torch::Tensor bwd(torch::Tensor const& output_grads,
+                  torch::Tensor const& softmax_results,
+                  float scale_factor)
+{
+    AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor");
+    AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor");
+
+    AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
+                   (output_grads.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+    AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
+                   (softmax_results.scalar_type() == at::ScalarType::BFloat16),
+               "Only fp16 and bf16 are supported");
+
+    return bwd_cuda(output_grads, softmax_results, scale_factor);
+}
+
+} // end namespace scaled_upper_triang_masked_softmax
+} // end namespace fused_softmax
+} // end namespace multihead_attn
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+{
+    m.def("forward",
+          &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd,
+          "Self Multihead Attention scaled, time masked softmax -- Forward.");
+    m.def("backward",
+          &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd,
+          "Self Multihead Attention scaled, time masked softmax -- Backward.");
+}
diff --git a/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h b/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..b48afeee4c8208b4a9b598e71b73be11db3153b2
--- /dev/null
+++ b/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h
@@ -0,0 +1,633 @@
+/* coding=utf-8
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
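Unlike the explicitly masked variant, the upper-triangular kernels apply the causal mask implicitly: row r of an (n x n) score matrix attends to columns 0..r only, and the masked tail is written as exact zeros (the `copy_zero_vector` path in the kernel). A minimal CPU sketch of that per-row behavior, with an illustrative function name:

```cpp
#include <algorithm>
#include <cmath>

// One row of causal softmax; `valid` plays the role of local_seq in the kernel.
void causal_softmax_row(const float* scores, float* out, int row, int n, float scale)
{
    int valid = row + 1;
    float max_v = -INFINITY;
    for (int j = 0; j < valid; ++j) max_v = std::max(max_v, scores[j] * scale);

    float sum = 0.0f;
    for (int j = 0; j < valid; ++j) sum += std::exp(scores[j] * scale - max_v);

    for (int j = 0; j < valid; ++j) out[j] = std::exp(scores[j] * scale - max_v) / sum;
    for (int j = valid; j < n; ++j) out[j] = 0.0f;  // upper triangle zeroed
}
```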
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype* dst, const Datatype* src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16* dst, + const c10::BFloat16* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16* dst, + const c10::BFloat16* src) +{ + *((float2*)dst) = *((float2*)src); +} + +template <> +__device__ __inline__ void copy_vector(c10::Half* dst, const c10::Half* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(c10::Half* dst, const c10::Half* src) +{ + *((float2*)dst) = *((float2*)src); +} + +template <> +__device__ __inline__ void copy_vector(uint8_t* dst, const uint8_t* src) +{ + *dst = *src; +} + +template <> +__device__ __inline__ void copy_vector(uint8_t* dst, const uint8_t* src) +{ + *((half2*)dst) = *((half2*)src); +} + +template +__device__ __inline__ void copy_zero_vector(Datatype* dst); + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16* dst) +{ + *dst = 0.0; +} + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16* dst) +{ + *((float2*)dst) = make_float2(0.0f, 0.0f); +} + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half* dst) +{ + *dst = 0.0; +} + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half* dst) +{ + *((float2*)dst) = make_float2(0.0f, 0.0f); +} + +int log2_ceil(int value) +{ + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } +}; + +template +__device__ __forceinline__ T +WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) +{ + ReduceOp r; +#pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Implicit time (diagonal masking) + */ +template +__global__ void scaled_upper_triang_masked_softmax_warp_forward(output_t* dst, + const input_t* src, + const acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two + : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 
1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + int warp_iteration_limit = (local_seq + ELEMENTS_PER_LDG_STG * WARP_SIZE - 1) / WARP_SIZE; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + copy_vector( + temp_data, src + i * element_count * stride + it * WARP_SIZE); + +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if ((element_index + element) < batch_element_count) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } else { +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; +#pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH]{0.0f}; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (it < warp_iteration_limit) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) break; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < local_seq) { +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < local_seq) { + out[element] = elements[i][it + element] / sum[i]; + } else { + out[element] = 0; + } + } + copy_vector( + dst + i * element_count * stride + it * WARP_SIZE, out); + } else if (element_index < element_count) { + copy_zero_vector(dst + i * element_count * stride + + it * WARP_SIZE); + } else { + break; + } + } + } +} + +template +__global__ void scaled_upper_triang_masked_softmax_warp_backward(output_t* gradInput, + input_t* grad, + const input_t* output, + acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. 
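+    /* note: an ELEMENTS_PER_LDG_STG of 4 packs four 16-bit values into 8 bytes,
+       which is why the copy_vector specializations above reinterpret their
+       pointers as float2 and issue a single vectorized load/store */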
+ constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two + : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f}; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f}; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector( + temp_grad, grad + i * element_count * stride + it * WARP_SIZE); + copy_vector( + temp_output, output + i * element_count * stride + it * WARP_SIZE); + +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } + } +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + grad_reg[i][it + element] = + (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + } + + acc_t sum[WARP_BATCH]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; +#pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += grad_reg[i][it]; } + } + warp_reduce(sum); + +// store result +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) break; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; +#pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - + output_reg[i][it + element] * sum[i])); + } + copy_vector( + gradInput + i * element_count * stride + it * WARP_SIZE, out); + } + } + } +} + +} // end of anonymous namespace + +template +void dispatch_scaled_upper_triang_masked_softmax_forward(output_t* dst, + const input_t* src, + const input_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int 
seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside + // softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_forward + <<>>( + dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + default: break; + } + } +} + +template +void dispatch_scaled_upper_triang_masked_softmax_backward(output_t* grad_input, + input_t* grad, + const input_t* output, + const acc_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside + // softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? 
next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside + // softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_backward + <<>>( + grad_input, + grad, + output, + scale, + batch_count, + softmax_elements_stride, + softmax_elements); + break; + default: break; + } + } +} diff --git a/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu b/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..475c168334428a65ca759d8a64ab0c60972d08fb --- /dev/null +++ b/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu @@ -0,0 +1,93 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
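The switch statements above exist because C++ has no "for constexpr": `log2_ceil` rounds every sequence length up to a power of two and routes it to one of twelve template instantiations. A stand-alone copy demonstrating the rounding:

```cpp
#include <cassert>

// Same loop as log2_ceil in the headers above, copied here for illustration.
int log2_ceil_copy(int value)
{
    int log2_value = 0;
    while ((1 << log2_value) < value) ++log2_value;
    return log2_value;
}

int main()
{
    assert(log2_ceil_copy(1) == 0);      // case 0
    assert(log2_ceil_copy(100) == 7);    // 100 rounds up to 128 -> case 7
    assert(log2_ceil_copy(2048) == 11);  // largest supported length -> case 11
    return 0;
}
```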
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include +#include +#include "scaled_upper_triang_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_upper_triang_masked_softmax { + +torch::Tensor fwd_cuda(torch::Tensor const& input, float scale_factor) +{ + // input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = input.size(0); + const int seq_len = input.size(1); + TORCH_INTERNAL_ASSERT(seq_len <= 2048); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = torch::empty({attn_batches, seq_len, seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_forward", + dispatch_scaled_upper_triang_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + scale_factor, + seq_len, + seq_len, + attn_batches);); + return softmax_results; +} + +torch::Tensor bwd_cuda(torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) +{ + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + // output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = output_grads.size(0); + const int seq_len = output_grads.size(1); + TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2)); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + // Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_backward", + dispatch_scaled_upper_triang_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + seq_len, + seq_len, + attn_batches);); + + // backward pass is completely in-place + return output_grads; +} +} // namespace scaled_upper_triang_masked_softmax +} // namespace fused_softmax +} // namespace multihead_attn diff --git a/megatron/fused_kernels/setup.py b/megatron/fused_kernels/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..528a20676cfc721f74635723eda49c2ee1888d6e --- /dev/null +++ b/megatron/fused_kernels/setup.py @@ -0,0 +1,90 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
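Both `bwd_cuda` wrappers above are fully in-place: `grad` and `gradInput` alias the same buffer, since `output_grads_ptr` is passed for both arguments. This is safe because each thread loads its gradient values into registers (`temp_grad`) before it stores anything. A minimal sketch of the same read-before-write pattern, with an illustrative function name:

```cpp
// Aliasing input == output is harmless when every element is read into a
// register before its slot is overwritten, as in the backward kernels above.
void scale_in_place(float* buf, int n, float s)
{
    for (int i = 0; i < n; ++i) {
        float v = buf[i];  // read first...
        buf[i] = v * s;    // ...then overwrite
    }
}
```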
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup, find_packages
+from torch.utils import cpp_extension
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+from pathlib import Path
+import subprocess
+
+
+def _get_cuda_bare_metal_version(cuda_dir):
+    raw_output = subprocess.check_output(
+        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
+    )
+    output = raw_output.split()
+    release_idx = output.index("release") + 1
+    release = output[release_idx].split(".")
+    bare_metal_major = release[0]
+    bare_metal_minor = release[1][0]
+
+    return raw_output, bare_metal_major, bare_metal_minor
+
+
+srcpath = Path(__file__).parent.absolute()
+cc_flag = []
+_, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
+if int(bare_metal_major) >= 11:
+    cc_flag.append("-gencode")
+    cc_flag.append("arch=compute_80,code=sm_80")
+
+nvcc_flags = [
+    "-O3",
+    "-gencode",
+    "arch=compute_70,code=sm_70",
+    "--use_fast_math",
+    "-U__CUDA_NO_HALF_OPERATORS__",
+    "-U__CUDA_NO_HALF_CONVERSIONS__",
+    "--expt-relaxed-constexpr",
+    "--expt-extended-lambda",
+]
+cuda_ext_args = {"cxx": ["-O3"], "nvcc": nvcc_flags + cc_flag}
+layernorm_cuda_args = {
+    "cxx": ["-O3"],
+    "nvcc": nvcc_flags + cc_flag + ["-maxrregcount=50"],
+}
+setup(
+    name="fused_kernels",
+    version="0.0.2",
+    author="EleutherAI",
+    author_email="contact@eleuther.ai",
+    include_package_data=False,
+    ext_modules=[
+        CUDAExtension(
+            name="scaled_upper_triang_masked_softmax_cuda",
+            sources=[
+                str(srcpath / "scaled_upper_triang_masked_softmax.cpp"),
+                str(srcpath / "scaled_upper_triang_masked_softmax_cuda.cu"),
+            ],
+            extra_compile_args=cuda_ext_args,
+        ),
+        CUDAExtension(
+            name="scaled_masked_softmax_cuda",
+            sources=[
+                str(srcpath / "scaled_masked_softmax.cpp"),
+                str(srcpath / "scaled_masked_softmax_cuda.cu"),
+            ],
+            extra_compile_args=cuda_ext_args,
+        ),
+        CUDAExtension(
+            name="fused_rotary_positional_embedding",
+            sources=[
+                str(srcpath / "fused_rotary_positional_embedding.cpp"),
+                str(srcpath / "fused_rotary_positional_embedding_cuda.cu"),
+            ],
+            extra_compile_args=cuda_ext_args,
+        ),
+    ],
+    cmdclass={"build_ext": BuildExtension},
+)
diff --git a/megatron/fused_kernels/type_shim.h b/megatron/fused_kernels/type_shim.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b1c8927d4c540dac35345b95dca5cb9e90677f2
--- /dev/null
+++ b/megatron/fused_kernels/type_shim.h
@@ -0,0 +1,338 @@
+#include <ATen/ATen.h>
+#include "compat.h"
+
+// Forward/backward compatibility hack around
+// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
+// pending more future-proof guidance from upstream.
+// struct TypeShim
+// {
+//     const at::Type& payload;
+//     TypeShim(const at::Type& type) : payload(type) {}
+//     // Enable trivial conversion to a const at::Type& for pre-3aeb78
+//     operator const at::Type&(){ return payload; };
+//     // Enable dispatch switch statements to take *this directly for post-3aeb78
+//     //operator at::ScalarType(){ return payload.; };
+// };
+
+#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
+    switch (TYPE) { \
+        case at::ScalarType::Float: { \
+            using scalar_t_##LEVEL = float; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        case at::ScalarType::Half: { \
+            using scalar_t_##LEVEL = at::Half; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
+    }
+
+#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, LEVEL, NAME, ...)
\ + switch (TYPE) { \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_##LEVEL = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BYTE(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Byte: { \ + using scalar_t_##LEVEL = uint8_t; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Double: { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Double: { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t_##LEVEL = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Double: { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) { \ + case at::ScalarType::Half: { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\
+    switch (TYPEIN) { \
+        case at::ScalarType::Float: { \
+            using scalar_t_in = float; \
+            switch (TYPEOUT) { \
+                case at::ScalarType::Float: { \
+                    using scalar_t_out = float; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::Half: { \
+                    using scalar_t_out = at::Half; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::BFloat16: { \
+                    using scalar_t_out = at::BFloat16; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                default: AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
+            } \
+            break; \
+        } \
+        case at::ScalarType::Half: { \
+            using scalar_t_in = at::Half; \
+            using scalar_t_out = at::Half; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        case at::ScalarType::BFloat16: { \
+            using scalar_t_in = at::BFloat16; \
+            using scalar_t_out = at::BFloat16; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \
+    }
+
+#define DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) \
+    switch (TYPEIN) { \
+        case at::ScalarType::Double: { \
+            using scalar_t_in = double; \
+            switch (TYPEOUT) { \
+                case at::ScalarType::Double: { \
+                    using scalar_t_out = double; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::Float: { \
+                    using scalar_t_out = float; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::Half: { \
+                    using scalar_t_out = at::Half; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::BFloat16: { \
+                    using scalar_t_out = at::BFloat16; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                default: AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
+            } \
+            break; \
+        } \
+        case at::ScalarType::Float: { \
+            using scalar_t_in = float; \
+            switch (TYPEOUT) { \
+                case at::ScalarType::Float: { \
+                    using scalar_t_out = float; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::Half: { \
+                    using scalar_t_out = at::Half; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                case at::ScalarType::BFloat16: { \
+                    using scalar_t_out = at::BFloat16; \
+                    __VA_ARGS__; \
+                    break; \
+                } \
+                default: AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
+            } \
+            break; \
+        } \
+        case at::ScalarType::Half: { \
+            using scalar_t_in = at::Half; \
+            using scalar_t_out = at::Half; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        case at::ScalarType::BFloat16: { \
+            using scalar_t_in = at::BFloat16; \
+            using scalar_t_out = at::BFloat16; \
+            __VA_ARGS__; \
+            break; \
+        } \
+        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \
+    }
+
+template <typename T>
+__device__ __forceinline__ T
+reduce_block_into_lanes(T* x,
+                        T val,
+                        int lanes = 1,
+                        bool share_result = false)  // lanes is intended to be <= 32.
+{
+    int tid = threadIdx.x + threadIdx.y * blockDim.x;
+    int blockSize = blockDim.x * blockDim.y;  // blockSize is intended to be a multiple of 32.
+
+    if (blockSize >= 64) {
+        x[tid] = val;
+        __syncthreads();
+    }
+
+#pragma unroll
+    for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
+        if (tid < i) x[tid] = x[tid] + x[tid + i];
+        __syncthreads();
+    }
+
+    T final;
+
+    if (tid < 32) {
+        if (blockSize >= 64)
+            final = x[tid] + x[tid + 32];
+        else
+            final = val;
+        // __SYNCWARP();
+
+#pragma unroll
+        for (int i = 16; i >= lanes; i >>= 1)
+            final = final + __shfl_down_sync(0xffffffff, final, i);
+    }
+
+    if (share_result) {
+        if (tid < lanes) x[tid] = final;  // EpilogueOp
+        // Make sure the smem result is visible to all warps.
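+        // (Descriptive note: at this point lanes 0..lanes-1 of the first warp
+        // hold the block-wide partial results; the barrier below publishes them
+        // through shared memory so every thread in the block can read them.)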
+        __syncthreads();
+    }
+
+    return final;
+}
+
+template <typename T>
+__device__ __forceinline__ T
+reduce_block_into_lanes_max_op(T* x,
+                               T val,
+                               int lanes = 1,
+                               bool share_result = false)  // lanes is intended to be <= 32.
+{
+    int tid = threadIdx.x + threadIdx.y * blockDim.x;
+    int blockSize = blockDim.x * blockDim.y;  // blockSize is intended to be a multiple of 32.
+
+    if (blockSize >= 64) {
+        x[tid] = val;
+        __syncthreads();
+    }
+
+#pragma unroll
+    for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
+        if (tid < i) x[tid] = fmaxf(fabsf(x[tid]), fabsf(x[tid + i]));
+        __syncthreads();
+    }
+
+    T final;
+
+    if (tid < 32) {
+        if (blockSize >= 64)
+            final = fmaxf(fabsf(x[tid]), fabsf(x[tid + 32]));
+        else
+            final = val;
+        // __SYNCWARP();
+
+#pragma unroll
+        for (int i = 16; i >= lanes; i >>= 1)
+            final = fmaxf(fabsf(final), fabsf(__shfl_down_sync(0xffffffff, final, i)));
+    }
+
+    if (share_result) {
+        if (tid < lanes) x[tid] = final;  // EpilogueOp
+        // Make sure the smem result is visible to all warps.
+        __syncthreads();
+    }
+
+    return final;
+}
diff --git a/megatron/gradient_noise_scale/__init__.py b/megatron/gradient_noise_scale/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b0d07e94ada9b64a0a054fa003f1e3c90a2bd07
--- /dev/null
+++ b/megatron/gradient_noise_scale/__init__.py
@@ -0,0 +1 @@
+from .gradient_noise_scale import GradientNoiseScale
diff --git a/megatron/gradient_noise_scale/gradient_noise_scale.py b/megatron/gradient_noise_scale/gradient_noise_scale.py
new file mode 100644
index 0000000000000000000000000000000000000000..71076ffc3d4992602b57d052d9d76c5b1ccd9b3f
--- /dev/null
+++ b/megatron/gradient_noise_scale/gradient_noise_scale.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+def ema(avg, beta, yi, i):
+    """Exponential moving average with Adam-style bias correction."""
+    if avg is None:
+        avg = 0
+    avg = beta * avg + (1 - beta) * yi
+    # return both the raw EMA and the bias-corrected estimate
+    return avg, avg / (1 - beta ** (i + 1))
+
+
+class GradientNoiseScale:
+    """
+    A class to measure the gradient noise scale of a model while training (cf. https://arxiv.org/abs/1812.06162).
+
+    The core thesis of the paper is that, if our batch size is small, there will be a lot of noise present in the
+    gradients, and we might update our weights mostly on noise. After several updates the optimizer may still push
+    us in the right direction, but we would be better off having used a larger batch size, which is more
+    computationally efficient and directly averages out the noise in the gradients.
+
+    But there is a limit to the gains large batch sizes can give us: if, beyond a certain batch size, the gradient
+    is already accurate, there is no point in increasing the batch size further, since we would just be wasting
+    compute for little to no gain in accuracy.
+
+    This means there is some theoretically optimal batch size for a given model, which measuring the gradient noise
+    scale can help us to estimate.
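+
+    (Illustrative numbers only, not from any real run: if a small batch of 32 sequences gives a squared
+    gradient norm of 1.2, while averaging ten such batches — an effective batch of 320 — gives 0.3, the
+    estimators below yield tr(Σ) ≈ 0.9 / (1/32 - 1/320) = 32 and |G|^2 ≈ (320*0.3 - 32*1.2) / (320 - 32) = 0.2,
+    i.e. a simple noise scale of roughly 32 / 0.2 = 160.)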
+ + To estimate the 'simple' noise scale (Bsimple), we need to have a measure of the gradients using a large batch size (Bbig) and a small + batch size (Bsmall). + + when we have those: + Bsimple ≈ (tr(Σ) / |G|^2) + + tr(Σ) can be approximated by: + tr(Σ) ≈ (1 / ((1/Bsmall) - (1/Bbig))) * (|Gsmall|^2 - |Gbig|^2) + + and |G|^2 by: + |G|^2 ≈ (1 / (Bbig - Bsmall)) * (Bbig*|Gbig|^2 - Bsmall*|Gsmall|^2) + + - With multi-gpu training, we can do this by taking the gradients of the microbatch_size_per_gpu for Bsmall, + and the gradients of the entire batch for Bbig. + - Alternatively, we can just take Bsmall as a single batch, and Bbig as several sequential batches in a row. + This is the option we've opted for in this implementation because a) it's easier to implement and b) also works in + single-gpu environments. Unfortunately it does come with some memory overhead. + """ + + def __init__( + self, + model, + batch_size_small, + n_batches=10, + beta=0.99, + cpu_offload=False, + neox_args=None, + mpu=None, + ): + self.batch_size_small = batch_size_small + self.batch_size_large = batch_size_small * n_batches + self.n_batches = n_batches + self.beta = beta + self.model = model + self.buffer = None + self.ema_scale = None + self.ema_noise = None + self.noise_scale = None + self.n_updates = 0 + self.cpu_offload = cpu_offload + self.model.store_gradients = True + self.model.store_gradients_cpu = cpu_offload + self.neox_args = neox_args + self.mpu = mpu + + def flatten_grads(self): + grads = [] + assert hasattr( + self.model, "stored_gradients" + ), "You might need to update DeeperSpeed" + if self.model.stored_gradients is not None: + for g in self.model.stored_gradients: + if g is not None and not g.isnan().any() and not g.isinf().any(): + g = g.flatten().view(-1, 1) + if self.cpu_offload: + g = g.cpu() + grads.append(g) + else: + return None + if not grads: + return None + return torch.cat(grads) + + def _sync_overflow(self, is_overflow): + if self.neox_args.is_pipe_parallel: + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the pipe parallel GPUs + overflow_gpu = torch.cuda.ByteTensor([is_overflow]) + torch.distributed.all_reduce( + overflow_gpu, + op=torch.distributed.ReduceOp.MAX, + group=self.mpu.get_pipe_parallel_group(), + ) + overflow = overflow_gpu[0].item() + else: + overflow = is_overflow + return overflow + + def _update(self): + + grad = self.flatten_grads() + is_overflow = self._sync_overflow(grad is None) + if is_overflow: + return + if self.buffer is None: + self.buffer = grad + else: + self.buffer += grad + if self.n_updates % self.n_batches == self.n_batches - 1: + # average grads every n_batches iteration to get a simulation of Bbig + self.buffer /= self.n_batches + grads = self.buffer + self.buffer = None + + # calculate Gbig and Gsmall + # this needs to be done in fp32 or it overflows + if self.neox_args.is_pipe_parallel: + + g_big = torch.square(torch.norm(grads.to(torch.float))) + g_small = torch.square(torch.norm(grad.to(torch.float))) + + # we need to put the tensors back on gpu to do the allreduce + if self.cpu_offload: + g_big = g_big.to(self.model.device) + g_small = g_small.to(self.model.device) + + # avg g_big / g_small across pipe parallel groups + torch.distributed.all_reduce( + g_big, + op=torch.distributed.ReduceOp.SUM, + group=self.mpu.get_pipe_parallel_group(), + ) + torch.distributed.all_reduce( + g_small, + op=torch.distributed.ReduceOp.SUM, + group=self.mpu.get_pipe_parallel_group(), + ) + g_big /= 
self.mpu.get_pipe_parallel_world_size() + g_small /= self.mpu.get_pipe_parallel_world_size() + + else: + g_big = torch.square(torch.norm(grads.to(torch.float))) + g_small = torch.square(torch.norm(grad.to(torch.float))) + + # communicate any overflows + is_overflow = ( + g_small.isinf().any() + or g_small.isnan().any() + or g_big.isinf().any() + or g_big.isnan().any() + ) + is_overflow = self._sync_overflow(is_overflow) + if is_overflow: + return + + # calculate noise / scale + noise = ( + 1 + / (self.batch_size_large - self.batch_size_small) + * (self.batch_size_large * g_big - self.batch_size_small * g_small) + ) + scale = ( + 1 + / (1 / self.batch_size_small - 1 / self.batch_size_large) + * (g_small - g_big) + ) + + # calculate running average + self.ema_noise, noise = ema( + self.ema_noise, self.beta, noise, self.n_updates + ) + self.ema_scale, scale = ema( + self.ema_scale, self.beta, scale, self.n_updates + ) + + # calculate noise scale + scale = scale.item() + noise = noise.item() + self.noise_scale = scale / noise + + self.n_updates += 1 + + def update(self): + if self.neox_args.is_pipe_parallel: + # update on all ranks + self._update() + else: + # for mp / dp only, the grads will be the same across all ranks, so we can just do the process on a single rank + if torch.distributed.get_rank() == 0: + # only update on 0th rank + self._update() + torch.distributed.barrier() diff --git a/megatron/initialize.py b/megatron/initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..29afe7f9af028908d1ce51344a71de7009eaea60 --- /dev/null +++ b/megatron/initialize.py @@ -0,0 +1,235 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Megatron initialization.""" + +import random +import os + +import numpy as np +import torch + +from megatron import fused_kernels +from megatron import mpu +from megatron.mpu import set_model_parallel_rank, set_model_parallel_world_size + +import deepspeed +import inspect + + +def initialize_megatron(neox_args, allow_no_cuda=False): + """Set initialize distributed and set autoresume and random seeds. + `allow_no_cuda` should not be set unless using megatron for cpu only + data processing. In general this arg should not be set unless you know + what you are doing. + Returns a function to finalize distributed env initialization + (optionally, only when args.lazy_mpu_init == True) + """ + if not allow_no_cuda: + # Make sure cuda is available. + assert torch.cuda.is_available(), "Megatron requires CUDA." + + # torch.distributed initialization + def finish_mpu_init(): + # Pytorch distributed. + _initialize_distributed(neox_args=neox_args) + + # Random seeds for reproducibility. 
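+        # (Note: by this point _initialize_distributed() has already offset
+        # neox_args.seed for interior pipeline stages -- see the topology code
+        # there -- so different stages intentionally seed differently.)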
+ if neox_args.rank == 0: + print("> setting random seeds to {} ...".format(neox_args.seed)) + _set_random_seed(neox_args.seed) + + # check fused kernels are installed: + if ( + neox_args.scaled_upper_triang_masked_softmax_fusion + or neox_args.scaled_masked_softmax_fusion + or neox_args.rope_fusion + ): + fused_kernels.load(neox_args) + fused_kernels.load_fused_kernels() + + if neox_args.lazy_mpu_init: + neox_args.use_cpu_initialization = True + # delayed initialization of DDP-related stuff + # We only set basic DDP globals + set_model_parallel_world_size(neox_args.model_parallel_size) + # and return function for external DDP manager to call when it has DDP initialized + set_model_parallel_rank(neox_args.rank) + return finish_mpu_init + else: + # Megatron's MPU is the master. Complete initialization right away. + finish_mpu_init() + + # Compile dataset C++ code. + if neox_args.local_rank == 0: + from megatron.data.data_utils import compile_helper + + compile_helper() + + # Write arguments to tensorboard. + _write_args_to_tensorboard(neox_args=neox_args) + # No continuation function + return None + + +def setup_deepspeed_random_and_activation_checkpointing(neox_args): + """Optional DeepSpeed Activation Checkpointing features. + Gives access to partition activations, contiguous memory optimizations + and cpu checkpointing. + + Activation checkpoint requires keep track of the random states + and setting the random seed for each MP process. Megatron uses + mpu.get_cuda_rng_tracker and mpu.model_parallel_cuda_manual_seed + for keeping track of the random states and setting the random seeds. + Since they are used in places outside of activation checkpointing, + we overwrite them to maintain consistency. + + This must be called before all the calls to mpu.model_parallel_cuda_manual_seed + """ + num_layers = neox_args.num_layers // neox_args.checkpoint_num_layers + num_layers = ( + num_layers + if neox_args.num_layers % neox_args.checkpoint_num_layers == 0 + else num_layers + 1 + ) + + deepspeed.checkpointing.configure( + mpu, + partition_activations=neox_args.partition_activations, + contiguous_checkpointing=neox_args.contiguous_checkpointing, + num_checkpoints=num_layers, + checkpoint_in_cpu=neox_args.checkpoint_in_cpu, + synchronize=neox_args.synchronize_each_layer, + profile=neox_args.profile_backward, + ) + + +def _initialize_distributed(neox_args): + """Initialize torch.distributed and mpu.""" + + device_count = torch.cuda.device_count() + if torch.distributed.is_initialized(): + + if neox_args.rank == 0: + print( + "torch distributed is already initialized, " + "skipping initialization ...", + flush=True, + ) + neox_args.rank = torch.distributed.get_rank() + neox_args.world_size = torch.distributed.get_world_size() + + else: + + if neox_args.rank == 0: + print("> initializing torch distributed ...", flush=True) + # Manually set the device ids. + if device_count > 0: + device = neox_args.rank % device_count + if neox_args.local_rank is not None: + assert ( + neox_args.local_rank == device + ), "expected local-rank to be the same as rank % device-count." + else: + neox_args.local_rank = device + torch.cuda.set_device(device) + + deepspeed.init_distributed( + dist_backend=neox_args.distributed_backend, + auto_mpi_discovery=True, + distributed_port=os.getenv("MASTER_PORT", "6000"), + verbose=True, + ) + + # Setup 3D topology. 
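+    # (Illustrative sizing with made-up values: for world_size = 16,
+    # pipe_parallel_size = 2 and model_parallel_size = 2, the data-parallel
+    # degree computed below works out to dp = 16 // (2 * 2) = 4.)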
+ pp = neox_args.pipe_parallel_size if neox_args.pipe_parallel_size >= 1 else 1 + mp = neox_args.model_parallel_size if neox_args.model_parallel_size >= 1 else 1 + assert ( + neox_args.world_size % (pp * mp) == 0 + ), f"world_size={neox_args.world_size}, pp={pp}, mp={mp}" + dp = neox_args.world_size // (pp * mp) + + from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology + + # this does pipe on the most outside, then data, then model. + # PipeModelDataParallelTopology is just a wrapper over ProcessTopology that predefines this order. + topo = PipeModelDataParallelTopology(num_pp=pp, num_mp=mp, num_dp=dp) + + # Offset base seeds for the interior pipeline stages. + # TODO: adjust last stage too once IO is improved. + stage_id = topo.get_coord(rank=torch.distributed.get_rank()).pipe + if 0 < stage_id < topo.get_dim("pipe") - 1: + offset = neox_args.seed + 1138 + neox_args.seed = offset + (stage_id * mp) + + # Set the model-parallel / data-parallel communicators. + if device_count > 0: + if mpu.model_parallel_is_initialized(): + print( + "_initialize_distributed() model parallel is already initialized", + flush=True, + ) + else: + mpu.initialize_model_parallel( + neox_args.model_parallel_size, + topology=topo, + fp32_allreduce=neox_args.fp32_allreduce, + ) + + # Init DeepSpeed Activation Checkpointing Features + setup_deepspeed_random_and_activation_checkpointing(neox_args=neox_args) + + +def _init_autoresume(neox_args): + """Set autoresume start time.""" + + if neox_args.adlr_autoresume: + print_rank_0("> enabling autoresume ...") + sys.path.append(os.environ.get("SUBMIT_SCRIPTS", ".")) + try: + from userlib.auto_resume import AutoResume + except BaseException: + print("> ADLR autoresume is not available, exiting ...", flush=True) + sys.exit() + neox_args.adlr_autoresume_object = AutoResume + + if neox_args.adlr_autoresume_object: + torch.distributed.barrier() + neox_args.adlr_autoresume_object.init() + torch.distributed.barrier() + + +def _set_random_seed(seed): + """Set random seed for reproducibility.""" + if seed is not None and seed > 0: + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.device_count() > 0: + mpu.model_parallel_cuda_manual_seed(seed) + else: + raise ValueError("Seed ({}) should be a positive integer.".format(seed)) + + +def _write_args_to_tensorboard(neox_args): + + """Write arguments to tensorboard.""" + if neox_args.tensorboard_writer: + for arg_name in vars(neox_args): + neox_args.tensorboard_writer.add_text( + arg_name, str(getattr(neox_args, arg_name)) + ) diff --git a/megatron/learning_rates.py b/megatron/learning_rates.py new file mode 100644 index 0000000000000000000000000000000000000000..9db951aa0a21481f5a651dde47605d66e058f5df --- /dev/null +++ b/megatron/learning_rates.py @@ -0,0 +1,148 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
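+#
+# (Worked example of the cosine schedule implemented below, made-up numbers:
+# with start_lr=3.0e-4, min_lr=3.0e-5, warmup_iter=1000 and total_iters=10000,
+# step 1000 returns 3.0e-4 (end of linear warmup), while step 5500 -- halfway
+# through the 9000 decay steps -- returns
+#     3.0e-5 + (3.0e-4 - 3.0e-5) / 2 * (cos(pi * 4500 / 9000) + 1) = 1.65e-4.)
+#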
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Learning rate decay functions.""" + +import math + +from megatron import print_rank_0 + + +class AnnealingLR(object): + """Anneals the learning rate.""" + + def __init__( + self, + optimizer, + start_lr, + warmup_iter, + total_iters, + decay_style, + last_iter, + min_lr=0.0, + use_checkpoint_lr_scheduler=True, + override_lr_scheduler=False, + use_mup=False, + ): + + # Class values. + self.optimizer = optimizer + self.start_lr = start_lr + self.min_lr = min_lr + self.warmup_iter = warmup_iter + self.num_iters = last_iter + self.end_iter = total_iters + assert self.end_iter > 0 + self.decay_style = decay_style + self.override_lr_scheduler = override_lr_scheduler + self.use_checkpoint_lr_scheduler = use_checkpoint_lr_scheduler + self.use_mup = use_mup + if self.override_lr_scheduler: + assert not self.use_checkpoint_lr_scheduler, ( + "both override and " "use-checkpoint are set." + ) + # Set the learning rate + self.step(self.num_iters) + + print_rank_0("> learning rate decay style: {}".format(self.decay_style)) + + def get_lr(self): + """Learning rate decay functions from: + https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" + + num_iters_ = self.num_iters + # Warmup. + if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter: + return float(self.start_lr) * num_iters_ / self.warmup_iter + + num_iters_ = num_iters_ - self.warmup_iter + if self.decay_style == "linear": + end_iter_ = self.end_iter - self.warmup_iter + lr = self.start_lr * (end_iter_ - num_iters_) / end_iter_ + elif self.decay_style == "cosine": + end_iter_ = self.end_iter - self.warmup_iter + lr = self.min_lr + ( + (self.start_lr - self.min_lr) + / 2.0 + * (math.cos(math.pi * num_iters_ / end_iter_) + 1) + ) + elif self.decay_style == "exponential": + # exp(-0.693) = 1/2 + end_iter = self.end_iter - self.warmup_iter + lr = self.start_lr * math.exp(-0.693 * num_iters_ / end_iter) + else: + lr = self.start_lr + return max(lr, self.min_lr) + + def step(self, step_num=None): + """Set lr for all parameters groups.""" + if step_num is None: + step_num = self.num_iters + 1 + self.num_iters = step_num + new_lr = self.get_lr() + for group in self.optimizer.param_groups: + if self.use_mup and "width_mult" in group: + group["lr"] = new_lr / group["width_mult"] + else: + group["lr"] = new_lr + + def state_dict(self): + state_dict = { + "start_lr": self.start_lr, + "warmup_iter": self.warmup_iter, + "num_iters": self.num_iters, + "decay_style": self.decay_style, + "end_iter": self.end_iter, + "min_lr": self.min_lr, + } + return state_dict + + def _check_and_set(self, cls_value, sd_value, name): + """Auxiliary function for checking the values in the checkpoint and + setting them.""" + if self.override_lr_scheduler: + print_rank_0(" > overriding {} value to {}".format(name, cls_value)) + return cls_value + + if not self.use_checkpoint_lr_scheduler: + assert cls_value == sd_value, ( + "AnnealingLR: class input value" + "and checkpoint values for {} do not match".format(name) + ) + print_rank_0(" > using checkpoint value {} for {}".format(sd_value, name)) + return sd_value + + def load_state_dict(self, sd): + + self.start_lr = self._check_and_set( + self.start_lr, sd["start_lr"], "learning rate" + ) + self.min_lr = self._check_and_set( + self.min_lr, sd["min_lr"], "minimum learning rate" + ) + self.warmup_iter = self._check_and_set( + self.warmup_iter, sd["warmup_iter"], "warmup iterations" + ) + self.end_iter = 
self._check_and_set( + self.end_iter, sd["end_iter"], "total number of iterations" + ) + self.decay_style = self._check_and_set( + self.decay_style, sd["decay_style"], "decay style" + ) + + self.num_iters = sd["num_iters"] + self.step(self.num_iters) diff --git a/megatron/logging.py b/megatron/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..af8a41fe5d9b82109d499418840bebaacd9fbbac --- /dev/null +++ b/megatron/logging.py @@ -0,0 +1,456 @@ +# Copyright (c) 2024, EleutherAI. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +import torch + +try: + import wandb +except ModuleNotFoundError: + pass + +from megatron import mpu, print_rank_0 +from megatron.utils import report_memory +import math + + +class Tee: + """Duplicate output to both stdout/err and file""" + + def __init__(self, file, err: bool = False) -> None: + self.file = open(file, "w") + self.err = err + if not err: + self.std = sys.stdout + sys.stdout = self + else: + self.std = sys.stderr + sys.stderr = self + + def __del__(self) -> None: + if not self.err: + sys.stdout = self.std + else: + sys.stderr = self.std + self.file.close() + + def write(self, data) -> None: + try: + self.file.write(data) + except OSError: + pass + try: + self.std.write(data) + except OSError: + pass + + def flush(self) -> None: + try: + self.file.flush() + except OSError: + pass + + +def human_readable_flops(num) -> str: + for unit in [ + "", + "KFLOPS", + "MFLOPS", + "GFLOPS", + "TFLOPS", + "PFLOPS", + "EFLOPS", + "ZFLOPS", + ]: + if abs(num) < 1000.0: + return "%3.1f%s" % (num, unit) + num /= 1000.0 + return "%.1f%s" % (num, "Yi") + + +def get_flops(neox_args, iter_time_s) -> float: + """ + Use FLOPS calculation from Megatron-DeepSpeed: + https://github.com/microsoft/Megatron-DeepSpeed/blob/cc3a94c636789f74be2bc6cfc62a3d723fd5d749/megatron/utils.py#L253 + They get it from https://arxiv.org/pdf/2104.04473.pdf + """ + world_size = torch.distributed.get_world_size() + vocab_size = neox_args.padded_vocab_size + batch_size = neox_args.train_batch_size + seq_len = neox_args.seq_length + hidden_size = neox_args.hidden_size + num_layers = neox_args.num_layers + ckpt_activations_factor = 4 if neox_args.checkpoint_activations else 3 + if "rwkv" in neox_args.attention_config: + num_heads = neox_args.num_attention_heads + + flops_per_iteration = ( + batch_size + * seq_len + * ( + 78 * hidden_size * hidden_size * num_layers + + 84 * hidden_size * num_layers + + 16 * hidden_size + + 12 * hidden_size * vocab_size + + 18 * hidden_size * hidden_size * num_layers / num_heads + ) + ) + elif "mamba" in neox_args.attention_config: + # from https://github.com/Zyphra/zcookbook/blob/main/calc/calc_mamba_flops.py + if neox_args.expansion_factor: + d_inner = neox_args.hidden_size * neox_args.expansion_factor + elif neox_args.intermediate_size: + d_inner = neox_args.intermediate_size + else: + d_inner = neox_args.hidden_size * 2 # default expansion factor + d_state = 16 # TODO make d_state an arg. 
Currently hardcoded in neox mamba definition and here + conv_dimension = 4 # TODO make conv_dimension an arg. Currently hardcoded in neox mamba definition and here + dt_rank = math.ceil(neox_args.hidden_size / 16) + ssm_flops = ( + ckpt_activations_factor + * d_inner + * seq_len + * batch_size + * (11 * d_state + 4 * dt_rank + 1) + ) + mamba_projectors_flops = ( + ckpt_activations_factor * seq_len * batch_size * 6 * d_inner * hidden_size + ) + mamba_conv_flops = ( + ckpt_activations_factor + * seq_len + * batch_size + * 2 + * d_inner + * conv_dimension + ) + mamba_flops = ssm_flops + mamba_projectors_flops + mamba_conv_flops + embedding_flops = 6 * seq_len * batch_size * hidden_size * vocab_size + flops_per_iteration = mamba_flops * num_layers + embedding_flops + else: + flops_per_iteration = ( + 24 + * ckpt_activations_factor + * batch_size + * seq_len + * num_layers + * (hidden_size**2) + * ( + 1.0 + + (seq_len / (6.0 * hidden_size)) + + (vocab_size / (16.0 * num_layers * hidden_size)) + ) + ) + return flops_per_iteration / (iter_time_s * world_size) + + +def training_log( + neox_args, + timers, + loss_dict, + total_loss_dict, + learning_rate, + iteration, + loss_scale, + report_memory_flag, + skipped_iter, + model, + optimizer, + noise_scale_logger, +): + """Log training information such as losses, timing, etc.""" + + # Update losses. + skipped_iters_key = "skipped iterations" + total_loss_dict[skipped_iters_key] = ( + total_loss_dict.get(skipped_iters_key, 0) + skipped_iter + ) + got_nan_key = "got nan" + + got_nan = False + for key in loss_dict: + if not skipped_iter: + total_loss_dict[key] = total_loss_dict.get(key, 0.0) + loss_dict[key] + else: + value = loss_dict[key].float().sum().item() + is_nan = value == float("inf") or value == -float("inf") or value != value + got_nan = got_nan or is_nan + + total_loss_dict[got_nan_key] = total_loss_dict.get(got_nan_key, 0) + int(got_nan) + + # Logging. + timers_to_log = [] + + def add_to_logging(name): + if name in timers.timers: + timers_to_log.append(name) + + if not neox_args.is_pipe_parallel: + add_to_logging("forward") + add_to_logging("backward") + add_to_logging("backward-backward") + add_to_logging("backward-allreduce") + add_to_logging("backward-master-grad") + add_to_logging("backward-clip-grad") + add_to_logging("optimizer") + add_to_logging("batch generator") + + # Log timer info to tensorboard and wandb + normalizer = iteration % neox_args.log_interval + if normalizer == 0: + normalizer = neox_args.log_interval + if torch.distributed.get_rank() == 0: + timers.write( + names=timers_to_log, iteration=iteration, normalizer=normalizer + ) + else: + # with pipeline parallel, the megatron timers are overridden by the deepspeed ones. + # Try to grab timer values from model engine. Only recently added to deeperspeed, so check that the engine + # has that attribute first + if hasattr(model, "timer_values") and model.timer_values is not None: + if ( + model.wall_clock_breakdown() + and model.global_steps % model.steps_per_print() == 0 + ): + timer_values = model.timer_values + # deepspeed already logs to tensorboard / prints values, so just log to wandb + if neox_args.use_wandb and torch.distributed.get_rank() == 0: + for key in timer_values: + tb_wandb_log( + f"timers/{key}", + timer_values[key], + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # write losses, lr, etc. 
every step + tb_wandb_log( + "train/learning_rate", + learning_rate, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + for key in loss_dict: + tb_wandb_log( + f'train/{key.replace(" ", "_")}', + loss_dict[key], + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + if neox_args.fp16: + tb_wandb_log( + f"train/loss_scale", + loss_scale, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # log gradient noise scale + if neox_args.log_gradient_noise_scale: + if noise_scale_logger.noise_scale is not None: + tb_wandb_log( + f"train/noise_scale", + noise_scale_logger.noise_scale, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # (optional) Log optimizer states to wandb / tb every step + if neox_args.log_optimizer_states: + for k, v in optimizer.state_dict()["optimizer_state_dict"]["state"].items(): + for ki, vi in v.items(): # step, module + if ki != "step": + opt_state_norm = torch.norm(vi) if hasattr(vi, "dim") else vi + tb_wandb_log( + f"optimizer_state_norms/{k}_{ki}", + opt_state_norm, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # (optional) Log grad/param norms to wandb / tb every step + if ( + neox_args.log_grad_pct_zeros + or neox_args.log_grad_norm + or neox_args.log_param_norm + ): + if neox_args.log_grad_pct_zeros or neox_args.log_grad_norm: + model.store_gradients = True # start storing gradients + + for i, (name, param) in enumerate(model.module.named_parameters()): + if neox_args.log_grad_pct_zeros: + if ( + hasattr(model, "stored_gradients") + and model.stored_gradients is not None + ): + grad = model.stored_gradients[i] + if grad is not None: + tb_wandb_log( + f"pct_grad_zeros/{name}", + (grad == 0).float().mean().item() * 100, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + all_ranks=True, + ) + if neox_args.log_grad_norm: + if ( + hasattr(model, "stored_gradients") + and model.stored_gradients is not None + ): + grad = model.stored_gradients[i] + if grad is not None: + tb_wandb_log( + f"gradient_norms/{name}", + torch.norm(grad), + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + all_ranks=True, + ) + if neox_args.log_param_norm: + tb_wandb_log( + f"parameter_norms/{name}", + torch.norm(param), + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + all_ranks=True, + ) + + if iteration % neox_args.log_interval == 0: + # log other stuff every neox_args.log_interval iters + elapsed_time = timers("interval time").elapsed() + iteration_time = elapsed_time / neox_args.log_interval + samples_per_sec = neox_args.train_batch_size / iteration_time + log_string = " samples/sec: {:.3f} |".format(samples_per_sec) + tb_wandb_log( + "runtime/samples_per_sec", + samples_per_sec, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + 
comet_experiment=neox_args.comet_experiment, + ) + tb_wandb_log( + "runtime/iteration_time", + iteration_time, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + log_string += " iteration {:8d}/{:8d} |".format( + iteration, neox_args.train_iters + ) + log_string += " elapsed time per iteration (ms): {:.1f} |".format( + elapsed_time * 1000.0 / neox_args.log_interval + ) + log_string += " learning rate: {:.3E} |".format(learning_rate) + num_iterations = max( + 1, neox_args.log_interval - total_loss_dict[skipped_iters_key] + ) + + # log curriculum learning + if neox_args.curriculum_learning: + tb_wandb_log( + "curriculum_seqlen", + neox_args.curriculum_seqlen, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # log tflop / gpu + flops_per_s_per_gpu = get_flops(neox_args, iteration_time) + + log_string += ( + f" approx flops per GPU: {human_readable_flops(flops_per_s_per_gpu)} |" + ) + tb_wandb_log( + "runtime/flops_per_sec_per_gpu", + flops_per_s_per_gpu, + iteration, + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + for key in total_loss_dict: + if key not in [skipped_iters_key, got_nan_key]: + v = ( + total_loss_dict[key].item() + if hasattr(total_loss_dict[key], "item") + else total_loss_dict[key] + ) + avg = v / float(num_iterations) + log_string += " {}: {:.6E} |".format(key, avg) + total_loss_dict[key] = 0.0 + if neox_args.precision == "fp16": + log_string += " loss scale: {:.1f} |".format(loss_scale) + log_string += " number of skipped iterations: {:3d} |".format( + total_loss_dict[skipped_iters_key] + ) + log_string += " number of nan iterations: {:3d} |".format( + total_loss_dict[got_nan_key] + ) + total_loss_dict[skipped_iters_key] = 0 + total_loss_dict[got_nan_key] = 0 + print_rank_0(log_string) + if report_memory_flag: + report_memory("after {} iterations".format(iteration)) + report_memory_flag = False + + timers.log(timers_to_log, normalizer=neox_args.log_interval) + + return report_memory_flag + + +def tb_wandb_log( + key: str, + value: float, + iteration_no: int, + use_wandb: bool, + tensorboard_writer=None, + comet_experiment=None, + all_ranks: bool = False, +): + # logs to both tb and wandb (if present) from the zeroth rank + do_log = torch.distributed.get_rank() == 0 or all_ranks + if do_log and value is not None: + if tensorboard_writer: + tensorboard_writer.add_scalar(key, value, iteration_no) + if use_wandb: + wandb.log({key: value}, step=iteration_no) + if comet_experiment: + comet_experiment.__internal_api__log_metric__( + key, value, framework="gpt-neox", step=iteration_no + ) diff --git a/megatron/model/__init__.py b/megatron/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..23be2893650fbd49c88918477dc9db34e25f64ec --- /dev/null +++ b/megatron/model/__init__.py @@ -0,0 +1,23 @@ +# +# Copyright 2024 Biderman et al. This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .gpt2_model import GPT2ModelPipe +from .utils import ( + get_params_for_weight_decay_optimization, + mark_norms_for_sequence_parallel_grad_sync, +) +from .word_embeddings import SoftEmbedding diff --git a/megatron/model/activations.py b/megatron/model/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..c0b8252616f98d5ba0fdab82165d34f8557e4097 --- /dev/null +++ b/megatron/model/activations.py @@ -0,0 +1,135 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn.functional as F + +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) + + +def get_activation(neox_args): + """retrieves the activation function specified in neox_args and whether or not the activation is gated""" + is_gated = False + if neox_args.activation == "geglu": + is_gated = True + activation_func = F.gelu + elif neox_args.activation == "reglu": + is_gated = True + activation_func = F.relu + elif neox_args.activation == "bilinear": + is_gated = True + activation_func = lambda x: x + elif neox_args.activation == "swiglu": + is_gated = True + activation_func = swish + elif neox_args.activation == "glu": + is_gated = True + activation_func = F.sigmoid + elif neox_args.activation == "gelu": + if neox_args.onnx_safe and neox_args.bias_gelu_fusion: + raise ValueError("onnx_safe + bias_gelu_fusion not compatible") + if neox_args.onnx_safe: + activation_func = erf_gelu + elif neox_args.bias_gelu_fusion: + activation_func = bias_gelu_impl + else: + activation_func = F.gelu + elif neox_args.activation == "relu": + activation_func = F.relu + elif neox_args.activation == "softsign": + activation_func = F.softsign + elif neox_args.activation == "swish": + activation_func = swish + elif neox_args.activation == "mish": + activation_func = mish + elif neox_args.activation == "silu": + activation_func = F.silu + else: + raise ValueError(f"Activation function {neox_args.activation} not recognized") + return activation_func, is_gated + + +###### BIAS GELU FUSION/ NO AUTOGRAD ################ +# 1/sqrt(2*pi)-> 0.3989423 +# 1/sqrt(2) -> 0.70710678 +# sqrt(2/pi) -> 0.79788456 +# this function is tanh approximation of gelu +# actual gelu is: +# x * 0.5 * (1.0 + torch.erf(x * 0.70710678)) + + +@torch.jit.script +def bias_gelu(bias, y): + x = bias + 
y + return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) + + +# gradient of tanh approximation of gelu +# gradient of actual gelu is: +# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) +@torch.jit.script +def bias_gelu_back(g, bias, y): + x = bias + y + tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) + # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 + ff = 0.5 * x * ( + (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x) + ) + 0.5 * (1 + tanh_out) + return ff * g + + +class GeLUFunction(torch.autograd.Function): + @staticmethod + # bias is an optional argument + def forward(ctx, input, bias): + ctx.save_for_backward(input, bias) + return bias_gelu(bias, input) + + @staticmethod + def backward(ctx, grad_output): + input, bias = ctx.saved_tensors + tmp = bias_gelu_back(grad_output, bias, input) + return tmp, tmp + + +bias_gelu_impl = GeLUFunction.apply + + +# This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter +@torch.jit.script +def erf_gelu(x): + return ( + x + * 0.5 + * ( + torch.erf(x / 1.41421).to(dtype=x.dtype) + + torch.ones_like(x).to(dtype=x.dtype) + ) + ) + + +@torch.jit.script +def swish(x, beta: float = 1.0): + return x * torch.sigmoid(beta * x) + + +@torch.jit.script +def mish(x): + return x * torch.tanh(F.softplus(x)) diff --git a/megatron/model/fused_bias_dropout.py b/megatron/model/fused_bias_dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..8618a2a7ee9c730d7d423b313d9a96b0548e3f03 --- /dev/null +++ b/megatron/model/fused_bias_dropout.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, EleutherAI contributors +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
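+#
+# (Reference semantics for the helpers below: for tensors x, bias and residual
+# of matching shape, bias_dropout_add computes
+#
+#     residual + dropout(x + bias, p=prob)
+#
+# and the two @torch.jit.script variants are the same computation pre-bound to
+# training/inference mode so TorchScript can fuse them into a single kernel.)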
+ +import torch +import torch.nn.functional as F +from typing import Optional +from torch import Tensor + +# flags required to enable jit fusion kernels +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) + + +def bias_dropout_add( + x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool +) -> Tensor: + out = torch.nn.functional.dropout(x + bias, p=prob, training=training) + if residual is not None: + out = residual + out + return out + + +def get_bias_dropout_add(training): + def _bias_dropout_add(x, bias, residual, prob): + return bias_dropout_add(x, bias, residual, prob, training) + + return _bias_dropout_add + + +@torch.jit.script +def bias_dropout_add_fused_train( + x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float +) -> Tensor: + return bias_dropout_add(x, bias, residual, prob, True) + + +@torch.jit.script +def bias_dropout_add_fused_inference( + x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float +) -> Tensor: + return bias_dropout_add(x, bias, residual, prob, False) diff --git a/megatron/model/fused_layer_norm.py b/megatron/model/fused_layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..3fd251147d9bc6bf7b137c81e36007e9a8155118 --- /dev/null +++ b/megatron/model/fused_layer_norm.py @@ -0,0 +1,262 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""This code is copied from NVIDIA apex: + https://github.com/NVIDIA/apex + with some changes. """ + +import numbers +import torch +from torch.nn.parameter import Parameter +from torch.nn import init +import importlib +from torch.nn import functional as F +import inspect + +from megatron.utils import make_viewless_tensor + +try: + from apex.contrib.layer_norm.layer_norm import FastLayerNormFN + + HAVE_PERSIST_LAYER_NORM = True +except: + HAVE_PERSIST_LAYER_NORM = False + +from apex.normalization.fused_layer_norm import ( + FusedLayerNormAffineFunction, + FusedRMSNormAffineFunction, +) + + +global fused_layer_norm_cuda +fused_layer_norm_cuda = None + + +class MixedFusedLayerNorm(torch.nn.Module): + def __init__( + self, + normalized_shape, + eps=1e-5, + no_persist_layer_norm=True, + sequence_parallel=False, + apply_layernorm_1p=False, + mem_efficient_ln=True, + ): + super(MixedFusedLayerNorm, self).__init__() + + self.apply_layernorm_1p = apply_layernorm_1p + self.mem_efficient_ln = mem_efficient_ln + + global fused_layer_norm_cuda + fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda") + + # List of hiddens sizes supported in the persistent layer norm kernel + # If the hidden size is not supported, fall back to the non-persistent + # kernel. 
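+        # (Reference semantics: whichever kernel is selected, the module
+        # computes y = (x - mean(x)) / sqrt(var(x) + eps) * weight + bias; with
+        # apply_layernorm_1p the learned weight is stored zero-initialized and
+        # applied as (weight + 1), see reset_parameters() and forward().)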
+ persist_ln_hidden_sizes = [ + 1024, + 1536, + 2048, + 2304, + 3072, + 3840, + 4096, + 5120, + 6144, + 8192, + 10240, + 12288, + 12800, + 15360, + 16384, + 18432, + 20480, + 24576, + 25600, + 30720, + 32768, + 40960, + 49152, + 65536, + ] + if ( + normalized_shape not in persist_ln_hidden_sizes + or not HAVE_PERSIST_LAYER_NORM + ): + no_persist_layer_norm = True + + if isinstance(normalized_shape, numbers.Integral): + normalized_shape = (normalized_shape,) + self.normalized_shape = torch.Size(normalized_shape) + self.eps = eps + self.weight = Parameter(torch.Tensor(*normalized_shape)) + self.bias = Parameter(torch.Tensor(*normalized_shape)) + self.reset_parameters() + self.no_persist_layer_norm = no_persist_layer_norm + self.sequence_parallel = sequence_parallel + + # set sequence parallelism flag on weight and bias parameters + setattr(self.weight, "sequence_parallel", self.sequence_parallel) + setattr(self.bias, "sequence_parallel", self.sequence_parallel) + + def reset_parameters(self): + + if self.apply_layernorm_1p: + init.zeros_(self.weight) + init.zeros_(self.bias) + else: + init.ones_(self.weight) + init.zeros_(self.bias) + + def forward(self, input): + + weight = self.weight + 1 if self.apply_layernorm_1p else self.weight + # CPU path is here for unittest sake. + if not input.is_cuda: + print( + "WARNING! The input of FusedLayerNorm should be on the GPU." + "This warning should only be triggered in the FusedLayerNorm unit tests." + ) + return F.layer_norm( + input, self.normalized_shape, weight, self.bias, self.eps + ) + + if self.no_persist_layer_norm: + # Apex does not have versions yet (https://github.com/NVIDIA/apex/pull/1648), so we need to inspect + # the function manually on whether the extra arg introduced in https://github.com/NVIDIA/apex/pull/1715 exists yet + if ( + "memory_efficient" + in inspect.getfullargspec(FusedLayerNormAffineFunction.forward).args + ): + return FusedLayerNormAffineFunction.apply( + input, + weight, + self.bias, + self.normalized_shape, + self.eps, + self.mem_efficient_ln, + ) + else: + return FusedLayerNormAffineFunction.apply( + input, weight, self.bias, self.normalized_shape, self.eps + ) + else: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has + # a populated '_base' field). This will result in schedule.py's + # deallocate_output_tensor() throwing an error, so a viewless tensor is + # created to prevent this. + output = make_viewless_tensor( + inp=output, requires_grad=input.requires_grad, keep_graph=True + ) + + return output + + +class MixedFusedRMSNorm(torch.nn.Module): + def __init__( + self, + normalized_shape, + eps=1e-5, + no_persist_layer_norm=True, + sequence_parallel=False, + apply_rmsnorm_1p=False, + mem_efficient_rms=True, + ): + super(MixedFusedRMSNorm, self).__init__() + + self.apply_rmsnorm_1p = apply_rmsnorm_1p + self.mem_efficient_rms = mem_efficient_rms + self.norm_fn = FusedRMSNormAffineFunction + + global fused_layer_norm_cuda + fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda") + + # List of hiddens sizes supported in the persistent layer norm kernel + # If the hidden size is not supported, fall back to the non-persistent + # kernel. 
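+        # (Reference semantics: RMSNorm drops the mean-centering of LayerNorm,
+        # computing roughly y = x / sqrt(mean(x^2) + eps) * scale; with
+        # apply_rmsnorm_1p the scale is stored zero-initialized and applied as
+        # (scale + 1).)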
+ persist_ln_hidden_sizes = [ + 1024, + 1536, + 2048, + 2304, + 3072, + 3840, + 4096, + 5120, + 6144, + 8192, + 10240, + 12288, + 12800, + 15360, + 16384, + 18432, + 20480, + 24576, + 25600, + 30720, + 32768, + 40960, + 49152, + 65536, + ] + if ( + normalized_shape not in persist_ln_hidden_sizes + or not HAVE_PERSIST_LAYER_NORM + ): + no_persist_layer_norm = True + + if isinstance(normalized_shape, numbers.Integral): + normalized_shape = (normalized_shape,) + self.normalized_shape = torch.Size(normalized_shape) + self.eps = eps + self.scale = Parameter(torch.Tensor(*normalized_shape)) + self.reset_parameters() + self.no_persist_layer_norm = no_persist_layer_norm + self.sequence_parallel = sequence_parallel + + # set sequence parallelism flag on weight and bias parameters + setattr(self.scale, "sequence_parallel", self.sequence_parallel) + + def reset_parameters(self): + + if self.apply_rmsnorm_1p: + init.zeros_(self.scale) + else: + init.ones_(self.scale) + + def forward(self, input): + + weight = self.scale + 1 if self.apply_rmsnorm_1p else self.scale + # CPU path is here for unittest sake. + if not input.is_cuda: + print( + "WARNING! The input of FusedLayerNorm should be on the GPU." + "This warning should only be triggered in the FusedRMSNorm unit tests." + ) + # Latest pytorch actually supports F.rms_norm but I don't want to break builds so... + return F.layer_norm(input, self.normalized_shape, weight, None, self.eps) + + # Apex does not have versions yet (https://github.com/NVIDIA/apex/pull/1648), so we need to inspect + # the function manually on whether the extra arg introduced in https://github.com/NVIDIA/apex/pull/1715 exists yet + if "memory_efficient" in inspect.getfullargspec(self.norm_fn.forward).args: + return self.norm_fn.apply( + input, + weight, + self.normalized_shape, + self.eps, + self.mem_efficient_rms, + ) + else: + return self.norm_fn.apply(input, weight, self.normalized_shape, self.eps) + + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has + # a populated '_base' field). This will result in schedule.py's + # deallocate_output_tensor() throwing an error, so a viewless tensor is + # created to prevent this. + output = make_viewless_tensor( + inp=output, requires_grad=input.requires_grad, keep_graph=True + ) + + return output diff --git a/megatron/model/fused_rope.py b/megatron/model/fused_rope.py new file mode 100644 index 0000000000000000000000000000000000000000..94e96253de64a46f232f828211af2a106e804474 --- /dev/null +++ b/megatron/model/fused_rope.py @@ -0,0 +1,141 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple, Union +import torch + + +class FusedRoPEFunc(torch.autograd.Function): + """ + Fused RoPE function + + This implementation assumes the input tensor to be in `sbhd` format and the RoPE tensor to be + of shape (s, 1, 1, d). 
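+    (Shape illustration with hypothetical sizes: for a 2048-token sequence,
+    batch size 4 and 16 heads of dimension 64, `t` would be [2048, 4, 16, 64]
+    and `freqs` [2048, 1, 1, 64].)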
It accepts arbitrary memory layouts to avoid the expensive
+    `.contiguous()` calls, so it may not achieve the best memory access pattern.
+    """
+
+    @staticmethod
+    def forward(
+        ctx,
+        t: torch.Tensor,
+        freqs: torch.Tensor,
+        transpose_output_memory: bool = False,
+    ) -> torch.Tensor:
+        import fused_rotary_positional_embedding
+
+        output = fused_rotary_positional_embedding.forward(
+            t, freqs, transpose_output_memory
+        )
+        ctx.save_for_backward(freqs)
+        ctx.transpose_output_memory = transpose_output_memory
+
+        return output
+
+    @staticmethod
+    def backward(
+        ctx, grad_output: torch.Tensor
+    ) -> Tuple[Union[torch.Tensor, None], ...]:
+        import fused_rotary_positional_embedding
+
+        (freqs,) = ctx.saved_tensors
+        grad_input = fused_rotary_positional_embedding.backward(
+            grad_output, freqs, ctx.transpose_output_memory
+        )
+
+        return grad_input, None, None
+
+
+def fused_apply_rotary_pos_emb(
+    t: torch.Tensor,
+    freqs: torch.Tensor,
+    transpose_output_memory: bool = False,
+) -> torch.Tensor:
+    """Apply rotary positional embedding to input tensor T.
+
+    Args:
+        t (Tensor): Input tensor T is of shape [s, b, h, d]
+        freqs (Tensor): Rotary positional embedding tensor freqs is of shape [s, 1, 1, d] and
+        `float` dtype
+        transpose_output_memory (bool): Defaults to False. Whether to transpose the 's' and 'b'
+        dimension of the output's underlying memory format. This is very helpful when you want to
+        get a contiguous tensor after calling `output.transpose(0, 1)`.
+
+    Returns:
+        Tensor: The input tensor after applying RoPE
+    """
+    return FusedRoPEFunc.apply(t, freqs, transpose_output_memory)
+
+
+class FusedRoPECachedFunc(torch.autograd.Function):
+    """
+    Fused RoPE function
+
+    This implementation assumes the input tensor to be in `sbhd` format and the RoPE tensor to be
+    of shape (s, 1, 1, d). It accepts arbitrary memory layouts to avoid the expensive
+    `.contiguous()` calls, so it may not achieve the best memory access pattern.
+    """
+
+    @staticmethod
+    def forward(
+        ctx,
+        t: torch.Tensor,
+        cos_: torch.Tensor,
+        sin_: torch.Tensor,
+        transpose_output_memory: bool = False,
+    ) -> torch.Tensor:
+        import fused_rotary_positional_embedding
+
+        output = fused_rotary_positional_embedding.forward_cached(
+            t, cos_, sin_, transpose_output_memory
+        )
+        ctx.save_for_backward(cos_, sin_)
+        ctx.transpose_output_memory = transpose_output_memory
+
+        return output
+
+    @staticmethod
+    def backward(
+        ctx, grad_output: torch.Tensor
+    ) -> Tuple[Union[torch.Tensor, None], ...]:
+        import fused_rotary_positional_embedding
+
+        cos_, sin_ = ctx.saved_tensors
+        grad_input = fused_rotary_positional_embedding.backward_cached(
+            grad_output, cos_, sin_, ctx.transpose_output_memory
+        )
+
+        return grad_input, None, None, None
+
+
+def fused_apply_rotary_pos_emb_cached(
+    t: torch.Tensor,
+    cos_: torch.Tensor,
+    sin_: torch.Tensor,
+    transpose_output_memory: bool = False,
+) -> torch.Tensor:
+    """Apply rotary positional embedding to input tensor T.
+
+    Args:
+        t (Tensor): Input tensor T is of shape [s, b, h, d]
+        cos_ (Tensor): Cached cosine of the rotary positional embedding tensor is of
+        shape [s, 1, 1, d] and dtype either `float` or the same as `t`.
+        sin_ (Tensor): Cached sine of the rotary positional embedding tensor is of
+        shape [s, 1, 1, d] and dtype either `float` or the same as `t`.
+        transpose_output_memory (bool): Defaults to False. Whether to transpose the 's' and 'b'
+        dimension of the output's underlying memory format.
This is very helpful when you want to
+        get a contiguous tensor after calling `output.transpose(0, 1)`.
+
+    Returns:
+        Tensor: The input tensor after applying RoPE
+    """
+    return FusedRoPECachedFunc.apply(t, cos_, sin_, transpose_output_memory)
diff --git a/megatron/model/fused_softmax.py b/megatron/model/fused_softmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..bce2e1992fbcf05b03d247f098d5cbd1b5770b85
--- /dev/null
+++ b/megatron/model/fused_softmax.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import enum
+from ..fused_kernels import load_fused_kernels
+
+
+class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
+    """
+    Fused operation which performs the following three operations in sequence
+    1. Scale the tensor.
+    2. Apply upper triangular mask (typically used in gpt models).
+    3. Perform softmax.
+    """
+
+    @staticmethod
+    def forward(ctx, inputs, scale):
+        import scaled_upper_triang_masked_softmax_cuda
+
+        scale_t = torch.tensor([scale])
+
+        softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
+            inputs, scale_t[0]
+        )
+        ctx.save_for_backward(softmax_results, scale_t)
+        return softmax_results
+
+    @staticmethod
+    def backward(ctx, output_grads):
+        import scaled_upper_triang_masked_softmax_cuda
+
+        softmax_results, scale_t = ctx.saved_tensors
+
+        input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
+            output_grads, softmax_results, scale_t[0]
+        )
+        return input_grads, None
+
+
+class ScaledMaskedSoftmax(torch.autograd.Function):
+    """
+    Fused operation which performs the following three operations in sequence
+    1. Scale the tensor.
+    2. Apply the mask.
+    3. Perform softmax.
+    """
+
+    @staticmethod
+    def forward(ctx, inputs, mask, scale):
+        import scaled_masked_softmax_cuda
+
+        scale_t = torch.tensor([scale])
+
+        softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
+        ctx.save_for_backward(softmax_results, scale_t)
+        return softmax_results
+
+    @staticmethod
+    def backward(ctx, output_grads):
+        import scaled_masked_softmax_cuda
+
+        softmax_results, scale_t = ctx.saved_tensors
+
+        input_grads = scaled_masked_softmax_cuda.backward(
+            output_grads, softmax_results, scale_t[0]
+        )
+        return input_grads, None, None
+
+
+class SoftmaxFusionTypes(enum.Enum):
+    upper_triang = 1  # causal mask
+    general = 2  # general mask
+    none = 3  # no fusion
+
+
+class FusedScaleMaskSoftmax(nn.Module):
+    """
+    fused operation: scaling + mask + softmax
+    Arguments:
+        input_in_fp16: flag to indicate if input is in fp16 data format.
+        input_in_bf16: flag to indicate if input is in bf16 data format.
+        fusion_type: type of fusion to perform, should be either upper_triang, general or none. none will perform a regular torch softmax.
+        mask_func: mask function to be applied.
+        softmax_in_fp32: if true, softmax is performed at fp32 precision.
+        scale: scaling factor used in input tensor scaling.
+
+    """
+
+    def __init__(
+        self,
+        input_in_fp16,
+        input_in_bf16,
+        fusion_type,
+        mask_func,
+        softmax_in_fp32,
+        scale,
+    ):
+        super().__init__()
+        self.input_in_fp16 = input_in_fp16
+        self.input_in_bf16 = input_in_bf16
+        self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
+
+        assert fusion_type in [
+            SoftmaxFusionTypes.upper_triang,
+            SoftmaxFusionTypes.general,
+            SoftmaxFusionTypes.none,
+        ], f"Invalid fusion type {fusion_type}"
+
+        if fusion_type != SoftmaxFusionTypes.none:
+            load_fused_kernels()  # check fused kernels are installed
+
+        self.upper_triang_mask_fusion = fusion_type == SoftmaxFusionTypes.upper_triang
+        self.general_mask_fusion = fusion_type == SoftmaxFusionTypes.general
+        self.fusion = fusion_type != SoftmaxFusionTypes.none
+
+        self.mask_func = mask_func
+        self.softmax_in_fp32 = softmax_in_fp32
+        self.scale = scale
+
+        assert (
+            self.scale is None or softmax_in_fp32
+        ), "softmax should be in fp32 when scaled"
+
+    def forward(self, input, mask):
+        # [b, np, sq, sk]
+        assert input.dim() == 4
+        if self.is_kernel_available(mask, *input.size()):
+            return self.forward_fused_softmax(input, mask)
+        else:
+            return self.forward_torch_softmax(input, mask)
+
+    def is_kernel_available(self, mask, b, np, sq, sk):
+        attn_batches = b * np
+
+        if (
+            self.fusion  # user wants to fuse
+            and self.input_in_float16  # input must be fp16 or bf16
+            and mask is not None  # mask tensor must not be None
+            and 16 < sk <= 2048  # sk must be in the range (16, 2048]
+            and sq % 4 == 0  # sq must be divisible by 4
+            and attn_batches % 4 == 0  # np * b must be divisible by 4
+        ):
+            if 0 <= sk <= 2048:
+                batch_per_block = self.get_batch_per_block(sq, sk, b, np)
+
+                if self.upper_triang_mask_fusion:
+                    if attn_batches % batch_per_block == 0:
+                        return True
+                else:
+                    if sq % batch_per_block == 0:
+                        return True
+        return False
+
+    def forward_fused_softmax(self, input, mask):
+        b, np, sq, sk = input.size()
+        scale = self.scale if self.scale is not None else 1.0
+        if self.upper_triang_mask_fusion:
+            assert sq == sk, "causal mask is only for self attention"
+
+            # reshape input to a 3D tensor (attn_batches, sq, sk)
+            input = input.view(-1, sq, sk)
+            probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
+            return probs.view(b, np, sq, sk)
+        else:
+            # input is a 4D tensor (b, np, sq, sk)
+            return ScaledMaskedSoftmax.apply(input, mask, scale)
+
+    def forward_torch_softmax(self, input, mask):
+        if self.input_in_float16 and self.softmax_in_fp32:
+            input = input.float()
+
+        if self.scale is not None:
+            input = input * self.scale
+        mask_output = self.mask_func(input, mask) if mask is not None else input
+        probs = torch.nn.Softmax(dim=-1)(mask_output)
+
+        if self.input_in_float16 and self.softmax_in_fp32:
+            if self.input_in_fp16:
+                probs = probs.half()
+            else:
+                probs = probs.bfloat16()
+
+        return probs
+
+    @staticmethod
+    def get_batch_per_block(sq, sk, b, np):
+        import scaled_masked_softmax_cuda
+
+        return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
diff --git a/megatron/model/gmlp.py b/megatron/model/gmlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..6400640bd8c65bf5831df6e1d3571ff0964d8a7c
--- /dev/null
+++ b/megatron/model/gmlp.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from megatron.model.fused_softmax import FusedScaleMaskSoftmax
+from megatron.model.activations import get_activation
+from megatron.model.norms import get_norm
+from megatron.model.utils import get_fusion_type
+
+from megatron import mpu
+
+
+class TinyAttention(nn.Module):
+    def __init__(self, neox_args, d_attn, d_ff, mask_fn):
+        super().__init__()
+        self.proj_qkv = nn.Linear(d_ff * 2, 3 * d_attn)
+        self.scale = d_attn**-0.5
+        self.proj_ffn = nn.Linear(d_attn, d_ff)
+        self.softmax = FusedScaleMaskSoftmax(
+            input_in_fp16=neox_args.precision == "fp16",
+            input_in_bf16=neox_args.precision == "bfloat16",
+            fusion_type=get_fusion_type(neox_args),
+            mask_func=mask_fn,
+            softmax_in_fp32=neox_args.attention_softmax_in_fp32,
+            scale=None,
+        )
+
+    def forward(self, x, attention_mask):
+        q, k, v = torch.chunk(self.proj_qkv(x), 3, dim=-1)
+        w = torch.einsum("bnd,bmd->bnm", q, k).unsqueeze(1) * self.scale
+        a = self.softmax(
+            w, mask=attention_mask[..., : w.size(-2), : w.size(-1)]
+        ).squeeze(1)
+        x = torch.einsum("bnm,bmd->bnd", a, v)
+        return self.proj_ffn(x)
+
+
+class SpatialGatingUnit(nn.Module):
+    def __init__(self, neox_args, d_ff, d_attn=None, causal=True, mask_fn=None):
+        super().__init__()
+        self.causal = causal
+        self.use_attn = d_attn is not None
+
+        norm, eps = get_norm(neox_args)
+        self.norm = norm(d_ff, eps=eps)
+        self.proj = nn.Linear(neox_args.seq_length, neox_args.seq_length)
+        if self.use_attn:
+            assert mask_fn is not None
+            self.attn = TinyAttention(
+                neox_args=neox_args, d_attn=d_attn, d_ff=d_ff, mask_fn=mask_fn
+            )
+        nn.init.zeros_(self.proj.weight)
+        nn.init.constant_(self.proj.bias, 1.0)
+
+    def forward(self, x, attention_mask):
+        device, n = x.device, x.shape[1]
+        x = x.transpose(0, 1)  # [s, b, d] -> [b, s, d]
+
+        res, gate = x.chunk(2, dim=-1)  # split along dim
+        gate = self.norm(gate)
+
+        weight, bias = self.proj.weight, self.proj.bias
+        if self.causal:
+            weight, bias = weight[:n, :n], bias[:n]
+            mask = torch.ones(weight.shape[:2], device=device).triu_(1).bool()
+            weight = weight.masked_fill(mask, 0.0)
+
+        # use the (possibly causally sliced) bias computed above
+        gate = F.linear(gate.transpose(2, 1), weight, bias).transpose(2, 1)
+
+        if self.use_attn:
+            gate = gate + self.attn(x, attention_mask)
+
+        return (gate * res).transpose(0, 1)  # [b, s, d] -> [s, b, d]
+
+
+class GMLPBlock(nn.Module):
+    def __init__(
+        self,
+        neox_args,
+        init_method,
+        output_layer_init_method,
+        layer_number,
+        ff_mult=4,
+        mask_fn=None,
+    ):
+        super().__init__()
+        self.layer_number = layer_number
+
+        ff_dim = neox_args.hidden_size * ff_mult
+        norm, eps = get_norm(neox_args)
+        self.norm = norm(neox_args.hidden_size, eps=eps)
+        self.input_linear = mpu.ColumnParallelLinear(
+            neox_args=neox_args,
+            input_size=neox_args.hidden_size,
+            output_size=ff_dim * 2,
+            gather_output=False,
+            init_method=init_method,
+            skip_bias_add=True,
+        )
+        self.activation_func, _ = get_activation(neox_args)
+        ff_dim_parallel = mpu.divide(ff_dim,
mpu.get_model_parallel_world_size()) + if neox_args.attention_config[layer_number] == "amlp": + d_attn = neox_args.gmlp_attn_dim + else: + d_attn = None + self.sgu = SpatialGatingUnit( + neox_args, ff_dim_parallel, d_attn, causal=True, mask_fn=mask_fn + ) + self.output_linear = mpu.RowParallelLinear( + neox_args=neox_args, + input_size=ff_dim, + output_size=neox_args.hidden_size, + input_is_parallel=True, + init_method=output_layer_init_method, + skip_bias_add=True, + ) + + def forward(self, args): + assert len(args) == 2, "GMLPBlock expects 2 arguments" + x, attention_mask = args + x = self.norm(x) + x, _ = self.input_linear(x) + x = self.activation_func(x) + x = self.sgu(x, attention_mask) + x, _ = self.output_linear(x) + return x, attention_mask diff --git a/megatron/model/gpt2_model.py b/megatron/model/gpt2_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7899048dbd4de834b819e1c0de3d378e357f1a59 --- /dev/null +++ b/megatron/model/gpt2_model.py @@ -0,0 +1,413 @@ +# Copyright (c) 2024 EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GPT-2 model.""" + +import math +import torch +import torch.nn as nn +from collections import defaultdict + +from functools import partial +from megatron.model.utils import Lambda, SequentialWrapper, recursive_setattr +from megatron.model.norms import get_norm +from megatron.model.init_functions import get_init_methods + +from megatron import mpu +from megatron.mpu import ParallelRelativePositionBias +from megatron.model.transformer import ( + ParallelTransformerLayerPipe, + NormPipe, + ParallelLinearPipe, + parallel_lm_logits, + ParallelLinear, +) +from megatron.model.gmlp import GMLPBlock +from megatron.model.rwkv.v6 import RWKVResidualLayerPipe +from megatron.model.mamba import ParallelMambaResidualLayerPipe +from megatron.model.word_embeddings import EmbeddingPipe, SoftEmbedding + +# Pipeline parallelism +from deepspeed.pipe import PipelineModule, LayerSpec, TiedLayerSpec +from typing import Union, List + + +def gpt2_attention_mask_func(attention_scores, ltor_mask): + mask_value = torch.finfo(attention_scores.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
+    # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+    mask_value = torch.tensor(
+        mask_value, dtype=attention_scores.dtype, device=attention_scores.device
+    )
+    attention_scores.masked_fill_(ltor_mask, mask_value)
+    return attention_scores
+
+
+def cross_entropy(output, labels, _fp16=False):
+    """From pretrain_gpt2:forward_step()"""
+    """
+    if self.fp16_lm_cross_entropy:
+        assert output.dtype == torch.half
+        loss = mpu.vocab_parallel_cross_entropy(output, labels)
+    else:
+        loss = mpu.vocab_parallel_cross_entropy(output.float(), labels)
+    return loss
+    """
+    labels, loss_mask = labels[0], labels[1]
+    if _fp16:
+        assert output.dtype == torch.half and loss_mask.dtype == torch.half
+        losses = mpu.vocab_parallel_cross_entropy(output.contiguous(), labels)
+    else:
+        losses = mpu.vocab_parallel_cross_entropy(output.float().contiguous(), labels)
+    loss_mask = loss_mask.view(-1)
+    loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
+    return loss
+
+
+def _pre_transformer_block(args):
+    # data format change for hidden_states to avoid explicit transposes: [b s h] --> [s b h]
+    assert len(args) == 2, "Incorrect number of arguments to _pre_transformer_block"
+    fn = lambda _args: (_args[0].transpose(0, 1).contiguous(), *_args[1:])
+    return fn(args)
+
+
+def _post_transformer_block(args):
+    # from (hidden_states, attention_mask)
+    # to (hidden_states.T)
+    assert len(args) == 2, "Incorrect number of arguments to _post_transformer_block"
+    fn = lambda _args: (_args[0].transpose(0, 1).contiguous())
+    return fn(args)
+
+
+class GPT2ModelPipe(PipelineModule, torch.nn.Module):
+    """GPT2Model adapted for pipeline parallelism.
+
+    The largest change is flattening the GPTModel class so we can express it as a
+    sequence of layers including embedding, transformer layers, and output.
+
+    :param neox_args: NeoX arguments object (configuration)
+    :param num_tokentypes: number of token types (TODO: deprecated, remove)
+    :param parallel_output: if true, don't gather the output logits, and calculate loss in parallel. Set to true by default in training for efficiency, but set to false for inference.
+    :param topology: deepspeed topology object specifying pipe / model parallelism topology.
+    :param use_cache: if true, cache key/value pairs for each layer in inference.
+    """
+
+    def __init__(
+        self,
+        neox_args,
+        num_tokentypes=0,
+        parallel_output=True,
+        topology=None,
+        use_cache=False,
+    ):
+        self.neox_args = neox_args
+
+        self.use_cache = use_cache
+        self.parallel_output = parallel_output
+        self.hidden_size = self.neox_args.hidden_size
+        self.num_tokentypes = num_tokentypes
+        self.init_method, self.output_layer_init_method = get_init_methods(
+            self.neox_args
+        )
+        self.__topology__ = topology
+
+        self.specs = []
+        self.init_specs()  # initializes the layer specs (basically a fancy nn.Sequential)
+
+        super().__init__(
+            layers=self.specs,
+            loss_fn=partial(cross_entropy, _fp16=self.neox_args.fp16_lm_cross_entropy),
+            topology=topology,
+            activation_checkpoint_interval=self.neox_args.checkpoint_num_layers
+            if self.neox_args.checkpoint_activations
+            else 0,
+            partition_method=neox_args.pipe_partition_method,
+            checkpointable_layers=[
+                "GMLPBlock",
+                "ParallelTransformerLayerPipe",
+                "ParallelMambaResidualLayerPipe",
+            ],
+        )
+
+    def insert_layers(
+        self, layers: Union[nn.Module, nn.ModuleList, nn.Sequential, List], idx
+    ):
+        """
+        Inserts the layers in `layers` into the pipe model at `idx`.
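+
+        Note: the parent `PipelineModule` constructor is re-run afterwards so that
+        the inserted specs are re-partitioned across pipeline stages.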
+ """ + if isinstance(layers, nn.Module): + self.specs.insert(idx, layers) + elif any( + [isinstance(layers, nn.ModuleList), isinstance(layers, nn.Sequential)] + ): + self.specs[idx:idx] = layers + elif isinstance(layers, list): + assert all( + [hasattr(l, "__call__") for l in layers] + ), "all items in `layers` must be Callables" + self.specs[idx:idx] = layers + else: + raise ValueError( + f"layer passed into {self.__class__.__name__}.insert_layer() should be either an nn.Module, an nn.ModuleList, an nn.Sequential object, or a list of callables not a {type(layers)}" + ) + + # re-initialize parent class + super().__init__( + layers=self.specs, + loss_fn=self.loss_fn, + topology=self.__topology__, + activation_checkpoint_interval=self.activation_checkpoint_interval, + partition_method=self.neox_args.pipe_partition_method, + checkpointable_layers=[ + "GMLPBlock", + "ParallelTransformerLayerPipe", + "ParallelMambaResidualLayerPipe", + "RWKVResidualLayerPipe", + ], + ) + + def init_specs(self): + + weight_tying = not self.neox_args.no_weight_tying + self.specs = [] + + # Embedding layer + # input will be (input_ids, position_ids, attention_mask) + + if weight_tying: + self.specs.append( + TiedLayerSpec( + "embed", + EmbeddingPipe, + self.neox_args, + self.hidden_size, + self.neox_args.padded_vocab_size, + self.neox_args.max_position_embeddings, + self.neox_args.hidden_dropout, + self.init_method, + self.num_tokentypes, + tied_weight_attr="word_embeddings_weight", + ) + ) + else: + self.specs.append( + LayerSpec( + EmbeddingPipe, + self.neox_args, + self.hidden_size, + self.neox_args.padded_vocab_size, + self.neox_args.max_position_embeddings, + self.neox_args.hidden_dropout, + self.init_method, + self.num_tokentypes, + ) + ) + + # NB: the attention mask always needs to be the *last* item in the args when being passed from + # one stage to the next, because deepspeed is hacks on top of hacks. 
+ # + # outputs are now (hidden_states, attention_mask) + + self.specs.append(_pre_transformer_block) + + # T5 RPE positional embedding + if self.neox_args.pos_emb == "rpe": + hidden_size_per_attention_head = mpu.divide( + self.neox_args.hidden_size, self.neox_args.num_attention_heads + ) + rpe_scale = math.sqrt(hidden_size_per_attention_head) + rpe_emb = ParallelRelativePositionBias( + neox_args=self.neox_args, + scale=rpe_scale, + causal=True, + num_buckets=self.neox_args.rpe_num_buckets, + max_distance=self.neox_args.rpe_max_distance, + heads=self.neox_args.num_attention_heads, + ) + + # Transformer layers + for i in range(self.neox_args.num_layers): + layer_type = self.neox_args.attention_config[i] + if layer_type in ["gmlp", "amlp"]: + self.specs.append( + LayerSpec( + GMLPBlock, + init_method=self.init_method, + layer_number=i, + output_layer_init_method=self.output_layer_init_method, + neox_args=self.neox_args, + mask_fn=gpt2_attention_mask_func, + ) + ) + elif layer_type == "rwkv": + self.specs.append( + LayerSpec( + RWKVResidualLayerPipe, + neox_args=self.neox_args, + layer_number=i, + ) + ) + elif layer_type in ["mamba"]: + self.specs.append( + LayerSpec( + ParallelMambaResidualLayerPipe, + neox_args=self.neox_args, + init_method=self.init_method, + output_layer_init_method=self.output_layer_init_method, + layer_number=i, + ) + ) + else: + self.specs.append( + LayerSpec( + ParallelTransformerLayerPipe, + neox_args=self.neox_args, + attention_mask_func=gpt2_attention_mask_func, + init_method=self.init_method, + output_layer_init_method=self.output_layer_init_method, + layer_number=i, + rpe=rpe_emb if self.neox_args.pos_emb == "rpe" else None, + rotary=self.neox_args.pos_emb == "rotary", + use_cache=self.use_cache, + ) + ) + + # used to drop attention mask + reshape hidden states + self.specs.append(_post_transformer_block) + + # NormPipe is a (deprecated) helper class that used to be used to pass presents along the pipeline - since presents are now cached to the `TransformerLayer` class this is no longer needed + norm, eps = get_norm(self.neox_args) + self.specs.append( + LayerSpec(NormPipe, norm, self.neox_args.hidden_size, eps=eps) + ) + + # outputs are now a single tensor: hidden_states + + def _logits_helper(embedding, lm_output): + """Just a wrapper to massage inputs/outputs from pipeline.""" + if self.neox_args.use_mup: + # Since we're using pipeline parallelism, we can't directly use MuReadout. Instead, use this workaround that does the same thing as MuReadout. 
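+                # (width_mult() is, roughly, the ratio of the current hidden width to
+                #  the base-shape width recorded by mup.set_base_shapes; dividing the
+                #  logits by it is what MuReadout does internally.)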
+                # https://github.com/microsoft/mup/issues/6#issuecomment-1082156274
+                lm_output = (
+                    lm_output
+                    / self.tied_modules.embed.word_embeddings.weight.infshape.width_mult()
+                )
+
+            logits = parallel_lm_logits(
+                lm_output,
+                embedding.word_embeddings_weight,
+                self.parallel_output,
+                seq_parallel=self.neox_args.sequence_parallel,
+            )
+            return logits
+
+        if weight_tying:
+            self.specs.append(
+                TiedLayerSpec(
+                    "embed",
+                    EmbeddingPipe,
+                    self.neox_args,
+                    self.hidden_size,
+                    self.neox_args.padded_vocab_size,
+                    self.neox_args.max_position_embeddings,
+                    self.neox_args.hidden_dropout,
+                    self.init_method,
+                    self.num_tokentypes,
+                    forward_fn=_logits_helper,
+                    tied_weight_attr="word_embeddings_weight",
+                )
+            )
+        else:
+            self.specs.append(
+                LayerSpec(
+                    ParallelLinearPipe,
+                    neox_args=self.neox_args,
+                    init_method=self.init_method,
+                    parallel_output=self.parallel_output,
+                    is_last_layer=True,
+                )
+            )
+
+    def _set_parallel_output(self, value):
+        # sets the parallel output value of the final layer to value
+        final_layer = list(self.forward_funcs)[-1]
+        if isinstance(final_layer, (ParallelLinearPipe, ParallelLinear)):
+            final_layer.final_linear.set_parallel_output(value)
+
+    def inference_mode(self, use_cache=True):
+        """
+        Sets up the model for inference by turning on k/v caching (if specified) and setting `parallel output` of the final layer to false,
+        so logits are gathered across model parallel ranks.
+
+        :param use_cache: (bool) True if you want to use caching during inference, False otherwise
+        """
+        # first set caching to true if specified
+        recursive_setattr(self.forward_funcs, "use_cache", use_cache, assert_type=bool)
+        # then set parallel output of the final layer to false so we don't have to gather the output manually
+        self._set_parallel_output(False)
+        recursive_setattr(self.forward_funcs, "training", False)
+
+    def train_mode(self):
+        """
+        Sets up the model for training by turning off k/v caching and setting `parallel output` of the final layer to True,
+        so logits are not gathered across model parallel ranks, and loss is computed in parallel (more efficient).
+ """ + # set caching to false + recursive_setattr(self.forward_funcs, "use_cache", False) + # then set parallel output to true (more efficient training) + self._set_parallel_output(True) + recursive_setattr(self.forward_funcs, "training", True) + + def clear_cache(self): + """ + Recursively clears the kv cache on all layers + """ + recursive_setattr(self.forward_funcs, "layer_past", None) + + def to_sequential(self): + """ + Transforms the PipelineModule to a plain nn.Sequential module + :return: + """ + layers = [] + tied_layers = defaultdict(list) + for n, spec in enumerate(self.specs): + if isinstance(spec, TiedLayerSpec): + if spec.key in tied_layers: + # receiver + layers.append( + Lambda(lambda x: spec.forward_fn(tied_layers[spec.key][0], x)) + ) + else: + # owner + module = spec.build(log=False) + layers.append(module) + tied_layers[spec.key].append(module) + elif isinstance(spec, LayerSpec): + layers.append(spec.build(log=False)) + elif hasattr(spec, "__call__"): + # check that it's a callable function + layers.append(Lambda(spec)) + else: + raise ValueError(f"Layer number {n} ({spec}) Not recognized") + model = SequentialWrapper( + layers, + self.activation_checkpoint_interval, + self.activation_checkpoint_func, + parent_class_name=self.__class__.__name__, + ) + return model diff --git a/megatron/model/init_functions.py b/megatron/model/init_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..8a0b8e25190c0579ada7c00dd574509dcbae34db --- /dev/null +++ b/megatron/model/init_functions.py @@ -0,0 +1,227 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import torch + +try: + import mup +except ImportError: + pass + + +def init_method_normal(sigma, use_mup_outer=False, mup_init_scale=1.0): + """Init method based on N(0, sigma).""" + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.normal_(tensor, mean=0.0, std=sigma) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.normal_(tensor, mean=0.0, std=sigma) + + return init_ + + +def scaled_init_method_normal( + sigma, + num_layers, + use_mup_outer=False, + mup_init_scale=1.0, + num_residuals_per_layer=2, +): + """Init method based on N(0, sigma/sqrt(2*num_layers). + + Also allows for N(0, sigma/sqrt(x*num_layers)) where + x=number of residuals per layer (e.g. 1 for Mamba.) 
+ """ + std = sigma / math.sqrt(num_residuals_per_layer * num_layers) + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.normal_(tensor, mean=0.0, std=std) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + + return init_ + + +# orthogonal init does not support fp16, so have to patch it +def _orthogonal(tensor, gain=1): + + if tensor.ndimension() < 2: + raise ValueError("Only tensors with 2 or more dimensions are supported") + + rows = tensor.size(0) + cols = tensor.numel() // rows + flattened = tensor.new(rows, cols).normal_(0, 1) + + if rows < cols: + flattened.t_() + + # Compute the qr factorization + dt = flattened.dtype + flattened = flattened.to(torch.float32) # orthogonal init does not support fp16 + q, r = torch.qr(flattened) + q, r = q.to(dtype=dt), r.to(dtype=dt) + # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf + d = torch.diag(r, 0) + ph = d.sign() + q *= ph + + if rows < cols: + q.t_() + + with torch.no_grad(): + tensor.view_as(q).copy_(q) + tensor.mul_(gain) + return tensor + + +def orthogonal_init_method(n_layers=1, use_mup=False, mup_init_scale=1.0): + """Fills the input Tensor with a (semi) orthogonal matrix, as described in + Exact solutions to the nonlinear dynamics of learning in deep linear neural networks - Saxe, A. et al. (2013) + Optionally scaling by number of layers possible, as introduced in OBST - Nestler et. al. (2021, to be released)""" + + if use_mup: + raise ValueError( + "Orthogonal init needs to be patched to support mup. Disable mup or use a different init method to avoid this error" + ) + + def init_(tensor): + return _orthogonal(tensor, math.sqrt(2 / n_layers)) + + return init_ + + +def xavier_uniform_init_method(use_mup_outer=False, mup_init_scale=1.0): + """Fills the input Tensor with values according to the method described in Understanding the difficulty of + training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a uniform distribution.""" + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.xavier_uniform_(tensor) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.xavier_uniform_(tensor) + + return init_ + + +def xavier_normal_init_method(use_mup_outer=False, mup_init_scale=1.0): + """Fills the input Tensor with values according to the method described in Understanding the difficulty of + training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a normal distribution.""" + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.xavier_normal_(tensor) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.xavier_normal_(tensor) + + return init_ + + +def small_init_init_method(dim, use_mup_outer=False, mup_init_scale=1.0): + """Fills the input Tensor with values according to the method described in Transformers without Tears: Improving + the Normalization of Self-Attention - Nguyen, T. & Salazar, J. 
(2019), using a normal distribution.""" + std = math.sqrt(2 / (5 * dim)) + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.normal_(tensor, mean=0.0, std=std) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + + return init_ + + +def wang_init_method(n_layers, dim, use_mup_outer=False, mup_init_scale=1.0): + std = 2 / n_layers / math.sqrt(dim) + + def init_(tensor, use_mup=use_mup_outer): + if use_mup: + mup.init.normal_(tensor, mean=0.0, std=std) + with torch.no_grad(): + tensor.mul_(mup_init_scale) + return tensor + else: + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + + return init_ + + +def get_init_methods(args): + + if args.use_mup: + try: + import mup + except ModuleNotFoundError: + print("Please install mup https://github.com/microsoft/mup") + raise Exception + + def _get(name): + if name == "normal": + return init_method_normal( + args.init_method_std, args.use_mup, args.mup_init_scale + ) + elif name == "scaled_normal": + return scaled_init_method_normal( + args.init_method_std, args.num_layers, args.use_mup, args.mup_init_scale + ) + elif name == "orthogonal": + return orthogonal_init_method(args.use_mup, args.mup_init_scale) + elif name == "scaled_orthogonal": + return orthogonal_init_method( + args.num_layers, args.use_mup, args.mup_init_scale + ) + elif name == "xavier_uniform": + return xavier_uniform_init_method(args.use_mup, args.mup_init_scale) + elif name == "xavier_normal": + return xavier_normal_init_method(args.use_mup, args.mup_init_scale) + elif name == "wang_init": + return wang_init_method( + args.num_layers, args.hidden_size, args.use_mup, args.mup_init_scale + ) + elif name == "small_init": + return small_init_init_method( + args.hidden_size, args.use_mup, args.mup_init_scale + ) + elif name == "single_residual_scaled_normal": + # mamba init uses scaled_normal but no need for 2 * num_layers + # since only one residual per layer + return scaled_init_method_normal( + args.init_method_std, + args.num_layers, + args.use_mup, + args.mup_init_scale, + num_residuals_per_layer=1, + ) + else: + raise NotImplementedError(f"Unknown init method {name}") + + return _get(args.init_method), _get(args.output_layer_init_method) diff --git a/megatron/model/mamba/__init__.py b/megatron/model/mamba/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a024707ad51b5b7c75128ae4ea9651aac8989b7d --- /dev/null +++ b/megatron/model/mamba/__init__.py @@ -0,0 +1,4 @@ +from .mamba import ( + ParallelMambaResidualLayer, + ParallelMambaResidualLayerPipe, +) diff --git a/megatron/model/mamba/mamba.py b/megatron/model/mamba/mamba.py new file mode 100644 index 0000000000000000000000000000000000000000..950e36fedfdb4141eb79a723f4cd2547333eacf7 --- /dev/null +++ b/megatron/model/mamba/mamba.py @@ -0,0 +1,413 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + from mamba_ssm.ops.selective_scan_interface import ( + selective_scan_ref, + selective_scan_fn, + mamba_inner_fn, + ) + from causal_conv1d import causal_conv1d_fn + import einops +except ModuleNotFoundError: + print( + "Unable to import Mamba kernels. 
Install them from our requirements/requirements-mamba.txt, \
+        or directly from https://github.com/state-spaces/mamba"
+    )
+
+from megatron.model.norms import get_norm
+from megatron import mpu
+
+
+# Mamba sublayer, with tensor parallelism
+class ParallelMambaBlock(nn.Module):
+    def __init__(
+        self,
+        neox_args,
+        init_method,
+        output_layer_init_method,
+    ):
+        super().__init__()
+
+        self.neox_args = neox_args
+
+        dtype = {
+            "fp16": torch.float16,
+            "bf16": torch.bfloat16,
+            "fp32": torch.float32,
+        }[neox_args.precision]
+        self.precision = dtype
+        factory_kwargs = {"device": torch.cuda.current_device(), "dtype": dtype}
+
+        assert not (
+            neox_args.mamba_use_bias_in_linears and neox_args.mamba_inner_func_fusion
+        ), "Mamba fused inner fn and bias in x_proj not compatible!"
+
+        assert (
+            neox_args.intermediate_size is None or neox_args.expansion_factor is None
+        ), "Must pass either the absolute intermediate size or the relative expansion factor for the mamba projections"
+
+        # set variables, mostly following mamba defaults
+        self.d_model = neox_args.hidden_size
+        self.d_state = 16  # state dimensions per channel
+        self.d_conv = 4  # convolution width
+        if neox_args.intermediate_size:
+            self.d_inner = neox_args.intermediate_size
+        else:
+            self.expand = (
+                neox_args.expansion_factor if neox_args.expansion_factor else 2
+            )
+            self.d_inner = int(self.expand * self.d_model)
+        self.dt_rank = math.ceil(self.d_model / 16)  # rank of dt / Delta parameter
+        self.dt_scale = 1.0
+
+        self.dt_init = "random"
+        self.dt_min, self.dt_max, self.dt_init_floor = 0.001, 0.1, 1e-4
+        assert self.dt_init in ["constant", "random"]
+
+        # TP-specific setup
+        world_size = mpu.get_model_parallel_world_size()
+        self.d_inner_per_rank = mpu.divide(self.d_inner, world_size)
+
+        if neox_args.mamba_inner_func_fusion and world_size > 1:
+            # as with the gpt-j residual, we must manually reduce the output of the final proj
+            # across TP ranks, since it is not done by the fused mamba_inner_fn.
+            self.reduce = mpu.mappings.reduce_from_model_parallel_region
+
+        # up-projection.
+        self.in_proj = mpu.ColumnParallelLinear(
+            neox_args=neox_args,
+            input_size=self.d_model,
+            output_size=self.d_inner * 2,
+            gather_output=False,
+            init_method=init_method,
+            skip_bias_add=not neox_args.mamba_use_bias_in_linears,
+            bias=neox_args.mamba_use_bias_in_linears,
+        )
+
+        # convolution (parallelized across d_inner)
+        self.conv1d = nn.Conv1d(
+            in_channels=self.d_inner_per_rank,
+            out_channels=self.d_inner_per_rank,
+            bias=neox_args.mamba_use_bias_in_conv,
+            kernel_size=self.d_conv,
+            groups=self.d_inner_per_rank,
+            padding=self.d_conv - 1,
+            **factory_kwargs,
+        )
+        # The conv bias sometimes erroneously ends up in fp32 when other parameters
+        # are held in fp32; the cause is unclear, so cast it back explicitly.
+        self.conv1d.to(self.precision)
+
+        self.act_fn = F.silu  # we do not allow for other activation fns
+
+        # x_proj corresponds to s_B(x), s_C(x), s_Delta(x)
+        # in https://arxiv.org/pdf/2312.00752.pdf Algorithm 2
+        # (computes data-dependent B, C, Delta/dt)
+        self.x_proj = mpu.RowParallelLinear(
+            neox_args=neox_args,
+            input_size=self.d_inner,
+            output_size=self.dt_rank + self.d_state * 2,
+            input_is_parallel=True,
+            init_method=init_method,
+            skip_bias_add=not neox_args.mamba_use_bias_in_linears,
+            parallel_output=True,
+            bias=neox_args.mamba_use_bias_in_linears,
+        )
+
+        # up-project dt / Delta from dt_rank to d_inner
+        # dt_proj's bias is a special case and should always be kept on -- Alg.
2 in the Mamba paper (https://arxiv.org/abs/2312.00752)
+        # defines Delta as Delta = Tau_{Delta}(Parameter + s_{Delta}(x)) where s_{Delta}(x) = Broadcast_{D}(Linear_{1}(x))
+        # or, as they further explain in section 3.6, it can also be s_{Delta}(x) = Linear_{D}(Linear_{R}(x)) where Linear_R
+        # is the delta portion of x_proj and Linear_D is the dt_proj weight. Then, the Parameter term from Alg. 2 can
+        # be viewed as the bias term in dt_proj, with a special initialization from https://arxiv.org/abs/2206.12037
+        self.dt_proj = nn.Linear(
+            self.dt_rank, self.d_inner_per_rank, bias=True, **factory_kwargs
+        )
+
+        # special init for dt_proj
+        dt_init_std = (self.dt_rank**-0.5) * self.dt_scale
+        if self.dt_init == "constant":
+            nn.init.constant_(self.dt_proj.weight, dt_init_std)
+        elif self.dt_init == "random":
+            nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std)
+        else:
+            raise NotImplementedError
+
+        # more dt_proj init stuff. copied from https://github.com/state-spaces/mamba/blob/009bec5ee37f586844a3fc89c040a9c1a9d8badf/mamba_ssm/modules/mamba_simple.py#L91-L101
+        dt = torch.exp(
+            torch.rand(self.d_inner_per_rank, **factory_kwargs)
+            * (math.log(self.dt_max) - math.log(self.dt_min))
+            + math.log(self.dt_min)
+        ).clamp(min=self.dt_init_floor)
+        # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
+        inv_dt = dt + torch.log(-torch.expm1(-dt))
+        with torch.no_grad():
+            self.dt_proj.bias.copy_(inv_dt)
+
+        # initialize A. Uses the S4D real initialization
+        A = einops.repeat(
+            torch.arange(
+                1,
+                self.d_state + 1,
+                dtype=torch.float32,
+                device=torch.cuda.current_device(),
+            ),
+            "n -> d n",
+            d=self.d_inner_per_rank,
+        ).contiguous()
+        A_log = torch.log(A).to(
+            torch.float32
+        )  # Keep in fp32, following https://github.com/state-spaces/mamba#precision and code comments
+        self.A_log = nn.Parameter(A_log)
+        self.A_log._no_weight_decay = (
+            True  # setting this attribute turns off weight decay for this param
+        )
+        # setting this attribute prevents DeeperSpeed from casting this param to fp32
+        # requires DeeperSpeed commit https://github.com/EleutherAI/DeeperSpeed/commit/6d097beccc4e3b0ac806c7d975f8c10d4689de26 or later
+        if self.neox_args.mamba_selective_fp32_params:
+            self.A_log._deepspeed_no_cast = True
+
+        # D parameter
+        self.D = nn.Parameter(
+            torch.ones(
+                self.d_inner_per_rank,
+                device=torch.cuda.current_device(),
+                dtype=torch.float32,
+            )
+        ).to(
+            torch.float32
+        )  # Keep in fp32, following https://github.com/state-spaces/mamba#precision and code comments
+        self.D._no_weight_decay = (
+            True  # setting this attribute turns off weight decay for this param
+        )
+        # setting this attribute prevents DeeperSpeed from casting this param to fp32
+        # requires DeeperSpeed commit https://github.com/EleutherAI/DeeperSpeed/commit/6d097beccc4e3b0ac806c7d975f8c10d4689de26 or later
+        if self.neox_args.mamba_selective_fp32_params:
+            self.D._deepspeed_no_cast = True
+
+        # output down-projection.
+        # use "single_residual_scaled_normal"
+        # for output_layer_init_method
+        # to perform gpt-2 style scaled init as done in the Mamba paper.
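+        # ("single_residual_scaled_normal" corresponds to
+        #  std = init_method_std / sqrt(1 * num_layers) in init_functions.py.)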
+ self.out_proj = mpu.RowParallelLinear( + neox_args=neox_args, + input_size=self.d_inner, + output_size=self.d_model, + input_is_parallel=True, + init_method=output_layer_init_method, + skip_bias_add=not neox_args.mamba_use_bias_in_linears, + bias=neox_args.mamba_use_bias_in_linears, + parallel_output=False, + ) + + def selective_scan( + self, + x, + dt, + A, + B, + C, + D, + z=None, + delta_bias=None, + delta_softplus=True, + ): + + if not self.neox_args.mamba_selective_scan_fusion: + y = selective_scan_ref( + u=x, + delta=dt, + A=A, + B=B, + C=C, + D=D, + z=z, + delta_bias=delta_bias, + delta_softplus=delta_softplus, + return_last_state=False, + ) + else: + y = selective_scan_fn( + x, + dt, + A, + B, + C, + D=D, + z=z, + delta_bias=delta_bias, + delta_softplus=delta_softplus, + return_last_state=False, + ) + + return y + + def forward(self, hidden_states): + """ """ + # TODO: support inference natively in neox. + # For now, we only handle training (parallel scan). + assert self.training, "Mamba in NeoX does not support inference!" + + # hidden_states: [sq, b, h] + seqlen, batch, dim = hidden_states.shape + + # first up: perform in_proj + xz, _ = self.in_proj(hidden_states) + xz = einops.rearrange(xz, "l b d -> b d l") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + if self.neox_args.mamba_inner_func_fusion: + # ================= + # Fused mamba inner + # ================= + + # mamba provides a mamba_inner fn that computes the entire (post-in_proj) Mamba block. + # we want to use it if we can, as it saves memory and provides speedups. + # equivalent to use_fast_path=True in state-spaces/mamba. + out = mamba_inner_fn( + xz, + self.conv1d.weight, + # for some bizarre reason this becomes fp32 sometime after init, when A and D held in fp32. + # cast it manually if the bias exists + self.conv1d.bias.to(self.precision) + if self.conv1d.bias is not None + else self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # B is input-dependent, will compute from x_proj + None, # C is input-dependent, will compute from x_proj + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + if getattr(self, "reduce", None): + # manually reduce after mamba_inner_fn + # to collect outputs from different TP ranks. + # handled by running self.out_proj(y) below + # so only needed here. + out = self.reduce(out) + + out = einops.rearrange(out, "b l h -> l b h") + + return out + + x, z = xz.chunk(2, dim=1) + + # =========== + # Convolution + # =========== + + if not self.neox_args.mamba_causal_conv_fusion: + self.conv1d.to(self.precision) # required if keeping fp32 A_log, D + x = self.act_fn(self.conv1d(x)[..., :seqlen]) + else: + # Note: this requires silu as activation. 
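+            # (Equivalent to the unfused branch above -- depthwise conv1d with left
+            #  padding of d_conv - 1, truncated back to seqlen, then SiLU -- done in
+            #  a single fused kernel.)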
+ x = causal_conv1d_fn( + x=x, + weight=einops.rearrange(self.conv1d.weight, "d 1 w -> d w"), + bias=self.conv1d.bias.to(self.precision) + if self.conv1d.bias is not None + else self.conv1d.bias, + activation="silu", + ) + + # ============== + # SSM (S6) layer + # ============== + + # project: perform s_B, s_C, s_Delta projections + x_dbl, _ = self.x_proj(einops.rearrange(x, "b d l -> (b l) d")) + # split into component dt, B, C + dt, B, C = torch.split( + x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1 + ) + + # up-project Delta / dt + dt = self.dt_proj.weight @ dt.t() + dt = einops.rearrange(dt, "d (b l) -> b d l", l=seqlen) + + # rearrange B, C + B = einops.rearrange(B, "(b l) d_state -> b d_state l", l=seqlen).contiguous() + C = einops.rearrange(C, "(b l) d_state -> b d_state l", l=seqlen).contiguous() + + # perform selective scan. + y = self.selective_scan( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + + # =============== + # Down-Projection + # =============== + y = einops.rearrange(y, "b d l -> b l d") + + out, _ = self.out_proj(y) + + out = einops.rearrange(out, "b l h -> l b h") + + return out + + +class ParallelMambaResidualLayer(nn.Module): + """ + Pre-norm Mamba Block with residual connection. No parallelism yet supported. + """ + + def __init__( + self, + neox_args, + init_method, + output_layer_init_method, + layer_number, + ): + super().__init__() + # TODO: allow for residual in fp32 if it helps? + self.layer_number = layer_number + + # TODO: Add support for triton RMSNorm fused kernel at https://github.com/state-spaces/mamba/blob/v1.2.0/mamba_ssm/ops/triton/layernorm.py + norm, eps = get_norm(neox_args) + + self.norm = norm(neox_args.hidden_size, eps=eps) + + self.mixer = ParallelMambaBlock( + neox_args=neox_args, + init_method=init_method, + output_layer_init_method=output_layer_init_method, + ) + + def forward(self, x, attention_mask=None, layer_past=None): + + # pseudocode: + # x = x + mixer(norm(x)) + residual = x + + hidden_states = self.mixer(self.norm(x)) + + return hidden_states + residual + + +class ParallelMambaResidualLayerPipe(ParallelMambaResidualLayer): + """Extends MambaResidualLayer to forward attention_mask through the pipeline. DeepSpeed requires this.""" + + def forward(self, args): + assert ( + len(args) == 2 + ), "MambaResidualLayerPipe expects 2 arguments - hidden_states and attention_mask" + hidden_states, attention_mask = args + # we are returning just [hidden_states, mask] + return super().forward(hidden_states, attention_mask), attention_mask diff --git a/megatron/model/megablocks_utils.py b/megatron/model/megablocks_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b9c5ecb7fc364ac1112f9ceb678592c34c4f6c7 --- /dev/null +++ b/megatron/model/megablocks_utils.py @@ -0,0 +1,32 @@ +"""Adapter to expose MegaBlocks package, if available.""" + +try: + import megablocks +except ImportError: + megablocks = None + + +def megablocks_is_available(): + return megablocks is not None + + +def assert_megablocks_is_available(): + assert ( + megablocks_is_available() + ), "MegaBlocks not available. Please run `pip install megablocks`." 
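+
+# A hypothetical call site, for illustration only (`neox_args` comes from the
+# caller; `dMoE` is MegaBlocks' dropless-MoE layer):
+#
+#   assert_megablocks_is_available()
+#   args = as_megablocks_args(neox_args)
+#   layer = dmoe.dMoE(args)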
+
+
+moe = megablocks.layers.moe if megablocks_is_available() else None
+dmoe = megablocks.layers.dmoe if megablocks_is_available() else None
+arguments = megablocks.layers.arguments if megablocks_is_available() else None
+
+
+def as_megablocks_args(neox_args):
+    import copy
+
+    tmp = copy.copy(neox_args)
+    args = arguments.from_megatron(tmp)
+    args.moe_lbl_in_fp32 = True
+    args.fp16 = neox_args.precision == "fp16"
+    args.moe_loss_weight = neox_args.moe_loss_coeff
+    return args
diff --git a/megatron/model/norms.py b/megatron/model/norms.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba175d3eb2960baaae764e1a6879be026547c463
--- /dev/null
+++ b/megatron/model/norms.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+from torch.nn import LayerNorm as LayerNorm
+
+
+def get_norm(neox_args):
+    if neox_args.norm == "rmsnorm":
+        eps = neox_args.rms_norm_epsilon
+        if neox_args.rmsnorm_fusion:
+            from .fused_layer_norm import MixedFusedRMSNorm
+
+            norm = MixedFusedRMSNorm
+        else:
+            norm = RMSNorm
+    elif neox_args.norm == "layernorm":
+        eps = neox_args.layernorm_epsilon
+        if neox_args.layernorm_fusion:
+            from .fused_layer_norm import MixedFusedLayerNorm
+
+            norm = MixedFusedLayerNorm
+        else:
+            norm = LayerNorm
+    elif neox_args.norm == "scalenorm":
+        eps = neox_args.scalenorm_epsilon
+        norm = ScaleNorm
+    elif neox_args.norm == "te_rmsnorm":
+        from .transformer_engine import TERMSNorm
+
+        norm = TERMSNorm
+        eps = neox_args.rms_norm_epsilon
+    elif neox_args.norm == "te_layernorm":
+        from .transformer_engine import TELayerNorm
+
+        norm = TELayerNorm
+        eps = neox_args.layernorm_epsilon
+    else:
+        raise ValueError(f"norm {neox_args.norm} not recognized")
+    return norm, eps
+
+
+class RMSNorm(torch.nn.Module):
+    def __init__(self, dim, p=-1.0, eps=1e-8, bias=False):
+        """
+        Root Mean Square Layer Normalization
+        :param dim: model size
+        :param p: partial RMSNorm, valid value [0, 1], default -1.0 (disabled)
+        :param eps: epsilon value, default 1e-8
+        :param bias: whether to use a bias term for RMSNorm, disabled by
+            default because RMSNorm doesn't enforce re-centering invariance.
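+
+        Normalizes as scale * x / (RMS(x) + eps), where RMS(x) = ||x||_2 / sqrt(d)
+        over the last dimension (or over its first p-fraction for partial RMSNorm).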
+ """ + super(RMSNorm, self).__init__() + + self.eps = eps + self.d = dim + self.p = p + self.bias = bias + + self.scale = torch.nn.Parameter(torch.ones(dim)) + self.register_parameter("scale", self.scale) + + if self.bias: + self.offset = torch.nn.Parameter(torch.zeros(dim)) + self.register_parameter("offset", self.offset) + + def forward(self, x): + dtype = x.dtype + if self.p < 0.0 or self.p > 1.0: + norm_x = x.norm(2, dim=-1, keepdim=True) + d_x = self.d + else: + partial_size = int(self.d * self.p) + partial_x, _ = torch.split(x, [partial_size, self.d - partial_size], dim=-1) + + norm_x = partial_x.norm(2, dim=-1, keepdim=True) + d_x = partial_size + + rms_x = norm_x * d_x ** (-1.0 / 2) + x_normed = x / (rms_x + self.eps) + + if self.bias: + return self.scale * x_normed + self.offset + + return (self.scale * x_normed).to(dtype) + + +class ScaleNorm(torch.nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.g = torch.nn.Parameter(torch.ones(1)) + self.eps = eps + + def forward(self, x): + n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps) + return x / n * self.g diff --git a/megatron/model/positional_embeddings.py b/megatron/model/positional_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..fcded9e96c48436f0e0014ed7781c32835d86d9f --- /dev/null +++ b/megatron/model/positional_embeddings.py @@ -0,0 +1,252 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import math + + +class SinusoidalPositionalEmbedding(torch.nn.Module): + def __init__(self, dim, base=10000, precision=torch.half): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer("inv_freq", inv_freq) + self.precision = precision + + def forward(self, x, seq_dim=1): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + sinusoid_inp = torch.einsum("i,j->ij", t, self.inv_freq) + if self.precision == torch.bfloat16: + sinusoid_inp = sinusoid_inp.float() + sin, cos = sinusoid_inp.sin(), sinusoid_inp.cos() + if self.precision == torch.bfloat16: + sin, cos = sin.bfloat16(), cos.bfloat16() + emb = torch.cat((sin, cos), dim=-1) + return emb[None, :, :] + + +class RotaryEmbedding(torch.nn.Module): + def __init__( + self, dim, max_seq_len, base=10000, precision=torch.half, save_inv_freqs=False + ): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=save_inv_freqs) + self.seq_len_cached = None + self.cos_cached = None + self.sin_cached = None + self.precision = precision + self.max_seq_len = max_seq_len + self.base = base + self.dim = dim + + # precompute cos_cached, sin_cached in fp32 + cos_cached, sin_cached, inv_freq = self._prepare_cache( + max_seq_len, precision, base + ) + + self.register_buffer("inv_freq", inv_freq, persistent=save_inv_freqs) + self.cos_cached = cos_cached + self.sin_cached = sin_cached + + def _prepare_cache(self, seq_len, precision, base): + # precompute cos_cached, sin_cached in fp32 + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float() / self.dim)) + + t = torch.arange(seq_len).type_as(inv_freq) + freqs = torch.einsum("i,j->ij", t, inv_freq) + emb = torch.cat((freqs, freqs), dim=-1) + + cos_cached = emb.cos()[:, None, None, :] + sin_cached = emb.sin()[:, None, None, :] + + return ( + cos_cached.to(precision), + sin_cached.to(precision), + inv_freq.to(precision), + ) + + def forward(self, x, seq_dim=0, seq_len=None): + if seq_len is None: + seq_len = x.shape[seq_dim] + + assert seq_len <= self.max_seq_len + + if seq_len != self.max_seq_len: + # y, z, _ = self._prepare_cache(seq_len, self.precision, self.base) + return ( + self.cos_cached[:seq_len, ...].to(x.device), + self.sin_cached[:seq_len, ...].to(x.device), + ) + else: + return self.cos_cached.to(x.device), self.sin_cached.to(x.device) + + +# rotary pos emb helpers: + + +def rotate_half(x): + x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] + return torch.cat( + (-x2, x1), dim=x1.ndim - 1 + ) # dim=-1 triggers a bug in earlier torch versions + + +@torch.jit.script +def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0): + cos, sin = ( + cos[offset : q.shape[0] + offset, ...], + sin[offset : q.shape[0] + offset, ...], + ) + return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) + + +def apply_rotary_pos_emb_torch( + q, k, cos, sin, offset: int = 0 +): # jitting fails with bf16 + cos, sin = ( + cos[offset : q.shape[0] + offset, ...], + sin[offset : q.shape[0] + offset, ...], + ) + return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) + + +class AliBi(torch.nn.Module): + def __init__(self, num_heads, mp_size=1, mp_rank=1): + super().__init__() + # megatron splits across heads, so we need to make sure each + # head receives the correct matrix + assert mp_size <= num_heads and mp_rank <= mp_size + self.mp_size = mp_size + self.mp_rank = mp_rank + 
self.num_heads = num_heads + self.slice_size = num_heads // mp_size + self.cached_matrix = None + self.cached_seq_len = None + slopes = torch.Tensor(self._get_slopes(num_heads))[ + mp_rank * self.slice_size : (mp_rank + 1) * self.slice_size + ] + self.register_buffer("slopes", slopes) + + def _get_slopes(self, n): + """ + Get slopes for Alibi positional embedding + n : int = number of heads. + For best performance, restrict n to a power of 2. + """ + + def get_slopes_power_of_2(n): + start = 2 ** (-(2 ** -(math.log2(n) - 3))) + ratio = start + return [start * ratio**i for i in range(n)] + + if math.log2(n).is_integer(): + return get_slopes_power_of_2(n) + else: + closest_power_of_2 = 2 ** math.floor(math.log2(n)) + return ( + get_slopes_power_of_2(closest_power_of_2) + + self._get_slopes(2 * closest_power_of_2)[0::2][ + : n - closest_power_of_2 + ] + ) + + def bias(self, seq_len_q, seq_len_k, device, dtype): + # [b, np, sq, sk] + # seq_len_q = x.shape[-2] + # seq_len_k = x.shape[-1] + + # Initialize the AliBi matrix to match the first provided key length; grow it exponentially + # afterwards if longer inputs are provided. This is important for inference, where we will + # encounter progressively longer samples; it should have no effect at training time. + if self.cached_seq_len is not None and self.cached_seq_len >= seq_len_k: + a = self.cached_matrix + else: + target_seq_len = ( + seq_len_k if self.cached_seq_len is None else self.cached_seq_len * 4 + ) + a = -torch.tril( + torch.arange(target_seq_len) + .view(target_seq_len, 1) + .repeat(1, target_seq_len) + + torch.arange(0, -target_seq_len, -1) + ) + a = a.to(device).to(dtype) + slopes = self.slopes.to(a.device).to(a.dtype) + a = a * slopes.view(self.slopes.shape[0], 1, 1) + self.cached_seq_len = target_seq_len + self.cached_matrix = a + + # If the AliBi matrix is larger than the key length, clip it. + if self.cached_seq_len > seq_len_k: + a = self.cached_matrix[:, :seq_len_k, :seq_len_k] + + if seq_len_q != seq_len_k: + # In the train case x has dimensionality [b, np, sq, sk] with sq == sk + # The number of query tokens is equal to the number of key tokens + # At inference time with cache in layer_past sq is not equal to sk. sq only contains one token (the last one in the full sequence) + # In this case we use the appropriate token index of the cache matrix. + # As the cache matrix could already be bigger from a past inference, not the last token index in the sq sequence is used + assert ( + seq_len_q == 1 + ), "assumption sq == sk unless at inference time with cache in layer_past with sq == 1" + a = a[:, seq_len_k - 1, :].view( + a.shape[0], 1, a.shape[2] + ) # seq_len_k - 1 points to the last token index in the current inference batch. + + return a + + def forward(self, x): + # [b, np, sq, sk] + seq_len_q = x.shape[-2] + seq_len_k = x.shape[-1] + + # Initialize the AliBi matrix to match the first provided key length; grow it exponentially + # afterwards if longer inputs are provided. This is important for inference, where we will + # encounter progressively longer samples; it should have no effect at training time. 
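+        # Illustration (not executed): with seq_len 4 and per-head slope m, the
+        # additive bias below is the lower-triangular matrix
+        #     m * [[ 0,  .,  .,  .],
+        #          [-1,  0,  .,  .],
+        #          [-2, -1,  0,  .],
+        #          [-3, -2, -1,  0]]
+        # so attention to keys further in the past is penalized linearly with distance.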
+        if self.cached_seq_len is not None and self.cached_seq_len >= seq_len_k:
+            a = self.cached_matrix
+        else:
+            target_seq_len = (
+                seq_len_k if self.cached_seq_len is None else self.cached_seq_len * 4
+            )
+            a = -torch.tril(
+                torch.arange(target_seq_len)
+                .view(target_seq_len, 1)
+                .repeat(1, target_seq_len)
+                + torch.arange(0, -target_seq_len, -1)
+            )
+            a = a.to(x.device).to(x.dtype)
+            slopes = self.slopes.to(a.device).to(a.dtype)
+            a = a * slopes.view(self.slopes.shape[0], 1, 1)
+            self.cached_seq_len = target_seq_len
+            self.cached_matrix = a
+
+        # If the AliBi matrix is larger than the key length, clip it.
+        if self.cached_seq_len > seq_len_k:
+            a = self.cached_matrix[:, :seq_len_k, :seq_len_k]
+
+        if seq_len_q != seq_len_k:
+            # In the train case x has dimensionality [b, np, sq, sk] with sq == sk
+            # The number of query tokens is equal to the number of key tokens
+            # At inference time with cache in layer_past sq is not equal to sk. sq only contains one token (the last one in the full sequence)
+            # In this case we use the appropriate token index of the cache matrix.
+            # As the cache matrix could already be bigger from a past inference, not the last token index in the sq sequence is used
+            assert (
+                seq_len_q == 1
+            ), "assumption sq == sk unless at inference time with cache in layer_past with sq == 1"
+            a = a[:, seq_len_k - 1, :].view(
+                a.shape[0], 1, a.shape[2]
+            )  # seq_len_k - 1 points to the last token index in the current inference batch.
+
+        return x + a
diff --git a/megatron/model/rwkv/__init__.py b/megatron/model/rwkv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/megatron/model/rwkv/v6/__init__.py b/megatron/model/rwkv/v6/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0d8d4ba1ac1dcdd8ad878019b8e56c15141d303
--- /dev/null
+++ b/megatron/model/rwkv/v6/__init__.py
@@ -0,0 +1 @@
+from .rwkv import RWKVResidualLayerPipe, RWKVResidualLayer
diff --git a/megatron/model/rwkv/v6/cuda/wkv6_cuda.cu b/megatron/model/rwkv/v6/cuda/wkv6_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..2b228e90f22ffc31e40d16254dfca5acba27106f
--- /dev/null
+++ b/megatron/model/rwkv/v6/cuda/wkv6_cuda.cu
@@ -0,0 +1,270 @@
+#include <stdio.h>
+#include <assert.h>
+#include "ATen/ATen.h"
+typedef at::BFloat16 bf16;
+
+template <typename F>
+__global__ void kernel_forward(const int B,
+                               const int T,
+                               const int C,
+                               const int H,
+                               const F* __restrict__ const _r,
+                               const F* __restrict__ const _k,
+                               const F* __restrict__ const _v,
+                               const float* __restrict__ _w,
+                               const F* __restrict__ _u,
+                               F* __restrict__ const _y)
+{
+    const int b = blockIdx.x / H;
+    const int h = blockIdx.x % H;
+    const int i = threadIdx.x;
+    _u += h * _N_;
+
+    __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
+    float state[_N_] = {0};
+
+    __syncthreads();
+    u[i] = float(_u[i]);
+    __syncthreads();
+
+    for (int t = b * T * C + h * _N_ + i; t < (b + 1) * T * C + h * _N_ + i; t += C) {
+        __syncthreads();
+        w[i] = exp(_w[t]);
+        r[i] = float(_r[t]);
+        k[i] = float(_k[t]);
+        __syncthreads();
+
+        const float v = float(_v[t]);
+        float y = 0;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j += 4) {
+            const float4& r_ = (float4&)(r[j]);
+            const float4& k_ = (float4&)(k[j]);
+            const float4& w_ = (float4&)(w[j]);
+            const float4& u_ = (float4&)(u[j]);
+            float4& s = (float4&)(state[j]);
+            float4 x;
+
+            x.x = k_.x * v;
+            x.y = k_.y * v;
+            x.z = k_.z * v;
+            x.w = k_.w * v;
+
+            y += r_.x * (u_.x * x.x + s.x);
+            y += r_.y * (u_.y * x.y + s.y);
+            y += r_.z * (u_.z * x.z + s.z);
+            y += r_.w * (u_.w * x.w + s.w);
+
+            s.x = s.x * w_.x + x.x;
+            s.y = s.y * w_.y + x.y;
+            s.z = s.z * w_.z + x.z;
+            s.w = s.w * w_.w + x.w;
+        }
+        _y[t] = F(y);
+    }
+}
+
+template <typename F>
+__global__ void kernel_backward_111(const int B,
+                                    const int T,
+                                    const int C,
+                                    const int H,
+                                    const F* __restrict__ const _r,
+                                    const F* __restrict__ const _k,
+                                    const F* __restrict__ const _v,
+                                    const float* __restrict__ _w,
+                                    const F* __restrict__ _u,
+                                    const F* __restrict__ const _gy,
+                                    F* __restrict__ const _gr,
+                                    F* __restrict__ const _gk,
+                                    F* __restrict__ const _gv,
+                                    F* __restrict__ const _gu)
+{
+    const int b = blockIdx.x / H;
+    const int h = blockIdx.x % H;
+    const int i = threadIdx.x;
+    _u += h * _N_;
+
+    __shared__ float u_[_N_];
+    __shared__ float r[_N_], k[_N_], v[_N_], w_[_N_], gy[_N_];
+    __syncthreads();
+    u_[i] = float(_u[i]);
+    __syncthreads();
+
+    const float u = u_[i];
+
+    float state[_N_] = {0}, scccc[_N_] = {0}, sdddd[_N_] = {0};
+
+    const int t_0 = b * T * C + h * _N_ + i;
+    const int t_T_1 = t_0 + (T - 1) * C;
+    const int t_T = t_0 + T * C;
+
+    float gu = 0;
+    for (int t = t_0; t < t_T; t += C) {
+        __syncthreads();
+        v[i] = float(_v[t]);
+        gy[i] = float(_gy[t]);
+        __syncthreads();
+
+        const float k = float(_k[t]);
+        const float w = exp(_w[t]);
+        float gr = 0, gu_ = 0;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j++) {
+            float& s = state[j];
+            float x = k * v[j];
+
+            gr += (u * x + s) * gy[j];
+            gu_ += x * gy[j];
+            s = s * w + x;
+        }
+        _gr[t] = F(gr);
+        gu += float(_r[t]) * gu_;
+    }
+    _gu[b * C + h * _N_ + i] = F(gu);
+
+    for (int t = t_T_1; t >= t_0; t -= C) {
+        __syncthreads();
+        v[i] = float(_v[t]);
+        gy[i] = float(_gy[t]);
+        __syncthreads();
+
+        const float rr = float(_r[t]);
+        const float w = exp(_w[t]);
+        float gk = 0;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j++) {
+            float& s = scccc[j];
+            float x = rr * gy[j];
+
+            gk += (u * x + s) * v[j];
+            s = x + s * w;
+        }
+        _gk[t] = F(gk);
+    }
+
+    for (int t = t_T_1; t >= t_0; t -= C) {
+        __syncthreads();
+        r[i] = float(_r[t]);
+        k[i] = float(_k[t]);
+        w_[i] = exp(_w[t]);
+        __syncthreads();
+
+        const float gyy = float(_gy[t]);
+        float gv = 0;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j++) {
+            float& s = sdddd[j];
+            float x = gyy * r[j];
+
+            gv += (u_[j] * x + s) * k[j];
+            s = x + s * w_[j];
+        }
+        _gv[t] = F(gv);
+    }
+}
+
+template <typename F>
+__global__ void kernel_backward_222(const int B,
+                                    const int T,
+                                    const int C,
+                                    const int H,
+                                    const F* __restrict__ const _r,
+                                    const F* __restrict__ const _k,
+                                    const F* __restrict__ const _v,
+                                    const float* __restrict__ _w,
+                                    const F* __restrict__ _u,
+                                    const F* __restrict__ const _gy,
+                                    F* __restrict__ const _gw)
+{
+    const int b = blockIdx.x / H;
+    const int h = blockIdx.x % H;
+    const int i = threadIdx.x;
+
+    __shared__ float v[_N_], gy[_N_];
+    float saaaa[_N_] = {0}, sbbbb[_T_ - 2] = {0}, scccc[_N_] = {0};
+
+    const int t_0 = b * T * C + h * _N_ + i;
+    const int t_1 = t_0 + C;
+    const int t_2 = t_0 + 2 * C;
+    const int t_T_1 = t_0 + (T - 1) * C;
+
+    for (int t = t_T_1; t > t_1; t -= C) {
+        __syncthreads();
+        gy[i] = float(_gy[t]);
+        v[i] = float(_v[t - 2 * C]);
+        __syncthreads();
+
+        const float r = float(_r[t]);
+        const float w = exp(_w[t - C]);
+        float sum = 0.0f;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j++) {
+            float& s = saaaa[j];
+            float x = r * gy[j];
+            s = (s + x) * w;
+            sum += s * v[j];
+        }
+        sbbbb[(t - t_2) / C] = sum * float(_k[t - 2 * C]);
+    }
+
+    float sss = sbbbb[0];
+    _gw[t_0] = 0;
+    _gw[t_1] = F(sss * _w[t_1]);
+
+    for (int t = t_2; t < t_T_1; t += C) {
+        __syncthreads();
+        gy[i] = float(_gy[t]);
+        v[i] = float(_v[t - 2 * C]);
+        __syncthreads();
+
+        const float w = exp(_w[t - C]);
+        const float k = float(_k[t - 2 * C]);
+        float sum = 0.0f;
+
+#pragma unroll
+        for (int j = 0; j < _N_; j++) {
+            float& s = scccc[j];
+            float x = k * v[j];
+            s = (s + x) * w;
+            sum += s * gy[j];
+        }
+        sss += sbbbb[(t - t_1) / C] - (sum * float(_r[t]));
+        _gw[t] = F(sss * _w[t]);
+    }
+    _gw[t_T_1] = 0;
+}
+
+void cuda_forward(int B, int T, int C, int H, bf16* r, bf16* k, bf16* v, float* w, bf16* u, bf16* y)
+{
+    assert(H * _N_ == C);
+    assert(_N_ % 4 == 0);
+    kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, y);
+}
+
+void cuda_backward(int B,
+                   int T,
+                   int C,
+                   int H,
+                   bf16* r,
+                   bf16* k,
+                   bf16* v,
+                   float* w,
+                   bf16* u,
+                   bf16* gy,
+                   bf16* gr,
+                   bf16* gk,
+                   bf16* gv,
+                   bf16* gw,
+                   bf16* gu)
+{
+    assert(H * _N_ == C);
+    assert(_N_ % 4 == 0);
+    kernel_backward_111<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gu);
+    kernel_backward_222<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, r, k, v, w, u, gy, gw);
+}
diff --git a/megatron/model/rwkv/v6/cuda/wkv6_op.cpp b/megatron/model/rwkv/v6/cuda/wkv6_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..385b474871325976442c718dcaa1f8fc2bd4c00a
--- /dev/null
+++ b/megatron/model/rwkv/v6/cuda/wkv6_op.cpp
@@ -0,0 +1,95 @@
+#include <torch/extension.h>
+#include "ATen/ATen.h"
+typedef at::BFloat16 bf16;
+
+void cuda_forward(int B,
+                  int T,
+                  int C,
+                  int H,
+                  bf16* r,
+                  bf16* k,
+                  bf16* v,
+                  float* w,
+                  bf16* u,
+                  bf16* y);
+void cuda_backward(int B,
+                   int T,
+                   int C,
+                   int H,
+                   bf16* r,
+                   bf16* k,
+                   bf16* v,
+                   float* w,
+                   bf16* u,
+                   bf16* gy,
+                   bf16* gr,
+                   bf16* gk,
+                   bf16* gv,
+                   bf16* gw,
+                   bf16* gu);
+
+void forward(int64_t B,
+             int64_t T,
+             int64_t C,
+             int64_t H,
+             torch::Tensor& r,
+             torch::Tensor& k,
+             torch::Tensor& v,
+             torch::Tensor& w,
+             torch::Tensor& u,
+             torch::Tensor& y)
+{
+    cuda_forward(B,
+                 T,
+                 C,
+                 H,
+                 r.data_ptr<bf16>(),
+                 k.data_ptr<bf16>(),
+                 v.data_ptr<bf16>(),
+                 w.data_ptr<float>(),
+                 u.data_ptr<bf16>(),
+                 y.data_ptr<bf16>());
+}
+void backward(int64_t B,
+              int64_t T,
+              int64_t C,
+              int64_t H,
+              torch::Tensor& r,
+              torch::Tensor& k,
+              torch::Tensor& v,
+              torch::Tensor& w,
+              torch::Tensor& u,
+              torch::Tensor& gy,
+              torch::Tensor& gr,
+              torch::Tensor& gk,
+              torch::Tensor& gv,
+              torch::Tensor& gw,
+              torch::Tensor& gu)
+{
+    cuda_backward(B,
+                  T,
+                  C,
+                  H,
+                  r.data_ptr<bf16>(),
+                  k.data_ptr<bf16>(),
+                  v.data_ptr<bf16>(),
+                  w.data_ptr<float>(),
+                  u.data_ptr<bf16>(),
+                  gy.data_ptr<bf16>(),
+                  gr.data_ptr<bf16>(),
+                  gk.data_ptr<bf16>(),
+                  gv.data_ptr<bf16>(),
+                  gw.data_ptr<bf16>(),
+                  gu.data_ptr<bf16>());
+}
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+{
+    m.def("forward", &forward, "wkv6 forward");
+    m.def("backward", &backward, "wkv6 backward");
+}
+
+TORCH_LIBRARY(wkv6, m)
+{
+    m.def("forward", forward);
+    m.def("backward", backward);
+}
diff --git a/megatron/model/rwkv/v6/rwkv.py b/megatron/model/rwkv/v6/rwkv.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3741a3fc2a5bdc3fda34bafc13b2e6325374182
--- /dev/null
+++ b/megatron/model/rwkv/v6/rwkv.py
@@ -0,0 +1,366 @@
+########################################################################################################
+# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
+########################################################################################################
+
+import os, math, gc, importlib
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from torch.utils.cpp_extension import load
+
+
+class WKV(torch.autograd.Function):
+    """
+    WKV block, using cuda kernel.
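+
+    Per head, the kernel evaluates the linear-attention-style recurrence
+        y_t = r_t . (u * k_t * v_t + s_{t-1}),    s_t = w_t * s_{t-1} + k_t * v_t
+    elementwise over the head dimension (see cuda/wkv6_cuda.cu).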
+ """ + + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v.is_contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + ew = (-torch.exp(w.float())).contiguous() + ctx.save_for_backward(r, k, v, ew, u) + y = torch.empty( + (B, T, C), + device=r.device, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + wkv_cuda.forward(B, T, C, H, r, k, v, ew, u, y) + return y + + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + assert gy.dtype == torch.bfloat16 + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + assert gy.is_contiguous() + r, k, v, ew, u = ctx.saved_tensors + gr = torch.empty( + (B, T, C), + device=gy.device, + requires_grad=False, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + gk = torch.empty( + (B, T, C), + device=gy.device, + requires_grad=False, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + gv = torch.empty( + (B, T, C), + device=gy.device, + requires_grad=False, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + gw = torch.empty( + (B, T, C), + device=gy.device, + requires_grad=False, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + gu = torch.empty( + (B, C), + device=gy.device, + requires_grad=False, + dtype=torch.bfloat16, + memory_format=torch.contiguous_format, + ) # .uniform_(-100, 100) + wkv_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) + gu = torch.sum(gu, 0).view(H, C // H) + return (None, None, None, None, gr, gk, gv, gw, gu) + + +def RUN_CUDA_RWKV(B, T, C, H, r, k, v, w, u): + return WKV.apply(B, T, C, H, r, k, v, w, u) + + +# RWKV6 time mix +class RWKV_TimeMix(nn.Module): + """ + Time Mixing Layer + The RWKV substitute for attention. + TODO: fix jit compiling. 
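+
+    Each token is mixed with its time-shifted predecessor using learned
+    per-channel coefficients plus low-rank data-dependent corrections
+    (time_maa_w1/time_maa_w2), producing the r, k, v, gate and decay
+    inputs for the WKV kernel.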
+ """ + + def __init__(self, neox_args, layer_number): + super().__init__() + self.neox_args = neox_args + self.layer_number = layer_number + + with torch.no_grad(): + ratio_0_to_1 = layer_number / (neox_args.num_layers - 1) # 0 to 1 + ratio_1_to_almost0 = 1.0 - (layer_number / neox_args.num_layers) # 1 to ~0 + ddd = torch.ones(1, 1, neox_args.hidden_size) + for i in range(neox_args.hidden_size): + ddd[0, 0, i] = i / neox_args.hidden_size + + # fancy time_mix + self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_v = nn.Parameter( + 1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) + ) + self.time_maa_r = nn.Parameter( + 1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0) + ) + self.time_maa_g = nn.Parameter( + 1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0) + ) + + TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g + self.time_maa_w1 = nn.Parameter( + torch.zeros(neox_args.hidden_size, TIME_MIX_EXTRA_DIM * 5).uniform_( + -1e-4, 1e-4 + ) + ) + self.time_maa_w2 = nn.Parameter( + torch.zeros(5, TIME_MIX_EXTRA_DIM, neox_args.hidden_size).uniform_( + -1e-4, 1e-4 + ) + ) + + # fancy time_decay + decay_speed = torch.ones(neox_args.dim_att) + for n in range(neox_args.dim_att): + decay_speed[n] = -6 + 5 * (n / (neox_args.dim_att - 1)) ** ( + 0.7 + 1.3 * ratio_0_to_1 + ) + self.time_decay = nn.Parameter(decay_speed.reshape(1, 1, neox_args.dim_att)) + + TIME_DECAY_EXTRA_DIM = 64 + self.time_decay_w1 = nn.Parameter( + torch.zeros(neox_args.hidden_size, TIME_DECAY_EXTRA_DIM).uniform_( + -1e-4, 1e-4 + ) + ) + self.time_decay_w2 = nn.Parameter( + torch.zeros(TIME_DECAY_EXTRA_DIM, neox_args.dim_att).uniform_( + -1e-4, 1e-4 + ) + ) + + tmp = torch.zeros(neox_args.dim_att) + for n in range(neox_args.dim_att): + zigzag = ((n + 1) % 3 - 1) * 0.1 + tmp[n] = ratio_0_to_1 * (1 - (n / (neox_args.dim_att - 1))) + zigzag + + self.time_faaaa = nn.Parameter( + tmp.reshape(neox_args.num_attention_heads, neox_args.head_size) + ) + + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + self.receptance = nn.Linear( + neox_args.hidden_size, neox_args.dim_att, bias=False + ) + self.key = nn.Linear(neox_args.hidden_size, neox_args.dim_att, bias=False) + + self.value = nn.Linear(neox_args.hidden_size, neox_args.dim_att, bias=False) + self.output = nn.Linear(neox_args.dim_att, neox_args.hidden_size, bias=False) + self.gate = nn.Linear(neox_args.hidden_size, neox_args.dim_att, bias=False) + self.ln_x = nn.GroupNorm( + neox_args.num_attention_heads, neox_args.dim_att, eps=(1e-5) * (8**2) + ) + + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B * T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w + + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + + def 
forward(self, x): + B, T, C = x.size() + H = self.neox_args.num_attention_heads + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV(B, T, C, H, r, k, v, w, u=self.time_faaaa) + + return self.jit_func_2(x, g) + + +class RWKV_ChannelMix(nn.Module): + """ + Channel Mix layer. The ffn in RWKV + """ + + def __init__(self, neox_args, layer_number): + super().__init__() + self.neox_args = neox_args + self.layer_number = layer_number + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + + with torch.no_grad(): # fancy init of time_mix + ratio_1_to_almost0 = 1.0 - (layer_number / neox_args.num_layers) # 1 to ~0 + ddd = torch.ones(1, 1, neox_args.hidden_size) + for i in range(neox_args.hidden_size): + ddd[0, 0, i] = i / neox_args.hidden_size + self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + + self.key = nn.Linear(neox_args.hidden_size, neox_args.ffn_dim, bias=False) + self.receptance = nn.Linear( + neox_args.hidden_size, neox_args.hidden_size, bias=False + ) + self.value = nn.Linear(neox_args.ffn_dim, neox_args.hidden_size, bias=False) + + def forward(self, x): + xx = self.time_shift(x) - x + xk = x + xx * self.time_maa_k + xr = x + xx * self.time_maa_r + + k = self.key(xk) + k = torch.relu(k) ** 2 + kv = self.value(k) + return torch.sigmoid(self.receptance(xr)) * kv + + +class RWKVResidualLayer(nn.Module): + """ + RWKV layer definition + """ + + def __init__(self, neox_args, layer_number): + super().__init__() + self.neox_args = neox_args + self.layer_number = layer_number + self.fp16 = neox_args.precision == "fp16" + self.bf16 = neox_args.precision == "bfloat16" + assert ( + neox_args.intermediate_size == None or neox_args.expansion_factor == None + ), "Must pass either the absolute intermediate size or the relative expansion factor for the mamba projections" + if not hasattr(neox_args, "dim_att"): + neox_args.dim_att = neox_args.hidden_size + if neox_args.intermediate_size: + neox_args.ffn_dim = neox_args.intermediate_size + else: + self.expand = ( + neox_args.expansion_factor if neox_args.expansion_factor else 3.5 + ) + neox_args.ffn_dim = int(self.expand * neox_args.hidden_size) + # Make hidden size 3.5x by default. Round to nearest multiple of 32 until we add hdim rounding logic + neox_args.ffn_dim = int(neox_args.ffn_dim // 32 * 32) + assert neox_args.hidden_size % 32 == 0 + assert neox_args.dim_att % 32 == 0 + assert neox_args.ffn_dim % 32 == 0 + self.neox_args.head_size = neox_args.dim_att // neox_args.num_attention_heads + self.head_size = self.neox_args.head_size + self.num_attention_heads = neox_args.num_attention_heads + assert neox_args.dim_att % self.num_attention_heads == 0 + + if neox_args.attention_dropout > 0: + self.drop0 = nn.Dropout(p=neox_args.attention_dropout) + + self.ln1 = nn.LayerNorm(neox_args.hidden_size) + self.ln2 = nn.LayerNorm(neox_args.hidden_size) + + self.att = RWKV_TimeMix(neox_args, layer_number) + + self.ffn = RWKV_ChannelMix(neox_args, layer_number) + + if neox_args.attention_dropout > 0: + self.drop0 = nn.Dropout(p=neox_args.attention_dropout) + if neox_args.hidden_dropout > 0: + self.drop1 = nn.Dropout(p=neox_args.hidden_dropout) + + if layer_number == 0: + global wkv_cuda + """ + Load cuda kernel at runtime. The kernel uses run time variables to build, ideally it should not. 
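+            """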
+ """ + wkv_cuda = load( + name="wkv6", + sources=[ + "megatron/model/rwkv/v6/cuda/wkv6_op.cpp", + f"megatron/model/rwkv/v6/cuda/wkv6_cuda.cu", + ], + verbose=True, + extra_cuda_cflags=[ + "-res-usage", + "--use_fast_math", + "-O3", + "-Xptxas -O3", + "--extra-device-vectorization", + f"-D_N_={self.neox_args.head_size}", + f"-D_T_={self.neox_args.seq_length}", + ], + ) + + def forward(self, x): + neox_args = self.neox_args + B, T, C = x.size() + if self.layer_number == 0: + x = self.ln1(x) + + if self.neox_args.attention_dropout == 0: + x = x + self.att(self.ln1(x)) + else: + x = self.drop0(x + self.att(self.ln1(x))) + + if self.neox_args.hidden_dropout == 0: + x = x + self.ffn(self.ln2(x)) + else: + x = self.drop1(x + self.ffn(self.ln2(x))) + + return x + + +class RWKVResidualLayerPipe(RWKVResidualLayer): + """ + RWKV Pipeline Layer + """ + + def forward(self, args): + assert len(args) == 2 + hidden_states, mask = args + neox_args = self.neox_args + return super().forward(hidden_states), mask diff --git a/megatron/model/transformer.py b/megatron/model/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..d112a7461acd6f41c8254476a9e84d4987a50ec4 --- /dev/null +++ b/megatron/model/transformer.py @@ -0,0 +1,1361 @@ +# Copyright (c) 2024 EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Transformer.""" + +import math +from contextlib import nullcontext + +import torch +import torch.nn.functional as F +import torch.nn as nn +from pkg_resources import packaging +from importlib.metadata import version + +from .norms import get_norm +from megatron import mpu +from megatron.model import megablocks_utils +from megatron.model.fused_softmax import FusedScaleMaskSoftmax +from megatron.model.activations import get_activation +from megatron.model.utils import exists, get_fusion_type +from megatron.model.positional_embeddings import ( + RotaryEmbedding, + apply_rotary_pos_emb_torch, + apply_rotary_pos_emb, + AliBi, +) +from megatron.model.fused_rope import ( + FusedRoPEFunc, + fused_apply_rotary_pos_emb_cached, +) +from megatron.model.fused_bias_dropout import ( + get_bias_dropout_add, + bias_dropout_add_fused_train, + bias_dropout_add_fused_inference, +) +from megatron.model.utils import configure_sparse_attention +from deepspeed.moe.layer import MoE + +try: + from flash_attn.ops.activations import swiglu +except ImportError: + swiglu = None + +# flags required to enable jit fusion kernels +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) +torch._C._jit_override_can_fuse_on_cpu(True) +torch._C._jit_override_can_fuse_on_gpu(True) + +""" We use the following notation throughout this file: + h: hidden size + n: number of attention heads + kv: number of key or value heads + p: number of model parallel partitions + np: n/p + kvp: kv/p + hp: h/p + hn: h/n + b: batch size + s: sequence length + l: number of layers + Transformer takes input of size [s, b, h] and returns a + tensor of the same size. We use the following arguments: + hyperparameters: transformer hyperparameters + attention_mask_func: a function that takes `unmasked-attention-scores` + with size [b, np, s, s] and an `attention-mask` and will apply + the masking. The function should return a masked score of the + same size [b, np, s, s]. + masked-attention-scores = attention_mask_func( + unmasked-attention-scores, attention-mask) +""" + + +class ParallelMLP(nn.Module): + """MLP. + + MLP will take the input with h hidden state, project it to 4*h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. At the end, dropout is also + applied. 
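+
+    For gated activations (e.g. swiglu) the intermediate size is rescaled by
+    2/3 and rounded up to a multiple of `multiple_of`, keeping the parameter
+    count roughly equal to the ungated case.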
+ """ + + def __init__( + self, + neox_args, + init_method, + output_layer_init_method, + parallel_output=False, + multiple_of=256, + MOE=False, + MoE_mp_size=1, + ): + super().__init__() + assert ( + neox_args.intermediate_size == None or neox_args.expansion_factor == None + ), "Must pass either the absolute intermediate size or the relative expansion factor for the mamba projections" + + self.activation_func, self.is_gated = get_activation(neox_args) + self.activation_type = neox_args.activation + self.bias_gelu_fusion = neox_args.bias_gelu_fusion + self.multiple_of = multiple_of + + if neox_args.intermediate_size: + ffn_dim = neox_args.intermediate_size + elif neox_args.expansion_factor: + ffn_dim = int(neox_args.expansion_factor * neox_args.hidden_size) + else: + # 4h is default for ffn_dim + ffn_dim = 4 * neox_args.hidden_size + ffn_dim_in = ffn_dim + if self.is_gated: + # set activation function to be gated implementation + self.activation_func = Gated_Activation( + self.activation_func, + (swiglu is not None) + and (neox_args.activation == "swiglu") + and neox_args.use_flashattn_swiglu, + ) + # auto scale so gated activations has equal parameters + ffn_dim = int(ffn_dim * 2 / 3) + ffn_dim_in = ffn_dim // 2 + # set multiple + ffn_dim = int( + (2 * self.multiple_of) + * ((ffn_dim + (2 * multiple_of) - 1) // (2 * multiple_of)) + ) + ffn_dim_in = int( + self.multiple_of * ((ffn_dim_in + multiple_of - 1) // multiple_of) + ) + + self.linear1 = mpu.ColumnParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=ffn_dim, + gather_output=False, + init_method=init_method, + skip_bias_add=True, + MOE=MOE, + MoE_mp_size=MoE_mp_size, + bias=neox_args.use_bias_in_mlp, + ) + # Project back to h. + self.linear2 = mpu.RowParallelLinear( + neox_args=neox_args, + input_size=ffn_dim_in, + output_size=neox_args.hidden_size, + input_is_parallel=True, + init_method=output_layer_init_method, + parallel_output=parallel_output, + skip_bias_add=True, + MOE=MOE, + MoE_mp_size=MoE_mp_size, + bias=neox_args.use_bias_in_mlp, + ) + + def forward(self, hidden_states): + # [s, b, intermediate_size] + intermediate_parallel, bias_parallel = self.linear1(hidden_states) + + if self.is_gated or (self.activation_type == "gelu" and self.bias_gelu_fusion): + intermediate_parallel = self.activation_func( + intermediate_parallel, bias_parallel + ) + else: + intermediate_parallel = self.activation_func( + intermediate_parallel + bias_parallel + ) + + # [s, b, h] + output, output_bias = self.linear2(intermediate_parallel) + return output, output_bias + + +class Gated_Activation(torch.nn.Module): + def __init__(self, activation_func, use_swiglu=False): + super().__init__() + self.activation_func = activation_func + self.use_swiglu = use_swiglu + + def forward(self, x, bias=None): + x, gate = x.chunk(2, dim=-1) + if bias is not None: + bias_1, bias_2 = bias.chunk(2, dim=-1) + x = x + bias_1 + gate = gate + bias_2 + if not self.use_swiglu: + intermediate_parallel = self.activation_func(gate) + return intermediate_parallel * x + else: + return swiglu(gate, x) + + +class ParallelLinear(nn.Module): + """ + A Parallel Linear Layer transforming the transformer outputs from hidden_size -> vocab_size + """ + + def __init__( + self, + neox_args, + parallel_output=True, + init_method=nn.init.xavier_normal_, + is_last_layer=False, + ): + super().__init__() + self.is_rm = neox_args.train_impl == "rm" + parallelism = neox_args.output_layer_parallelism if not self.is_rm else "row" + if parallelism == "column": + 
self.final_linear = mpu.ColumnParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=neox_args.padded_vocab_size, + bias=False, + init_method=init_method, + gather_output=not parallel_output, + skip_bias_add=False, + mup_rescale_parameters=is_last_layer, # rescale params only called if neox_args.use_mup = True, despite it not being included here + seq_dim=1, # important: must mark that this layer receives shape [b, s, h] not [s, b, h] and so Seq. Parallel comms must gather along dim=1 rather than dim=0 + ) + else: + if not self.is_rm: + print( + 'ERROR: Output layer parallelism over the hidden dim is currently broken (https://github.com/EleutherAI/gpt-neox/issues/905). Please run with output_layer_parallelism = "column" until this issue is fixed.' + ) + exit() + # self.final_linear = mpu.RowParallelLinear( + # neox_args=neox_args, + # input_size=neox_args.hidden_size, + # output_size=neox_args.padded_vocab_size, + # bias=False, + # input_is_parallel=False, + # init_method=init_method, + # parallel_output=parallel_output, + # skip_bias_add=False, + # mup_rescale_parameters=is_last_layer, # only called if neox_args.use_mup = True, despite it not being included here + # ) + else: # Not using cross entropy loss for RMs + self.rm_linear = mpu.RowParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=1, + bias=False, + input_is_parallel=False, + init_method=init_method, + parallel_output=False, + skip_bias_add=False, + mup_rescale_parameters=is_last_layer, # only called if neox_args.use_mup = True, despite it not being included here + ) + + def forward(self, hidden_states): + if not self.is_rm: + return self.final_linear(hidden_states) + else: + return self.rm_linear(hidden_states) + + +class _MegablocksAdapter(nn.Module): + def __init__( + self, neox_args, layer_cls, init_method, output_layer_init_method, ep_group + ): + super().__init__() + megablocks_utils.assert_megablocks_is_available() + args = megablocks_utils.as_megablocks_args(neox_args) + args.device = torch.cuda.current_device() + args.init_method = init_method + args.output_layer_init_method = output_layer_init_method + + # NOTE: Shard the MoE layers over the data parallel group. Expert + # parallel sharding and data parallel sharding could be decoupled + # by extending the optimizer to handle data parallel reductions for + # MoE and non-MoE parameters separately. + if args.moe_expert_model_parallelism: + args.expert_parallel_group = ep_group + + self.moe = layer_cls(args) + + def forward(self, x): + return self.moe.forward(x) + + +class MbMoE(_MegablocksAdapter): + def __init__(self, neox_args, init_method, output_layer_init_method, ep_group): + super().__init__( + neox_args, + megablocks_utils.moe.MoE, + init_method, + output_layer_init_method, + ep_group, + ) + + +class dMoE(_MegablocksAdapter): + def __init__(self, neox_args, init_method, output_layer_init_method, ep_group): + super().__init__( + neox_args, + megablocks_utils.dmoe.dMoE, + init_method, + output_layer_init_method, + ep_group, + ) + + +class ParallelSelfAttention(nn.Module): + """Parallel self-attention layer abstract class. + + Self-attention layer takes input with size [b, s, h] + and returns output of the same size. 
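+
+    Depending on neox_args, this covers standard multi-head as well as
+    grouped-query attention, flash/sparse kernels, sliding-window masking,
+    and rotary or ALiBi position embeddings.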
+ """ + + def __init__( + self, + neox_args, + attention_mask_func, + init_method, + output_layer_init_method, + layer_number, + rpe=None, + rotary=False, + use_cache=False, + parallel_output=False, + ): + super().__init__() + + self.fp16 = neox_args.precision == "fp16" + self.bf16 = neox_args.precision == "bfloat16" + self.attention_mask_func = attention_mask_func + self.apply_query_key_layer_scaling = neox_args.apply_query_key_layer_scaling + self.use_cache = use_cache + self.attention_softmax_in_fp32 = neox_args.attention_softmax_in_fp32 + if self.apply_query_key_layer_scaling: + self.attention_softmax_in_fp32 = True + self.layer_number = layer_number + # Per attention head and per partition values. + world_size = mpu.get_model_parallel_world_size() + self.hidden_size_per_partition = mpu.divide(neox_args.hidden_size, world_size) + self.hidden_size_per_attention_head = mpu.divide( + neox_args.hidden_size, neox_args.num_attention_heads + ) + self.num_attention_heads_per_partition = mpu.divide( + neox_args.num_attention_heads, world_size + ) + self.pos_emb = neox_args.pos_emb + + self.use_qk_layernorm = neox_args.use_qk_layernorm + if self.use_qk_layernorm: + norm, eps = get_norm(neox_args) + self.qk_layernorm = norm( + [ + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + ], + eps=eps, + ) + + self.sliding_window_width = neox_args.sliding_window_width + + if ( + not neox_args.num_kv_heads + or neox_args.num_kv_heads == neox_args.num_attention_heads + ): + self.gqa = False + else: + self.gqa = True + if self.gqa: + self.num_kv_heads_per_partition = mpu.divide( + neox_args.num_kv_heads, world_size + ) # we do not yet clone KV heads in MQA across TP ranks... + self.kv_hidden_size = ( + neox_args.num_kv_heads * self.hidden_size_per_attention_head + ) # how large the total hidden dim for each of K and V is + else: + self.num_kv_heads_per_partition = self.num_attention_heads_per_partition + self.kv_hidden_size = neox_args.hidden_size + + if not self.gqa: + # Strided linear layer. 
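+            # Fused QKV projection: a single column-parallel matmul emitting
+            # [Q | K | V] (3 * hidden_size columns), sharded across TP ranks.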
+ self.query_key_value = mpu.ColumnParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=3 * neox_args.hidden_size, + gather_output=False, + init_method=init_method, + bias=neox_args.use_bias_in_attn_linear, + ) + else: + # QKV proj is smaller if we are using GQA / MQA + self.query_key_value = mpu.ColumnParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=neox_args.hidden_size + 2 * self.kv_hidden_size, + gather_output=False, + init_method=init_method, + bias=neox_args.use_bias_in_attn_linear, + ) + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.apply_query_key_layer_scaling: + coeff = max(1, self.layer_number) + self.norm_factor *= coeff + + if neox_args.use_mup: + self.norm_factor = self.hidden_size_per_attention_head + + self.rpe = rpe + + if self.pos_emb == "alibi": + self.alibi_embed = AliBi( + neox_args.num_attention_heads, + neox_args.model_parallel_size, + mpu.get_model_parallel_rank(), + ) + + # TODO: this arg shouldn't need to be passed in - get from neox_args + if rotary: + if neox_args.rotary_pct == 1: + self.rotary_ndims = None + else: + assert neox_args.rotary_pct < 1 + self.rotary_ndims = int( + self.hidden_size_per_attention_head * neox_args.rotary_pct + ) + dim = ( + self.rotary_ndims + if self.rotary_ndims is not None + else self.hidden_size_per_attention_head + ) + self.rotary_emb = RotaryEmbedding( + dim, + base=neox_args.rotary_emb_base, + max_seq_len=neox_args.seq_length, + precision=neox_args.params_dtype, + save_inv_freqs=neox_args.rotary_save_freqs_buffer, + ) + else: + self.rotary_emb = None + + self.rope_fusion = neox_args.rope_fusion + self.attention_type = neox_args.attention_config[layer_number] + self.use_flash_attention = self.attention_type == "flash" + self.use_triton = ( + self.use_flash_attention + and self.pos_emb == "alibi" + and ( + not packaging.version.Version(version("flash-attn")) + >= packaging.version.Version("2.4.0.post1") + ) + ) + self.sparse = self.attention_type not in ("global", "flash") + + if self.gqa: + assert not self.sparse + + if self.sparse: + self.sparse_attn = configure_sparse_attention( + neox_args, + self.attention_type, + self.num_attention_heads_per_partition, + mpu=mpu, + ) + else: + if self.use_flash_attention: + # we now use Flash Attention 2's provided interface. + # TODO: we no longer need to use flash_triton_fn since flash cuda supports alibi. + # consider adding OpenAI's more recent Flash-2 Triton kernel in future + # from https://github.com/openai/triton/blob/main/python/tutorials/06-fused-attention.py + from flash_attn.flash_attn_interface import ( + flash_attn_func, + flash_attn_varlen_func, + ) + from flash_attn.flash_attn_triton import ( + flash_attn_func as flash_attn_unpadded_unpacked_func_triton, + ) + + self.flash_triton_fn = flash_attn_unpadded_unpacked_func_triton + self.flash_qkv_fn = flash_attn_func + self.flash_varlen_qkv_fn = flash_attn_varlen_func + else: + self.scale_mask_softmax = FusedScaleMaskSoftmax( + input_in_fp16=self.fp16, + input_in_bf16=self.bf16, + fusion_type=get_fusion_type(neox_args), + mask_func=self.attention_mask_func, + softmax_in_fp32=self.attention_softmax_in_fp32, + scale=coeff, + ) + + # Dropout. Note that for a single iteration, this layer will generate + # different outputs on different number of parallel partitions but + # on average it should not be partition dependent. 
+ self.dropout_p = neox_args.attention_dropout + self.attention_dropout = nn.Dropout(self.dropout_p) + + # Output. + self.dense = mpu.RowParallelLinear( + neox_args=neox_args, + input_size=neox_args.hidden_size, + output_size=neox_args.hidden_size, + input_is_parallel=True, + init_method=output_layer_init_method, + skip_bias_add=True, + parallel_output=parallel_output, + bias=neox_args.use_bias_in_attn_linear, + ) + + def attention( + self, query_layer, key_layer, value_layer, layer_past, attention_mask + ): + # =================================== + # Raw attention scores. [b, np, s, s] + # =================================== + + # [b, np, sq, sk] + output_size = ( + query_layer.size(1), + query_layer.size(2), + query_layer.size(0), + key_layer.size(0), + ) + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view( + output_size[2], output_size[0] * output_size[1], -1 + ) + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + # preallocating result tensor: [b * np, sq, sk] + matmul_result = torch.empty( + output_size[0] * output_size[1], + output_size[2], + output_size[3], + dtype=query_layer.dtype, + device=torch.cuda.current_device(), + ) + + # Raw attention scores. [b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_result, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + # ================================================== + # Update attention mask for inference. [b, np, sq, sk] + # ================================================== + + if self.use_cache: + with torch.no_grad(): + attention_mask = attention_mask[ + ..., : attention_scores.size(3), : attention_scores.size(3) + ] + + # =========================== + # Attention probs and dropout + # =========================== + + if exists(self.rpe): + rpe = self.rpe(query_layer.size(0), key_layer.size(0)) + attention_scores += rpe # [1, np, sq, sk] + + if self.pos_emb == "alibi": + attention_scores = self.alibi_embed(attention_scores) + + # attention scores and attention mask [b, np, sq, sk] + attention_probs = self.scale_mask_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + with mpu.get_cuda_rng_tracker().fork(): + attention_probs = self.attention_dropout(attention_probs) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. 
+ # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = ( + value_layer.size(1), + value_layer.size(2), + query_layer.size(0), + value_layer.size(3), + ) + + # change view [sk, b * np, hn] + value_layer = value_layer.view( + value_layer.size(0), output_size[0] * output_size[1], -1 + ) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view( + output_size[0] * output_size[1], output_size[2], -1 + ) + + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + return context_layer + + def flash_attention(self, query_layer, key_layer, value_layer): + # [b, np, sq, sk] + output_size = ( + query_layer.size(1), + query_layer.size(2), + query_layer.size(0), + key_layer.size(0), + ) + + if self.use_flash_attention and not self.use_triton: + + # [sk, b, np, hn] -> [b, sk, np, hn] -> [b * sk, 1, np, hn] + key_layer = key_layer.transpose(0, 1).reshape( + output_size[0], output_size[3], self.num_kv_heads_per_partition, -1 + ) + value_layer = value_layer.transpose(0, 1).reshape( + output_size[0], output_size[3], self.num_kv_heads_per_partition, -1 + ) + + # [sq, b, np, hn] -> [b, sq, np, hn] + query_layer = query_layer.transpose(0, 1).reshape( + output_size[0], output_size[2], output_size[1], -1 + ) + + # only pass in window_size or alibi_slopes kwarg + # if we use Sliding Window Attention / AliBi. + # Flash attn defaults to (-1,-1), or + # does not have this kwarg prior to v2.3.0 + extra_kwargs = ( + {"window_size": (self.sliding_window_width, -1)} + if self.sliding_window_width is not None + else {} + ) + if self.pos_emb == "alibi": + extra_kwargs["alibi_slopes"] = self.alibi_embed.slopes.to( + query_layer.device + ).to(torch.float32) + + if not self.training: + batch_size = output_size[0] + max_seqlen_q = output_size[2] + max_seqlen_k = output_size[3] + + cu_seqlens_q = torch.arange( + 0, + (batch_size + 1) * max_seqlen_q, + step=max_seqlen_q, + dtype=torch.int32, + device=query_layer.device, + ) + + cu_seqlens_k = torch.arange( + 0, + (batch_size + 1) * max_seqlen_k, + step=max_seqlen_k, + dtype=torch.int32, + device=key_layer.device, + ) + + q_shape = query_layer.shape + k_shape = key_layer.shape + v_shape = value_layer.shape + is_causal = max_seqlen_q == max_seqlen_k + output = self.flash_varlen_qkv_fn( + query_layer.reshape( + (q_shape[0] * q_shape[1], q_shape[2], q_shape[3]) + ), + key_layer.reshape( + (k_shape[0] * k_shape[1], k_shape[2], k_shape[3]) + ), + value_layer.reshape( + (v_shape[0] * v_shape[1], v_shape[2], v_shape[3]) + ), + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + softmax_scale=None, + causal=is_causal, + **extra_kwargs, + ) + output = output.reshape(q_shape) + else: + output = self.flash_qkv_fn( + query_layer, + key_layer, + value_layer, + self.dropout_p if self.training else 0.0, + softmax_scale=None, + causal=True, + **extra_kwargs, + ) + + matmul_result = output + # [b, sq, np, hn] -> [b, np, sq, hn] + matmul_result = matmul_result.transpose(1, 2) + + else: + # we still use Triton if using AliBi with flash-attn<2.4.0.post1. 
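+            # (flash-attn >= 2.4.0.post1 accepts alibi_slopes natively, so this
+            # Triton path with an explicit additive bias is only needed for
+            # older versions.)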
+ + # [sq, b, np, hn] -> [b, sq, np, hn] + sq = query_layer.size(0) + b = query_layer.size(1) + sk = key_layer.size(0) + + query_layer = query_layer.transpose(0, 1) + key_layer = key_layer.transpose(0, 1) + value_layer = value_layer.transpose(0, 1) + + bias = self.alibi_embed.bias(sq, sk, query_layer.device, query_layer.dtype) + bias = bias.unsqueeze(0).tile((b, 1, 1, 1)) + + matmul_result = self.flash_triton_fn( + query_layer, key_layer, value_layer, bias=bias, causal=True + ) + matmul_result = matmul_result.transpose(1, 2) + + return matmul_result + + def sparse_attention(self, query_layer, key_layer, value_layer, attention_mask): + # TODO: sparse attn dropout? + # TODO: pad to block size + # shape of q/k/v is [sq, b, np, hn] and needs to be transposed to [b, np, sq, hn] + query_layer, key_layer, value_layer = map( + lambda t: t.permute(1, 2, 0, 3).contiguous(), + (query_layer, key_layer, value_layer), + ) + # output shape [b, np(heads), sq, hn] + attn_mask = attention_mask.to(query_layer.dtype) * -10000 + if exists(self.rpe): + rpe = self.rpe(query_layer.size(0), key_layer.size(0)) + else: + rpe = None + return self.sparse_attn( + query_layer, key_layer, value_layer, attn_mask=attn_mask, rpe=rpe + ) + + def gqa_project(self, hidden_states, attention_mask, layer_past=None): + # QKV projection and separation into separate Q/K/V layers for GQA, + # where KV projections may be smaller than Q projection. + # the logic for this is explained in comments of this function + # detailing the intermediate sizes of tensors at each reshape. + + # pass through projection: [sq, b, h] --> [sq, b, ((np + 2 * kvp) * hn)] + mixed_x_layer, _ = self.query_key_value(hidden_states) + + # First: reshape so we have seqlen, batch, and num. query heads each as separate dims + # Final dim is not exactly head dim: the first (head dim) dims are query heads, + # The last (head dim * ratio of kv to q heads) each are the "k/v heads" + # (right now we treat like we have same num. heads, but smaller head dim) + + # [sq, b, ((np + 2 * kvp) * hn)] --> [sq, b, np, (hn * (1 + 2 * (kvp / np)))] + new_qkv_shape = ( + mixed_x_layer.shape[0], + mixed_x_layer.shape[1], + self.num_attention_heads_per_partition, + int( + self.hidden_size_per_attention_head + * ( + 1 + + 2 + * ( + self.num_kv_heads_per_partition + / self.num_attention_heads_per_partition + ) + ) + ), + ) + mixed_x_layer = mixed_x_layer.reshape(*new_qkv_shape) + + # Next: split our fake head dim. 
(last dim) so that the first (head dim) dimensions go to Q, + # the last smaller 2 * (head dim * kv to q head ratio) each divided between K and V separately + split_sizes = ( + self.hidden_size_per_attention_head, + int( + ( + self.num_kv_heads_per_partition + / self.num_attention_heads_per_partition + ) + * self.hidden_size_per_attention_head + ), + int( + ( + self.num_kv_heads_per_partition + / self.num_attention_heads_per_partition + ) + * self.hidden_size_per_attention_head + ), + ) + + # [sq, b, np, (hn * (1 + 2 * (kvp / np)))] --> 1 x [sq, b, np, hn] , 2 x [sq, b, np, (hn * (kvp / np))] + (query_layer, key_layer, value_layer) = [ + x.contiguous() + for x in torch.split( + mixed_x_layer, + split_sizes, + dim=mixed_x_layer.dim() - 1, + ) + ] + + # reshape K/V to proper output shape (last dim = correct full "real" head size again) + # 2 x [sq, b, np, (hn * (kvp / np))] --> 2 x [sq, b, kvp, hn] + new_kv_shape = ( + key_layer.size(0), + key_layer.size(1), + self.num_kv_heads_per_partition, + self.hidden_size_per_attention_head, + ) + + key_layer = key_layer.view(*new_kv_shape) + + value_layer = value_layer.view(*new_kv_shape) + + # if not using Flash attention, we repeat K/V heads to match Q head counts + if not self.use_flash_attention: + key_layer = torch.repeat_interleave( + key_layer, + repeats=int( + self.num_attention_heads_per_partition + // self.num_kv_heads_per_partition + ), + dim=2, + ) + value_layer = torch.repeat_interleave( + value_layer, + repeats=int( + self.num_attention_heads_per_partition + // self.num_kv_heads_per_partition + ), + dim=2, + ) + + return query_layer, key_layer, value_layer + + def forward(self, hidden_states, attention_mask, layer_past=None): + + # hidden_states: [sq, b, h] + + # ===================== + # Query, Key, and Value + # ===================== + + if not self.gqa: + # QKV projection for MHA. + + # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] + mixed_x_layer, _ = self.query_key_value(hidden_states) + + # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn] + new_tensor_shape = mixed_x_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head, + ) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] + (query_layer, key_layer, value_layer) = mpu.split_tensor_along_last_dim( + mixed_x_layer, 3 + ) + else: + # Grouped Query Attention (GQA) - specific logic for performing QKV proj + # and separating out Q, K, and V outputs. 
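+            # For example (hypothetical sizes): with np = 8 query heads and
+            # kvp = 2 KV heads on this rank, the projection yields np * hn query
+            # columns plus 2 * (kvp * hn) key/value columns, which gqa_project()
+            # splits back into 8 Q heads and 2 heads each for K and V.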
+ + # output shapes: 1 x [sq, b, np, hn], 2 x [sq, b, kvp, hn] if using flash + query_layer, key_layer, value_layer = self.gqa_project( + hidden_states, attention_mask, layer_past=layer_past + ) + + # QK Normalization https://arxiv.org/abs/2302.05442 + if self.use_qk_layernorm: + query_layer = self.qk_layernorm(query_layer) + key_layer = self.qk_layernorm(key_layer) + + if exists(self.rotary_emb): + if exists(self.rotary_ndims): + # partial rotary + query_rot, query_pass = ( + query_layer[..., : self.rotary_ndims], + query_layer[..., self.rotary_ndims :], + ) + key_rot, key_pass = ( + key_layer[..., : self.rotary_ndims], + key_layer[..., self.rotary_ndims :], + ) + else: + # full rotary + query_rot, key_rot = query_layer, key_layer + + seq_len = key_layer.shape[0] + offset = 0 + if exists(layer_past) and layer_past.numel() > 0: + offset = layer_past[0].shape[0] + seq_len += offset + cos, sin = self.rotary_emb(value_layer, seq_len=seq_len) + if self.rope_fusion: + query_layer, key_layer = ( + fused_apply_rotary_pos_emb_cached(rot, cos, sin) + for rot in [query_rot, key_rot] + ) + else: + if self.bf16: + apply_rotary_fn = apply_rotary_pos_emb_torch + else: + apply_rotary_fn = apply_rotary_pos_emb + query_layer, key_layer = apply_rotary_fn( + query_rot, key_rot, cos, sin, offset=offset + ) + + if exists(self.rotary_ndims): + query_layer = torch.cat((query_layer, query_pass), dim=-1) + key_layer = torch.cat((key_layer, key_pass), dim=-1) + + # ================================== + # Cache key and value for inference + # ================================== + + if exists(layer_past) and layer_past.numel() > 0: + past_key, past_value = layer_past + key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=0) + value_layer = torch.cat( + (past_value.type_as(value_layer), value_layer), dim=0 + ) + + if self.use_cache: + present = torch.stack((key_layer, value_layer)) + + if self.use_flash_attention: + context_layer = self.flash_attention(query_layer, key_layer, value_layer) + elif not self.sparse: + context_layer = self.attention( + query_layer, key_layer, value_layer, layer_past, attention_mask + ) + else: + context_layer = self.sparse_attention( + query_layer, key_layer, value_layer, attention_mask + ) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + ( + self.hidden_size_per_partition, + ) + context_layer = context_layer.view(*new_context_layer_shape) + + # ================= + # Output. [sq, b, h] + # ================= + + output, bias = self.dense(context_layer) + + if self.use_cache: + output = [output, present] + + return output, bias + + +class ParallelTransformerLayer(nn.Module): + """A single transformer layer. + + Transformer layer takes input with size [b, s, h] and returns an + output of the same size. + """ + + def __init__( + self, + neox_args, + attention_mask_func, + init_method, + output_layer_init_method, + layer_number, + rpe=None, + rotary=False, + use_cache=False, + ): + + super().__init__() + self.layer_number = layer_number + self.neox_args = neox_args + + norm, eps = get_norm(neox_args) + + # Layernorm on the input data. 
+        self.input_layernorm = norm(neox_args.hidden_size, eps=eps)
+        self.use_cache = use_cache
+
+        self.hidden_dropout = neox_args.hidden_dropout
+        self.bias_dropout_fusion = neox_args.bias_dropout_fusion
+        self.gpt_j_residual = neox_args.gpt_j_residual
+        self.gpt_j_tied = neox_args.gpt_j_tied
+        self.moe_type = neox_args.moe_type
+        self.activation = neox_args.activation
+
+        if self.gpt_j_residual:
+            # GPT-J style layers allow us to defer the reduction of results across TP ranks until the end of the two sublayers.
+            # the reduction we use is a simple allreduce for pure Tensor Parallel,
+            # but needs to be a reduce-scatter when using Megatron-style Sequence Parallel (LN sharding.)
+            self.reduce = (
+                mpu.mappings.reduce_from_model_parallel_region
+                if not neox_args.sequence_parallel
+                else mpu.mappings.reduce_scatter_to_sequence_parallel_region
+            )
+
+        # Self attention.
+        self.attention = ParallelSelfAttention(
+            neox_args=neox_args,
+            attention_mask_func=attention_mask_func,
+            init_method=init_method,
+            output_layer_init_method=output_layer_init_method,
+            layer_number=layer_number,
+            rpe=rpe,
+            use_cache=self.use_cache,
+            rotary=rotary,
+            parallel_output=self.gpt_j_residual,
+        )
+
+        # Layernorm on the output of the attention layer.
+        # If GPT-J residuals are used, this is superfluous, but leaving it in
+        # leads to cleaner code.
+        self.post_attention_layernorm = norm(neox_args.hidden_size, eps=eps)
+
+        # MLP
+        def get_mlp(**kw):
+            return ParallelMLP(
+                neox_args=neox_args,
+                init_method=init_method,
+                output_layer_init_method=output_layer_init_method,
+                parallel_output=self.gpt_j_residual,
+                multiple_of=neox_args.mlp_multiple_of,
+                **kw,
+            )
+
+        self.num_experts = (
+            neox_args.moe_num_experts
+            if layer_number % neox_args.expert_interval == 0
+            else 1
+        )
+        args = neox_args
+        if self.num_experts <= 1:
+            self.mlp = get_mlp()
+        else:
+            from torch import distributed as dist
+
+            if self.num_experts > dist.get_world_size():
+                moe_mp_size = 1
+            else:
+                moe_mp_size = dist.get_world_size() // self.num_experts
+
+            if neox_args.moe_type == "deepspeed":
+                self.mlp = MoE(
+                    args.hidden_size,
+                    get_mlp(
+                        "regular",
+                        MOE=True,
+                        MoE_mp_size=moe_mp_size,
+                    ),
+                    num_experts=self.num_experts,
+                    ep_size=args.moe_expert_parallel_size,
+                    k=args.moe_top_k,
+                    use_residual=args.moe_use_residual,
+                    capacity_factor=args.moe_train_capacity_factor,
+                    eval_capacity_factor=args.moe_eval_capacity_factor,
+                    min_capacity=args.moe_min_capacity,
+                    drop_tokens=args.moe_token_dropping,
+                    use_tutel=args.use_tutel,
+                    enable_expert_tensor_parallelism=args.enable_expert_tensor_parallelism,
+                )
+            elif neox_args.moe_type == "megablocks":
+
+                def integrate_megablocks_with_ds_expert_parallelism():
+                    # We make megablocks work with DS parallelism.
+                    #
+                    # We fool DS into accepting these MoE parameters as its own DS MoE params,
+                    # which makes things work with the underlying expert parallelism,
+                    # including TED parallelism.
+                    #
+                    # Effectively, we want to:
+                    #
+                    # - Make DS's data parallel gradient all-reduction skip these params.
+                    # - But make these params participate in the expert parallel all-reduction!
+                    #
+                    # Further background:
+                    #
+                    # Normally, with the original megablocks demo codebase, it
+                    # only supports 1 copy of any expert throughout
+                    # the network, since it uses EP group = DP group.
+                    #
+                    # First, we trigger DS initialization of the MoE expert parallel groups and internal state.
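+                    # (The `throwaway` module below exists only so we can reuse its
+                    # expert-parallel group and group name; its parameters are never
+                    # used for compute.)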
+                    throwaway = MoE(
+                        args.hidden_size,
+                        get_mlp(
+                            MOE=True,
+                            MoE_mp_size=moe_mp_size,
+                        ),
+                        num_experts=self.num_experts,
+                        ep_size=args.moe_expert_parallel_size,
+                        k=args.moe_top_k,
+                        use_residual=args.moe_use_residual,
+                        capacity_factor=args.moe_train_capacity_factor,
+                        eval_capacity_factor=args.moe_eval_capacity_factor,
+                        min_capacity=args.moe_min_capacity,
+                        drop_tokens=args.moe_token_dropping,
+                        use_tutel=args.use_tutel,
+                        enable_expert_tensor_parallelism=args.enable_expert_tensor_parallelism,
+                    )
+                    throwaway.set_deepspeed_parallelism()
+
+                    ep_group = throwaway.deepspeed_moe.ep_group
+                    if args.moe_token_dropping:
+                        self.mlp = MbMoE(
+                            neox_args, init_method, output_layer_init_method, ep_group
+                        )
+                    else:
+                        self.mlp = dMoE(
+                            neox_args, init_method, output_layer_init_method, ep_group
+                        )
+
+                    # Next, we trick DS into seeing these as its own MoE params.
+                    for param in self.mlp.parameters():
+                        if getattr(param, "expert_model_parallel", None) is not None:
+                            # is_moe_param looks for this attr.
+                            param.allreduce = False
+                            param.group_name = throwaway.expert_group_name
+
+                integrate_megablocks_with_ds_expert_parallelism()
+
+            else:
+                raise KeyError(neox_args.moe_type)
+
+        self.layer_past = None  # used to cache k/v pairs in inference
+
+    def _get_bias_dropout(self):
+        if self.bias_dropout_fusion:
+            fn = (
+                bias_dropout_add_fused_train
+                if self.training
+                else bias_dropout_add_fused_inference
+            )
+        else:
+            fn = get_bias_dropout_add(self.training)
+        return fn
+
+    def forward(self, x, attention_mask, layer_past=None):
+        layer_past = layer_past if layer_past is not None else self.layer_past
+        bias_dropout_fn = self._get_bias_dropout()
+        moe_loss = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+        # x: [b, s, h]
+        if self.gpt_j_residual:
+            # pseudocode:
+            # x = x + attn(ln(x)) + mlp(ln(x))
+            # this means we can avoid doing the allreduce in the attn / mlp outputs
+            # to save communication time (we can do a single allreduce after we add mlp / attn outputs).
+            # due to a bug, the two layernorms are not tied in GPT-NeoX-20B.
This is not desirable, but
+            # we preserve the functionality for backwards compatibility
+
+            residual = x
+            # applies the correct normalization depending on if the norms are tied
+            if self.gpt_j_tied:
+                x = self.input_layernorm(x)
+                x1, x2 = x, x
+            else:
+                x1, x2 = self.input_layernorm(x), self.post_attention_layernorm(x)
+
+            # attention operator
+            attention_output, attention_bias = self.attention(
+                x1, attention_mask, layer_past=layer_past
+            )
+            if self.use_cache:
+                attention_output, presents = attention_output
+                self.layer_past = presents
+
+            if attention_bias is not None:
+                with torch.enable_grad() if self.training else nullcontext():
+                    attention_output = bias_dropout_fn(
+                        attention_output,
+                        bias=attention_bias.expand_as(attention_output),
+                        residual=None,
+                        prob=self.hidden_dropout,
+                    )
+
+            # mlp operator
+            mlp_output, mlp_bias = self.mlp(x2)
+            if mlp_bias is not None:
+                with torch.enable_grad() if self.training else nullcontext():
+                    output = bias_dropout_fn(
+                        mlp_output,
+                        bias=mlp_bias.expand_as(mlp_output),
+                        residual=attention_output,
+                        prob=self.hidden_dropout,
+                    )
+            else:
+                output = mlp_output
+
+            # output = x + attn(ln(x)) + mlp(ln(x))
+            output = residual + self.reduce(output)
+        else:
+            # pseudocode:
+            # x = x + attn(ln1(x))
+            # x = x + mlp(ln2(x))
+
+            residual = x
+
+            # x = x + attn(ln1(x))
+            attention_output, attention_bias = self.attention(
+                self.input_layernorm(x), attention_mask, layer_past=layer_past
+            )
+            if self.use_cache:
+                attention_output, presents = attention_output
+                self.layer_past = presents
+            with torch.enable_grad() if self.training else nullcontext():
+                if attention_bias is not None:
+                    # Use special bias_dropout_fn if we have a bias term from the above attention layer
+                    attention_output = bias_dropout_fn(
+                        attention_output,
+                        bias=attention_bias.expand_as(residual),
+                        residual=residual,
+                        prob=self.hidden_dropout,
+                    )
+                else:
+                    # Otherwise just apply dropout + residual
+                    attention_output = (
+                        torch.nn.functional.dropout(
+                            attention_output,
+                            p=self.hidden_dropout,
+                            training=self.training,
+                        )
+                        + residual
+                    )
+
+            # output = x + mlp(ln2(x))
+            layernorm_output = self.post_attention_layernorm(attention_output)
+            mlp_bias = torch.tensor(
+                0.0, device=layernorm_output.device, dtype=layernorm_output.dtype
+            )
+
+            if self.num_experts == 1:
+                mlp_output, mlp_bias = self.mlp(layernorm_output)
+            else:
+                if self.moe_type == "deepspeed":
+                    mlp_output, moe_loss, _ = self.mlp(layernorm_output)
+                    mlp_bias = (
+                        None  # deepspeed.moe.layer.MoE.forward ignores the bias term
+                    )
+                elif self.moe_type == "megablocks":
+                    mlp_output, mlp_bias = self.mlp(layernorm_output)
+                else:
+                    raise KeyError(self.moe_type)
+
+            with torch.enable_grad() if self.training else nullcontext():
+                if (
+                    self.activation == "swiglu"
+                    or self.num_experts > 1
+                    and self.moe_type == "deepspeed"
+                ):
+                    # No dropout either
+                    assert mlp_bias is None
+                    output = mlp_output + attention_output
+                else:
+                    output = bias_dropout_fn(
+                        mlp_output,
+                        bias=mlp_bias.expand_as(attention_output),
+                        residual=attention_output,
+                        prob=self.hidden_dropout,
+                    )
+
+        return output, moe_loss
+
+
+class ParallelTransformerLayerPipe(ParallelTransformerLayer):
+    """Extends ParallelTransformerLayer to forward attention_mask through the pipeline."""
+
+    def forward(self, args):
+        assert (
+            len(args) == 2
+        ), "ParallelTransformerLayerPipe expects 2 arguments - hidden_states and attention_mask"
+        hidden_states, attention_mask = args
+        # we are returning just [hidden_states, mask]
+        output, moe_loss = 
super().forward(hidden_states, attention_mask) + # auxiliary output + self.last_moe_loss = moe_loss + return output, attention_mask + + +class ParallelLinearPipe(ParallelLinear): + """Another helper class to pass presents through to the output when doing inference with a Pipe Parallel model""" + + def forward(self, args): + assert isinstance( + args, torch.Tensor + ), "ParallelLinearPipe expects a single argument - hidden_states" + hidden_state = args + logits, bias = super().forward(hidden_state) + return logits + + +class NormPipe(nn.Module): + """Just a helper class to pass presents through to the output when doing inference with a Pipe Parallel model""" + + def __init__(self, norm_class, hidden_size, eps): + super().__init__() + self.norm = norm_class(hidden_size, eps=eps) + + def forward(self, args): + assert not isinstance( + args, tuple + ), "NormPipe should only receive a single tensor as input" + return self.norm(args) + + +def parallel_lm_logits( + input_, + word_embeddings_weight, + parallel_output, + seq_parallel=False, + seq_dim=1, + bias=None, +): + """LM logits using word embedding weights.""" + # Parallel logits. + if seq_parallel: + # if using Sequence Parallelism, our logits are sharded along the sequence dimension. + # gather them here. (backward pass: reduce-scatter) + input_parallel = mpu.gather_from_sequence_parallel_region( + input_, seq_dim=seq_dim + ) + else: + # Set up backprop all-reduce. + input_parallel = mpu.copy_to_model_parallel_region(input_) + + # Matrix multiply. + if bias is None: + logits_parallel = F.linear(input_parallel, word_embeddings_weight) + else: + logits_parallel = F.linear(input_parallel, word_embeddings_weight, bias) + + # Gather if needed. + if parallel_output: + return logits_parallel + + return mpu.gather_from_model_parallel_region(logits_parallel) diff --git a/megatron/model/transformer_engine.py b/megatron/model/transformer_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..338513a9712eac45ef7073f8dd9642fd8f47bcb5 --- /dev/null +++ b/megatron/model/transformer_engine.py @@ -0,0 +1,137 @@ +import torch + +try: + import transformer_engine as te +except ImportError: + raise ImportError( + "Unable to import transformer-engine. Please refer to " + "https://github.com/NVIDIA/TransformerEngine for installation instructions." + ) + + +class TERMSNorm(torch.nn.Module): + def __init__(self, dim, eps=1e-8, **kwargs): + """ + A conditional wrapper to initialize an instance of Transformer-Engine's + `RMSNorm` based on input + :param dim: model size + :param eps: epsilon value, default 1e-8 + """ + super(TERMSNorm, self).__init__() + + self.d = dim + self.eps = eps + self.norm = te.pytorch.RMSNorm( + hidden_size=self.d, + eps=self.eps, + **kwargs, + ) + + def forward(self, x): + return self.norm(x) + + +class TELayerNorm(torch.nn.Module): + def __init__(self, dim, eps=1.0e-5, **kwargs): + """ + A conditional wrapper to initialize an instance of Transformer-Engine's + `LayerNorm` based on input + :param dim: model size + :param eps: epsilon value, default 1.0e-5 + """ + super(TELayerNorm, self).__init__() + + self.d = dim + self.eps = eps + self.norm = te.pytorch.LayerNorm( + hidden_size=self.d, + eps=self.eps, + **kwargs, + ) + + def forward(self, x): + return self.norm(x) + + +class TELinear(te.pytorch.Linear): + """ + Wrapper for the Transformer-Engine's `Linear` layer. 
+ """ + + def __init__(self): + # TODO + return + + def forward(self, x): + # TODO + return + + +class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear): + """ + Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines + layernorm and linear layers + """ + + def __init__(self): + # TODO + return + + def forward(self, x): + # TODO + return + + +class TEColumnParallelLinear(TELinear): + """ + Wrapper for the Transformer-Engine's `Linear` layer but specialized similar + to megatron's `ColumnParallelLinear` layer. + """ + + def __init__(self): + # TODO + return + + def forward(self, x): + # TODO + return + + +class TERowParallelLinear(TELinear): + """ + Wrapper for the Transformer-Engine's `Linear` layer but specialized similar + to megatron's `RowParallelLinear` layer. + """ + + def __init__(self): + # TODO + return + + def forward(self, x): + # TODO + return + + +class TEDotProductAttention(te.pytorch.DotProductAttention): + """ + Wrapper for the Transformer-Engine's `DotProductAttention` layer that also + has "flash attention" enabled. + """ + + def __init__(self): + # TODO + return + + def forward(self, x): + # TODO + return + + +class TEDelayedScaling(te.common.recipe.DelayedScaling): + """ + Wrapper for the Transformer-Engine's `DelayedScaling` layer. + """ + + def __init__(self): + # TODO + return diff --git a/megatron/model/utils.py b/megatron/model/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8176f1f7ac5621e8658815f74a3a141e4df633ed --- /dev/null +++ b/megatron/model/utils.py @@ -0,0 +1,404 @@ +# Copyright (c) 2024 EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for models.""" + +import torch +from megatron.model.fused_softmax import SoftmaxFusionTypes +from megatron import mpu +from types import GeneratorType +import torch.distributed as dist + +import importlib +from typing import List, Dict, Any + + +def get_params_for_weight_decay_optimization(module: Any, neox_args: Any): + """ + Divide params into with-weight-decay and without-weight-decay groups. + Layernorms and biases will have no weight decay but the rest will. 
+ """ + weight_decay_params = {"params": [], "name": "weight_decay_params"} + no_weight_decay_params = { + "params": [], + "weight_decay": 0.0, + "name": "no_weight_decay_params", + } + + def is_no_weight_decay_module(module_: Any) -> bool: + return ( + type(module_).__name__ + in [ + "LayerNorm", + "RMSNorm", + "ScaleNorm", + "TELayerNorm", + "TERMSNorm", + "MixedFusedLayerNorm", + "MixedFusedRMSNorm", + ] + or neox_args.weight_decay == 0.0 + ) + + for module_ in module.modules(): + if is_no_weight_decay_module(module_): + no_weight_decay_params["params"].extend( + [p for p in module_._parameters.values() if p is not None] + ) + else: + for name, param in module_._parameters.items(): + if param is None: + continue + if name == "bias" or getattr(param, "_no_weight_decay", False): + no_weight_decay_params["params"].append(param) + else: + weight_decay_params["params"].append(param) + + if neox_args.weight_decay == 0.0: + # Only return a single param group to minimize calls to compressed_allreduce with onebitadam + return [no_weight_decay_params] + return weight_decay_params, no_weight_decay_params + + +def exists(x): + return x is not None + + +class Lambda(torch.nn.Module): + def __init__(self, func): + super().__init__() + self.func = func + + def forward(self, x): + return self.func(x) + + +class SequentialWrapper(torch.nn.Module): + """ + Used to convert a deepspeed PipelineModule to an nn.Sequential like model whilst retaining + activation checkpointing. + """ + + def __init__( + self, + layers, + activation_checkpoint_interval, + activation_checkpoint_func, + parent_class_name=None, + ): + super().__init__() + self.sequential = torch.nn.Sequential(*layers) + self.activation_checkpoint_interval = activation_checkpoint_interval + self.parent_class_name = parent_class_name + self.activation_checkpoint_func = activation_checkpoint_func + self.batch_fn = None + + def _is_checkpointable(self, funcs): + if self.parent_class_name == "GPT2ModelPipe": + return all( + "ParallelTransformerLayerPipe" in f.__class__.__name__ for f in funcs + ) + params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)] + return any(len(list(p)) > 0 for p in params) + + def set_batch_fn(self, fn): + """Execute a post-processing function on input data. + + Args: + fn (function): The function to run. + """ + self.batch_fn = fn + + def inference_mode(self, use_cache=True): + """ + Sets up the model for inference by turning on k/v caching (if specified) and setting `parallel output` of the final layer to false, + so logits are gathered across model parallel ranks. + + :param cache: (bool) True if you want to use caching during inference, False otherwise + """ + _set_use_cache(self.sequential, use_cache) + recursive_setattr(self.sequential, "training", False) + + def train_mode(self): + """ + Sets up the model for training by turning off k/v caching. 
+ """ + _set_use_cache(self.sequential, False) + recursive_setattr(self.sequential, "training", True) + + def forward( + self, forward_input, curriculum_seqlen=None, labels=None, neox_args=None + ): + + if self.batch_fn: + forward_input = self.batch_fn(forward_input) + + if ( + curriculum_seqlen is not None + and isinstance(forward_input, tuple) + and len(forward_input) == 3 + ): + neox_args.update_value("curriculum_seqlen", curriculum_seqlen) + tokens = forward_input[0] + input_ids = forward_input[1] + attention_mask = forward_input[2] + if curriculum_seqlen < input_ids.size()[1]: + # seqlen-based curriculum learning + # input_ids, position_ids, labels have size [batch size, seqlen] + input_ids = input_ids[:, :curriculum_seqlen].contiguous() + tokens = tokens[:, :curriculum_seqlen].contiguous() + # position_ids = position_ids[:, :curriculum_seqlen].contiguous() + if labels is not None: + labels = labels[:, :curriculum_seqlen].contiguous() + # attention_mask has size [1, 1, seqlen, seqlen] + attention_mask = attention_mask[ + :, :, :curriculum_seqlen, :curriculum_seqlen + ].contiguous() + forward_input = (tokens, input_ids, attention_mask) + + moe_losses = [] + + def exec_range_func(start, end): + """Helper function to be used with checkpoint() + Adapted from torch.utils.checkpoint:checkpoint_sequential() + """ + + def exec_func(*inputs): + # Single tensor inputs need to be unwrapped + if len(inputs) == 1: + inputs = inputs[0] + for idx, layer in enumerate(self.sequential[start:end]): + inputs = layer(inputs) + if hasattr(layer, "last_moe_loss"): + moe_losses.append(layer.last_moe_loss) + return inputs + + return exec_func + + if self.activation_checkpoint_interval == 0: + func = exec_range_func(0, len(self.sequential)) + x = func(forward_input) + else: + num_layers = len(self.sequential) + x = forward_input + for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + end_idx = min( + start_idx + self.activation_checkpoint_interval, num_layers + ) + + funcs = self.sequential[start_idx:end_idx] + # Since we either pass tensors or tuples of tensors without unpacking, we + # need to be careful not to double-wrap tensors with tuple. + if not isinstance(x, tuple): + x = (x,) + + if self._is_checkpointable(funcs): + x = self.activation_checkpoint_func( + exec_range_func(start_idx, end_idx), *x + ) + else: + x = exec_range_func(start_idx, end_idx)(*x) + return x, moe_losses + + def clear_cache(self): + """ + Recursively clears the kv cache on all layers + """ + recursive_setattr(self.sequential, "layer_past", None) + + +def recursive_setattr(m, attr, value, assert_type=None, type_filter=None): + """ + Recursively set attributes on a pytorch module or an iterable of modules. + If an assert_type is provided, it will assert that the type of the value is the same as the assert_type. + If a type_filter is provided, it will only set attributes on modules that match that type. + """ + if assert_type is not None: + assert isinstance(value, assert_type), "Value is not the correct type." 
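+    # Example: recursive_setattr(model, "use_cache", True, assert_type=bool)
+    # sets `use_cache` on every submodule that defines it (see _set_use_cache below).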
+ + # if m is a list or a generator, iterate over the elements + if isinstance(m, (list, GeneratorType)): + for i in m: + recursive_setattr(i, attr, value, assert_type, type_filter) + elif isinstance(m, torch.nn.Module): + if hasattr(m, attr): + if type_filter is None or isinstance(m, type_filter): + setattr(m, attr, value) + if hasattr(m, "children"): + recursive_setattr(m.children(), attr, value, assert_type, type_filter) + + +def _set_use_cache(modules, value: bool): + """ + Recursively sets an use_cache to `value` on a list of pytorch modules, if they have a use_cache attribute. + use_cache is used to decide whether we cache past key value activations or not in inference. + """ + recursive_setattr(modules, "use_cache", value, assert_type=bool) + + +def configure_sparse_attention(neox_args, attention_type, num_attention_heads, mpu): + from deepspeed.ops.sparse_attention import ( + SparseSelfAttention, + VariableSparsityConfig, + FixedSparsityConfig, + BigBirdSparsityConfig, + BSLongformerSparsityConfig, + ) + from deepspeed.ops.sparse_attention.sparsity_config import ( + LocalSlidingWindowSparsityConfig, + ) + + if attention_type == "sparse_fixed": + # you can think of local window size as `block_size` * `num_local_blocks`. + # so if you wanted to set a local window size of 256, set block size to 16 and `num_local_blocks` to 16 + sparsity_config = FixedSparsityConfig( + num_heads=num_attention_heads, + block=neox_args.sparsity_config.get("block", 16), + different_layout_per_head=neox_args.sparsity_config.get( + "different_layout_per_head", False + ), + num_local_blocks=neox_args.sparsity_config.get("num_local_blocks", 4), + num_global_blocks=neox_args.sparsity_config.get("num_global_blocks", 1), + num_different_global_patterns=neox_args.sparsity_config.get( + "num_different_global_patterns", 1 + ), + attention="unidirectional", + horizontal_global_attention=False, + ) + elif attention_type == "sparse_variable": + sparsity_config = VariableSparsityConfig( + num_heads=num_attention_heads, + block=neox_args.sparsity_config.get("block", 16), + different_layout_per_head=neox_args.sparsity_config.get( + "different_layout_per_head", False + ), + num_random_blocks=neox_args.sparsity_config.get("num_random_blocks", 0), + local_window_blocks=neox_args.sparsity_config.get( + "local_window_blocks", [4] + ), + global_block_indices=neox_args.sparsity_config.get( + "global_block_indices", [0] + ), + global_block_end_indices=neox_args.sparsity_config.get( + "global_block_end_indices", None + ), + attention="unidirectional", + horizontal_global_attention=False, + ) + elif attention_type == "local": + # can configure with `num_local_blocks` or `num_sliding_window_blocks` + num_local_blocks = neox_args.sparsity_config.get( + "num_local_blocks", + neox_args.sparsity_config.get("num_sliding_window_blocks", 4), + ) + sparsity_config = LocalSlidingWindowSparsityConfig( + num_heads=num_attention_heads, + block=neox_args.sparsity_config.get("block", 16), + num_sliding_window_blocks=num_local_blocks, + attention="unidirectional", + ) + elif attention_type == "bigbird": + sparsity_config = BigBirdSparsityConfig( + num_heads=num_attention_heads, + block=neox_args.sparsity_config.get("block", 16), + different_layout_per_head=neox_args.sparsity_config.get( + "different_layout_per_head", False + ), + num_random_blocks=neox_args.sparsity_config.get("num_random_blocks", 1), + num_sliding_window_blocks=neox_args.sparsity_config.get( + "num_sliding_window_blocks", 3 + ), + 
num_global_blocks=neox_args.sparsity_config.get("num_global_blocks", 1), + attention="unidirectional", + ) + elif attention_type == "bslongformer": + sparsity_config = BSLongformerSparsityConfig( + num_heads=num_attention_heads, + block=neox_args.sparsity_config.get("block", 16), + different_layout_per_head=neox_args.sparsity_config.get( + "different_layout_per_head", False + ), + num_sliding_window_blocks=neox_args.sparsity_config.get( + "num_sliding_window_blocks", 3 + ), + global_block_indices=neox_args.sparsity_config.get( + "global_block_indices", [0] + ), + global_block_end_indices=neox_args.sparsity_config.get( + "global_block_end_indices", None + ), + attention="unidirectional", + ) + else: + raise ValueError(f"Attention type {attention_type} not recognized") + return SparseSelfAttention( + sparsity_config=sparsity_config, + max_seq_length=neox_args.seq_length, + attn_mask_mode="add", + mpu=mpu, + ) + + +def get_fusion_type(neox_args): + fusion_type = SoftmaxFusionTypes.none + if neox_args.scaled_upper_triang_masked_softmax_fusion: + fusion_type = SoftmaxFusionTypes.upper_triang + elif neox_args.scaled_masked_softmax_fusion: + fusion_type = SoftmaxFusionTypes.general + return fusion_type + + +def reduce_weight_grads_from_model_parallel_region(input_): + """A hook that can be applied to any weight tensor via .register_hook(). + Allreduces grads for e.g. LN weights across the model parallel group. + Needed to keep LNs in sync, despite them getting diff data -> diff gradients when using sequence parallel. + """ + # Bypass the function if no TP -> no comm needed. + if mpu.get_model_parallel_world_size() == 1: + return input_ + + # Bf16 convert + dt = input_.dtype + if dt == torch.bfloat16 and mpu.get_fp32_allreduce(): + input_ = input_.float() + + # All-reduce. + dist.all_reduce(input_, group=mpu.get_model_parallel_group()) + + # Bf16 convert + if dt == torch.bfloat16 and mpu.get_fp32_allreduce(): + input_ = input_.bfloat16() + + return input_ + + +def mark_norms_for_sequence_parallel_grad_sync(module, neox_args): + """Iterate through the modules in our model, and for any "...Norm" classnames, + register a hook on each of that module's parameters which will allreduce norms' weights' grads across + the model (sequence) parallel region. + """ + + if not neox_args.sequence_parallel: + # if we aren't using sequence parallelism, this is a no-op + return + + for module_ in module.modules(): + if "norm" in type(module_).__name__.lower(): + # this is a norm, we want to allreduce its weight grads across sequence parallel region + for name, param in module_.named_parameters(): + if param.requires_grad: + param.register_hook(reduce_weight_grads_from_model_parallel_region) diff --git a/megatron/model/word_embeddings.py b/megatron/model/word_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3c1117eec3024f4e29ba5e25fb39202889f388 --- /dev/null +++ b/megatron/model/word_embeddings.py @@ -0,0 +1,251 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import math +from torch.nn.parameter import Parameter + +from megatron import mpu +from megatron.model.positional_embeddings import SinusoidalPositionalEmbedding +from megatron.model.init_functions import get_init_methods + + +class Embedding(torch.nn.Module): + """Language model embeddings. + Arguments: + hidden_size: hidden size + vocab_size: vocabulary size + max_sequence_length: maximum size of sequence. This + is used for positional embedding + embedding_dropout_prob: dropout probability for embeddings + init_method: weight initialization method + num_tokentypes: size of the token-type embeddings. 0 value + will ignore this embedding + """ + + def __init__( + self, + neox_args, + hidden_size, + vocab_size, + max_sequence_length, + embedding_dropout_prob, + init_method, + num_tokentypes=0, + use_pos_emb=True, + ): + super(Embedding, self).__init__() + + self.hidden_size = hidden_size + self.init_method = init_method + self.num_tokentypes = num_tokentypes + + self.sequence_parallel = ( + neox_args.sequence_parallel + ) # if we are using sequence parallelism, then we'll want to scatter our inputs across the seqlen dim across TP ranks + + self.use_mup = neox_args.use_mup + self.mup_embedding_mult = neox_args.mup_embedding_mult + self.mup_rp_embedding_mult = neox_args.mup_rp_embedding_mult + + # Word embeddings (parallel). + self.word_embeddings = mpu.VocabParallelEmbedding( + neox_args=neox_args, + num_embeddings=vocab_size, + embedding_dim=self.hidden_size, + init_method=self.init_method, + ) + self._word_embeddings_key = "word_embeddings" + + if neox_args.use_bnb_optimizer: + try: + import bitsandbytes as bnb + + self.embedding_module = bnb.nn.StableEmbedding + except ModuleNotFoundError: + print( + "Please install bitsandbytes following https://github.com/facebookresearch/bitsandbytes." + ) + raise Exception + else: + self.embedding_module = torch.nn.Embedding + + # Position embedding (serial). + self.use_pos_emb = use_pos_emb + if self.use_pos_emb: + self.embedding_type = neox_args.pos_emb + if self.embedding_type == "learned": + self.position_embeddings = self.embedding_module( + max_sequence_length, self.hidden_size + ) + self._position_embeddings_key = "position_embeddings" + # Initialize the position embeddings. + self.init_method(self.position_embeddings.weight) + elif self.embedding_type == "sinusoidal": + self.position_embeddings = SinusoidalPositionalEmbedding( + self.hidden_size + ) + + # Token type embedding. + # Add this as an optional field that can be added through + # method call so we can load a pretrain model without + # token types and add them as needed. + self._tokentype_embeddings_key = "tokentype_embeddings" + if self.num_tokentypes > 0: + self.tokentype_embeddings = self.embedding_module( + self.num_tokentypes, self.hidden_size + ) + # Initialize the token-type embeddings. + self.init_method(self.tokentype_embeddings.weight) + else: + self.tokentype_embeddings = None + + # Embeddings dropout + self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob) + self.opt_pos_emb_offset = neox_args.opt_pos_emb_offset + + # For ticking position ids forward + self.layer_past = None + + def add_tokentype_embeddings(self, num_tokentypes): + """Add token-type embedding. This function is provided so we can add + token-type embeddings in case the pretrained model does not have it. + This allows us to load the model normally and then add this embedding. 
+ """ + if self.tokentype_embeddings is not None: + raise Exception("tokentype embeddings is already initialized") + if torch.distributed.get_rank() == 0: + print( + "adding embedding for {} tokentypes".format(num_tokentypes), flush=True + ) + self.num_tokentypes = num_tokentypes + self.tokentype_embeddings = self.embedding_module( + num_tokentypes, self.hidden_size + ) + # Initialize the token-type embeddings. + self.init_method(self.tokentype_embeddings.weight) + + def forward(self, input_ids, position_ids, tokentype_ids=None): + # Embeddings. + words_embeddings = self.word_embeddings(input_ids) + if self.use_pos_emb and self.embedding_type in ["learned", "sinusoidal"]: + if self.opt_pos_emb_offset: + if self.layer_past is not None: + position_ids = position_ids + self.layer_past + 1 + self.layer_past = position_ids[:, -1] + # OPT always adds 2 for some reason, according to the HF implementation + position_ids = position_ids + self.opt_pos_emb_offset + position_embeddings = self.position_embeddings(position_ids) + position_embeddings.mul_(self.mup_rp_embedding_mult) + embeddings = words_embeddings + position_embeddings + else: + embeddings = words_embeddings + if tokentype_ids is not None: + assert self.tokentype_embeddings is not None + embeddings = embeddings + self.tokentype_embeddings(tokentype_ids) + else: + assert self.tokentype_embeddings is None + + # Dropout. + embeddings = self.embedding_dropout(embeddings) + + if self.use_mup: + with torch.no_grad(): + embeddings.mul_(self.mup_embedding_mult) + + if self.sequence_parallel: + # TODO: megatron-lm does dropout using the scattered embs. This would save a tiny bit of time, perhaps? + # Not a priority since we don't often use dropout + embeddings = mpu.scatter_to_sequence_parallel_region(embeddings) + + return embeddings + + +class EmbeddingPipe(Embedding): + """Extends Embedding to forward attention_mask through the pipeline.""" + + @property + def word_embeddings_weight(self): + """Easy accessory for the pipeline engine to tie embeddings across stages.""" + return self.word_embeddings.weight + + def forward(self, args): + assert ( + len(args) == 3 + ), f"Expected 3 arguments (input_ids, position_ids, attention_mask), but got {len(args)}." 
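+        # pipeline stages exchange plain tuples, so unpack positionally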
+
+        input_ids = args[0]
+        position_ids = args[1]
+        attention_mask = args[2]
+        embeddings = super().forward(input_ids, position_ids)
+        return embeddings, attention_mask
+
+
+class SoftEmbedding(torch.nn.Module):
+    def __init__(
+        self,
+        neox_args,
+        wte,
+        n_tokens: int = 10,
+        init_range: float = 0.5,
+        init_string: str = "",
+    ):
+        super(SoftEmbedding, self).__init__()
+        self.n_tokens = n_tokens
+        self.neox_args = neox_args
+        self.init_range = init_range
+        self.init_string = init_string
+        self.soft_embedding_weight = torch.nn.parameter.Parameter(
+            self.initialize_embedding(wte)
+        )
+
+    def initialize_embedding(self, wte):
+        if self.init_string:
+            embeds = torch.LongTensor(
+                self.neox_args.tokenizer.tokenize(self.init_string)
+            ).to(wte.weight.device)
+            embeds = wte(embeds)
+            if embeds.shape[0] >= self.n_tokens:
+                embeds = embeds[: self.n_tokens, :]  # slice
+            else:
+                embeds = embeds.repeat(math.ceil(self.n_tokens / embeds.shape[0]), 1)[
+                    : self.n_tokens, :
+                ]  # pad up to n_tokens
+            return embeds
+        return torch.Tensor(self.n_tokens, self.neox_args.hidden_size).uniform_(
+            -self.init_range, self.init_range
+        )
+
+    def forward(self, args: tuple):
+        in_inference = len(args) == 3  # embeddings, layer_past, attention_mask
+        in_train = len(args) == 2  # embeddings, attention_mask
+        if in_train:
+            embedding, attention_mask = args
+        else:
+            embedding, layer_past, attention_mask = args
+        soft_embedding = self.soft_embedding_weight.repeat(
+            embedding.shape[0], 1, 1
+        )  # repeat batch_size times
+        if in_train:
+            # append soft embedding at the beginning in training
+            embedding = torch.cat((soft_embedding, embedding), dim=1)
+            embedding = embedding[:, : self.neox_args.seq_length, ...]
+            return embedding, attention_mask
+        else:
+            if not (layer_past is not None and layer_past.numel() > 0):
+                # if in inference, on the first forward pass, we want to do the same as in training (append soft embedding)
+                embedding = torch.cat((soft_embedding, embedding), dim=1)
+                embedding = embedding[:, : self.neox_args.seq_length, ...]
+            # otherwise, we're in incremental mode, and just want to forward the single embedding (since the soft prompt has already been cached)
+            return embedding, layer_past, attention_mask
diff --git a/megatron/mpu/__init__.py b/megatron/mpu/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..780fb33e83c7eab4d7ca830a0c990c7c81acc974
--- /dev/null
+++ b/megatron/mpu/__init__.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Model parallel utility interface.""" + +from .cross_entropy import vocab_parallel_cross_entropy + +from .data import broadcast_data + +from .initialize import is_unitialized +from .initialize import destroy_model_parallel +from .initialize import get_data_parallel_group +from .initialize import get_data_parallel_rank +from .initialize import get_data_parallel_world_size +from .initialize import get_model_parallel_group +from .initialize import get_model_parallel_rank, set_model_parallel_rank +from .initialize import get_model_parallel_src_rank, get_data_parallel_src_rank +from .initialize import get_model_parallel_world_size, set_model_parallel_world_size +from .initialize import get_topology +from .initialize import get_pipe_parallel_group +from .initialize import get_pipe_parallel_rank +from .initialize import get_pipe_parallel_world_size +from .initialize import get_tensor_model_parallel_group +from .initialize import get_tensor_model_parallel_rank +from .initialize import get_tensor_model_parallel_world_size +from .initialize import get_io_parallel_group +from .initialize import initialize_model_parallel +from .initialize import model_parallel_is_initialized + +from .layers import ColumnParallelLinear +from .layers import RowParallelLinear +from .layers import VocabParallelEmbedding +from .layers import ParallelRelativePositionBias + +from .mappings import copy_to_model_parallel_region +from .mappings import gather_from_model_parallel_region +from .mappings import reduce_from_model_parallel_region +from .mappings import scatter_to_model_parallel_region +from .mappings import reduce_scatter_to_sequence_parallel_region +from .mappings import gather_from_sequence_parallel_region +from .mappings import scatter_to_sequence_parallel_region + +from .random import checkpoint +from .random import get_cuda_rng_tracker +from .random import model_parallel_cuda_manual_seed + +from .utils import divide +from .utils import split_tensor_along_last_dim diff --git a/megatron/mpu/cross_entropy.py b/megatron/mpu/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..d9f04703db48d6fa11f126aa695e2b084f56a05d --- /dev/null +++ b/megatron/mpu/cross_entropy.py @@ -0,0 +1,117 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch + +from .initialize import get_model_parallel_group +from .initialize import get_model_parallel_rank +from .initialize import get_model_parallel_world_size +from .utils import VocabUtility + + +class _VocabParallelCrossEntropy(torch.autograd.Function): + @staticmethod + def forward(ctx, vocab_parallel_logits, target): + + # Maximum value along vocab dimension across all GPUs. 
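+        # (The max-subtraction below makes the log-sum-exp numerically stable:
+        #   loss = log(sum_v exp(l_v - m)) - (l_target - m), with m = max_v l_v;
+        # the max, the target logit, and the exp-sum each need one all-reduce
+        # over the model-parallel group, since the vocab dim is sharded.)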
+ logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] + torch.distributed.all_reduce( + logits_max, + op=torch.distributed.ReduceOp.MAX, + group=get_model_parallel_group(), + ) + # Subtract the maximum value. + vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1)) + + # Get the partition's vocab indices + get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size + partition_vocab_size = vocab_parallel_logits.size()[-1] + rank = get_model_parallel_rank() + world_size = get_model_parallel_world_size() + vocab_start_index, vocab_end_index = get_vocab_range( + partition_vocab_size, rank, world_size + ) + + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (target < vocab_start_index) | (target >= vocab_end_index) + masked_target = target.clone() - vocab_start_index + masked_target[target_mask] = 0 + + # Get predicted-logits = logits[target]. + # For Simplicity, we convert logits to a 2-D tensor with size + # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. + logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) + masked_target_1d = masked_target.view(-1) + arange_1d = torch.arange( + start=0, end=logits_2d.size()[0], device=logits_2d.device + ) + predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + predicted_logits = predicted_logits_1d.view_as(target) + predicted_logits[target_mask] = 0.0 + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce( + predicted_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_model_parallel_group(), + ) + + # Sum of exponential of logits along vocab dimension across all GPUs. + exp_logits = vocab_parallel_logits + torch.exp(vocab_parallel_logits, out=exp_logits) + sum_exp_logits = exp_logits.sum(dim=-1) + torch.distributed.all_reduce( + sum_exp_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_model_parallel_group(), + ) + + # Loss = log(sum(exp(logits))) - predicted-logit. + loss = torch.log(sum_exp_logits) - predicted_logits + + # Store softmax, target-mask and masked-target for backward pass. + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + return loss + + @staticmethod + def backward(ctx, grad_output): + + # Retrieve tensors from the forward path. + softmax, target_mask, masked_target_1d = ctx.saved_tensors + + # All the inputs have softmax as their gradient. + grad_input = softmax + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. + arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device) + grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float() + + # Finally elementwise multiplication with the output gradients. + grad_input.mul_(grad_output.unsqueeze(dim=-1)) + + return grad_input, None + + +def vocab_parallel_cross_entropy(vocab_parallel_logits, target): + """Helper function for the cross entropy.""" + return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target) diff --git a/megatron/mpu/data.py b/megatron/mpu/data.py new file mode 100644 index 0000000000000000000000000000000000000000..87e2a9615428ed8350a2a49e06cd1214a66c38d6 --- /dev/null +++ b/megatron/mpu/data.py @@ -0,0 +1,120 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch + +from .initialize import get_model_parallel_group +from .initialize import get_model_parallel_rank +from .initialize import get_model_parallel_src_rank + + +_MAX_DATA_DIM = 4 + + +def _check_data_types(keys, data, target_dtype): + """Check that all the keys have the same target data type.""" + for key in keys: + assert ( + data[key].dtype == target_dtype + ), "{} has data type {} which " "is different than {}".format( + key, data[key].dtype, target_dtype + ) + + +def _build_key_size_numel_dictionaries(keys, data): + """Build the size on rank 0 and broadcast.""" + max_dim = _MAX_DATA_DIM + sizes = [0 for _ in range(max_dim) for _ in keys] + + # Pack the sizes on rank zero. + if get_model_parallel_rank() == 0: + offset = 0 + for key in keys: + assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM" + size = data[key].size() + for i, s in enumerate(size): + sizes[i + offset] = s + offset += max_dim + + # Move to GPU and broadcast. + sizes_cuda = torch.cuda.LongTensor(sizes) + torch.distributed.broadcast( + sizes_cuda, get_model_parallel_src_rank(), group=get_model_parallel_group() + ) + + # Move back to cpu and unpack. + sizes_cpu = sizes_cuda.cpu() + key_size = {} + key_numel = {} + total_numel = 0 + offset = 0 + for key in keys: + i = 0 + size = [] + numel = 1 + while sizes_cpu[offset + i] > 0: + this_size = sizes_cpu[offset + i] + size.append(this_size) + numel *= this_size + i += 1 + key_size[key] = size + key_numel[key] = numel + total_numel += numel + offset += max_dim + + return key_size, key_numel, total_numel + + +def broadcast_data(keys, data, datatype): + """Broadcast data from rank zero of each model parallel group to the + members of the same model parallel group. + + Arguments: + keys: list of keys in the data dictionary to be broadcasted + data: data dictionary of string keys and cpu tensor values. + datatype: torch data type of all tensors in data associated + with keys. + """ + # Build (key, size) and (key, number of elements) dictionaries along + # with the total number of elements on all ranks. + key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data) + + # Pack on rank zero. + if get_model_parallel_rank() == 0: + # Check that all keys have the same data type. 
+ _check_data_types(keys, data, datatype) + # Flatten the data associated with the keys + flatten_data = torch.cat( + [data[key].contiguous().view(-1) for key in keys], dim=0 + ).cuda() + else: + flatten_data = torch.empty( + total_numel, device=torch.cuda.current_device(), dtype=datatype + ) + + # Broadcast + torch.distributed.broadcast( + flatten_data, get_model_parallel_src_rank(), group=get_model_parallel_group() + ) + + # Unpack + output = {} + offset = 0 + for key in keys: + size = key_size[key] + numel = key_numel[key] + output[key] = flatten_data.narrow(0, offset, numel).view(size) + offset += numel + + return output diff --git a/megatron/mpu/initialize.py b/megatron/mpu/initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..19d2315241cdaf8c6aaaae89cb834b0c6cfb163e --- /dev/null +++ b/megatron/mpu/initialize.py @@ -0,0 +1,324 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Model and data parallel groups.""" + +import torch + +from .utils import ensure_divisibility + +# Model parallel group that the current rank belongs to. +_MODEL_PARALLEL_GROUP = None +# Data parallel group that the current rank belongs to. +_DATA_PARALLEL_GROUP = None +# Pipeline parallel group that the current rank belongs to. +_PIPE_PARALLEL_GROUP = None + +# A group used to sync during the IO process. Usually this is data_parallel_group(), +# but with pipeline parallelism it must also involve the last stage (which is not in the +# DP group of rank 0) +_IO_PARALLEL_GROUP = None + +# These values enable us to change the mpu sizes on the fly. +_MPU_WORLD_SIZE = None +_MPU_RANK = None + +# Used to query 3D topology +_MPU_TOPOLOGY = None + +# Get fp32_allreduce flag +_FP32_ALLREDUCE = None + + +def is_unitialized(): + """Useful for code segments that may be accessed with or without mpu initialization""" + return _DATA_PARALLEL_GROUP is None + + +def initialize_model_parallel(model_parallel_size, topology=None, fp32_allreduce=False): + """ + Initialize model data parallel groups. + + Arguments: + model_parallel_size: number of GPUs used to parallelize model. + + Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we + use 2 GPUs to parallelize the model. The present function will + create 4 model parallel groups and 2 data parallel groups as: + 4 model parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7] + 2 data parallel groups: + [g0, g2, g4, g6], [g1, g3, g5, g7] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. 
+ """ + if torch.distributed.get_rank() == 0: + print("> initializing model parallel with size {}".format(model_parallel_size)) + # Get world size and rank. Ensure some consistencies. + assert torch.distributed.is_initialized() + world_size = torch.distributed.get_world_size() + if world_size < model_parallel_size: + raise ValueError("world size cannot be smaller than model parallel size") + ensure_divisibility(world_size, model_parallel_size) + rank = torch.distributed.get_rank() + + global _MPU_TOPOLOGY + if topology: + _MPU_TOPOLOGY = topology + + # Build the data parallel groups. + global _DATA_PARALLEL_GROUP + assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized" + if topology: + for dp_group in topology.get_axis_comm_lists("data"): + group = torch.distributed.new_group(ranks=dp_group) + if rank == 0: + print(f"MPU DP:", dp_group) + if rank in dp_group: + _DATA_PARALLEL_GROUP = group + else: + for i in range(model_parallel_size): + ranks = range(i, world_size, model_parallel_size) + group = torch.distributed.new_group(ranks) + if i == (rank % model_parallel_size): + _DATA_PARALLEL_GROUP = group + + # Build pipeline parallel group + if topology is not None: + global _PIPE_PARALLEL_GROUP + for pp_group in topology.get_axis_comm_lists("pipe"): + group = torch.distributed.new_group(ranks=pp_group) + if rank == 0: + print(f"MPU PP:", pp_group) + if rank in pp_group: + _PIPE_PARALLEL_GROUP = group + + # Build IO group + global _IO_PARALLEL_GROUP + if topology and topology.get_dim("pipe") > 1: + io_stages = [0, topology.get_dim("pipe") - 1] + io_group = [] + for stage in io_stages: + io_group.extend(topology.filter_match(pipe=stage, model=0)) + if rank == 0: + print(f"MPU IO:", io_group) + group = torch.distributed.new_group(ranks=io_group) + if rank in io_group: + _IO_PARALLEL_GROUP = group + else: + _IO_PARALLEL_GROUP = get_data_parallel_group() + + # Build the model parallel groups. + global _MODEL_PARALLEL_GROUP + assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized" + if topology: + # Short circuit case without model parallelism. + # TODO: it would be nice to avoid this branching case? 
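+        # (With MP size 1, every rank forms its own singleton MP group,
+        # e.g. 8 ranks -> groups [g0], [g1], ..., [g7], and we return early.)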
+ if model_parallel_size == 1: + for group_rank in range(world_size): + group = torch.distributed.new_group(ranks=[group_rank]) + if rank == 0: + print(f"MPU MP:", [group_rank]) + if rank == group_rank: + _MODEL_PARALLEL_GROUP = group + return + + for mp_group in topology.get_axis_comm_lists("model"): + group = torch.distributed.new_group(ranks=mp_group) + if rank == 0: + print(f"MPU MP:", mp_group) + if rank in mp_group: + _MODEL_PARALLEL_GROUP = group + + else: + for i in range(world_size // model_parallel_size): + ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) + group = torch.distributed.new_group(ranks) + if i == (rank // model_parallel_size): + _MODEL_PARALLEL_GROUP = group + + global _FP32_ALLREDUCE + assert _FP32_ALLREDUCE is None, "fp32_allreduce is already initialized" + _FP32_ALLREDUCE = fp32_allreduce + + +def model_parallel_is_initialized(): + """Check if model and data parallel groups are initialized.""" + if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None: + return False + return True + + +def get_model_parallel_group(): + """Get the model parallel group the caller rank belongs to.""" + assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized" + return _MODEL_PARALLEL_GROUP + + +def get_data_parallel_group(): + """Get the data parallel group the caller rank belongs to.""" + assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized" + return _DATA_PARALLEL_GROUP + + +def get_io_parallel_group(): + """Get the IO parallel group the caller rank belongs to.""" + assert _IO_PARALLEL_GROUP is not None, "IO parallel group is not initialized" + return _IO_PARALLEL_GROUP + + +def set_model_parallel_world_size(world_size): + """Set the model parallel size""" + global _MPU_WORLD_SIZE + _MPU_WORLD_SIZE = world_size + + +def get_model_parallel_world_size(): + """Return world size for the model parallel group.""" + global _MPU_WORLD_SIZE + if _MPU_WORLD_SIZE is not None: + return _MPU_WORLD_SIZE + return torch.distributed.get_world_size(group=get_model_parallel_group()) + + +def set_model_parallel_rank(rank): + """Set model parallel rank.""" + global _MPU_RANK + _MPU_RANK = rank + + +def get_model_parallel_rank(): + """Return my rank for the model parallel group.""" + global _MPU_RANK + if _MPU_RANK is not None: + return _MPU_RANK + return torch.distributed.get_rank(group=get_model_parallel_group()) + + +def get_model_parallel_src_rank(): + """Calculate the global rank corresponding to a local rank zero + in the model parallel group.""" + global_rank = torch.distributed.get_rank() + local_world_size = get_model_parallel_world_size() + return (global_rank // local_world_size) * local_world_size + + +def get_data_parallel_src_rank(): + """Calculate the global rank corresponding to a local rank zero + in the data parallel group.""" + global_rank = torch.distributed.get_rank() + topo = get_topology() + if topo is None: + # we are just using model parallel + return global_rank % get_model_parallel_world_size() + else: + # We are using pipeline parallel + d = topo.get_axis_comm_lists("data") + for l in d: + if global_rank in l: + return l[0] + + +def get_data_parallel_world_size(): + """Return world size for the data parallel group.""" + return torch.distributed.get_world_size(group=get_data_parallel_group()) + + +def get_data_parallel_rank(): + """Return my rank for the data parallel group.""" + return torch.distributed.get_rank(group=get_data_parallel_group()) + + +def get_topology(): + return _MPU_TOPOLOGY + 
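+# Example (hypothetical): an 8-rank job laid out as pipe=2 x model=2 x data=2
+# gives each rank (pipe, model, data) coordinates via the topology above; the
+# group builders in initialize_model_parallel iterate over those axes.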
+
+def get_pipe_parallel_group():
+    """Get the pipe parallel group the caller rank belongs to."""
+    assert _PIPE_PARALLEL_GROUP is not None, "pipe parallel group is not initialized"
+    return _PIPE_PARALLEL_GROUP
+
+
+def get_pipe_parallel_rank():
+    """Return my rank for the pipe parallel group."""
+    return torch.distributed.get_rank(group=get_pipe_parallel_group())
+
+
+def get_pipe_parallel_world_size():
+    """Return world size for the pipe parallel group."""
+    return torch.distributed.get_world_size(group=get_pipe_parallel_group())
+
+
+def set_tensor_model_parallel_world_size(world_size):
+    """Set the tensor model parallel size"""
+    set_model_parallel_world_size(world_size)
+
+
+def get_tensor_model_parallel_group():
+    """Get the tensor model parallel group the caller rank belongs to."""
+    return get_model_parallel_group()
+
+
+def get_tensor_model_parallel_src_rank():
+    """Calculate the global rank corresponding to the first local rank
+    in the tensor model parallel group."""
+    return get_model_parallel_src_rank()
+
+
+# Needed for MoE. TODO: true tensor parallelism.
+def get_tensor_model_parallel_world_size():
+    """Return world size for the tensor model parallel group."""
+    return get_model_parallel_world_size()
+
+
+def set_tensor_model_parallel_rank(rank):
+    """Set tensor model parallel rank."""
+    set_model_parallel_rank(rank)
+
+
+def get_tensor_model_parallel_rank():
+    """Return my rank for the tensor model parallel group."""
+    return get_model_parallel_rank()
+
+
+def destroy_model_parallel():
+    """Set the groups to none."""
+    global _MODEL_PARALLEL_GROUP
+    _MODEL_PARALLEL_GROUP = None
+    global _DATA_PARALLEL_GROUP
+    _DATA_PARALLEL_GROUP = None
+    global _PIPE_PARALLEL_GROUP
+    _PIPE_PARALLEL_GROUP = None
+    global _IO_PARALLEL_GROUP
+    _IO_PARALLEL_GROUP = None
+    global _MPU_WORLD_SIZE
+    global _MPU_RANK
+    _MPU_WORLD_SIZE = None
+    _MPU_RANK = None
+    global _MPU_TOPOLOGY
+    _MPU_TOPOLOGY = None
+    global _FP32_ALLREDUCE
+    _FP32_ALLREDUCE = None
+
+
+def get_fp32_allreduce():
+    """Get the fp32 allreduce flag"""
+    assert _FP32_ALLREDUCE is not None, "fp32_allreduce is not initialized"
+    return _FP32_ALLREDUCE
diff --git a/megatron/mpu/layers.py b/megatron/mpu/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d59edab94f368b3e9d96734aa6d4f0b9a8c07298
--- /dev/null
+++ b/megatron/mpu/layers.py
@@ -0,0 +1,794 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Parts of the code here are adapted from PyTorch
+# repo: https://github.com/pytorch/pytorch
+
+
+import math
+
+import torch
+import torch.nn.functional as F
+import torch.nn.init as init
+from torch.nn.parameter import Parameter
+
+from .initialize import get_model_parallel_rank
+from .initialize import get_model_parallel_world_size
+from .mappings import copy_to_model_parallel_region
+from .mappings import gather_from_model_parallel_region
+from .mappings import reduce_from_model_parallel_region
+from .mappings import scatter_to_model_parallel_region
+from .mappings import reduce_scatter_to_sequence_parallel_region
+from .mappings import gather_from_sequence_parallel_region
+from .random import get_cuda_rng_tracker
+from .utils import divide
+from .utils import VocabUtility
+from functools import partial
+
+
+def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
+    """Initialize affine weight for model parallel on GPU."""
+
+    weight.model_parallel = True
+    weight.partition_dim = partition_dim
+    weight.partition_stride = stride
+
+    with get_cuda_rng_tracker().fork():
+        init_method(weight)
+
+
+def _initialize_affine_weight_cpu(
+    neox_args,
+    weight,
+    output_size,
+    input_size,
+    per_partition_size,
+    partition_dim,
+    init_method,
+    stride=1,
+    return_master_weight=False,
+):
+    """Initialize affine weight for model parallel.
+
+    Build the master weight on all processes and scatter
+    the relevant chunk."""
+
+    weight.model_parallel = True
+    weight.partition_dim = partition_dim
+    weight.partition_stride = stride
+
+    # Initialize master weight
+    master_weight = torch.empty(
+        output_size, input_size, dtype=torch.float, requires_grad=False
+    )
+    init_method(master_weight)
+    master_weight = master_weight.to(dtype=neox_args.params_dtype)
+
+    # Split and copy
+    per_partition_per_stride_size = divide(per_partition_size, stride)
+    weight_list = torch.split(
+        master_weight, per_partition_per_stride_size, dim=partition_dim
+    )
+    rank = get_model_parallel_rank()
+    world_size = get_model_parallel_world_size()
+    my_weight_list = weight_list[rank::world_size]
+
+    with torch.no_grad():
+        torch.cat(my_weight_list, dim=partition_dim, out=weight)
+    if return_master_weight:
+        return master_weight
+    return None
+
+
+class VocabParallelEmbedding(torch.nn.Module):
+    """Embedding parallelized in the vocabulary dimension.
+
+    This is mainly adapted from torch.nn.Embedding and all the default
+    values are kept.
+    Arguments:
+        num_embeddings: vocabulary size.
+        embedding_dim: size of hidden state.
+        init_method: method to initialize weights.
+    """
+
+    def __init__(
+        self, neox_args, num_embeddings, embedding_dim, init_method=init.xavier_normal_
+    ):
+        super(VocabParallelEmbedding, self).__init__()
+        # Keep the input dimensions.
+        self.num_embeddings = num_embeddings
+        self.embedding_dim = embedding_dim
+        # Set the defaults for compatibility.
+        self.padding_idx = None
+        self.max_norm = None
+        self.norm_type = 2.0
+        self.scale_grad_by_freq = False
+        self.sparse = False
+        self._weight = None
+        self.model_parallel_size = get_model_parallel_world_size()
+        # Divide the weight matrix along the vocabulary dimension.
+        (
+            self.vocab_start_index,
+            self.vocab_end_index,
+        ) = VocabUtility.vocab_range_from_global_vocab_size(
+            self.num_embeddings, get_model_parallel_rank(), self.model_parallel_size
+        )
+        self.num_embeddings_per_partition = (
+            self.vocab_end_index - self.vocab_start_index
+        )
+        self.init_method = init_method
+
+        # Allocate weights and initialize.
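+        # Each rank allocates only its own vocab shard, i.e. a
+        # [num_embeddings / mp_world_size, embedding_dim] slice of the table
+        # (e.g. a 50304-entry vocab under 4-way MP gives 12576 rows per rank).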
+ if neox_args.use_cpu_initialization: + self.weight = Parameter( + torch.empty( + self.num_embeddings_per_partition, + self.embedding_dim, + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.num_embeddings, + self.embedding_dim, + self.num_embeddings_per_partition, + 0, + init_method, + ) + else: + self.weight = Parameter( + torch.empty( + self.num_embeddings_per_partition, + self.embedding_dim, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_gpu( + self.weight, init_method, partition_dim=0, stride=1 + ) + + def mup_reinitialize_weights(self, neox_args): + if neox_args.use_cpu_initialization: + _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.num_embeddings, + self.embedding_dim, + self.num_embeddings_per_partition, + 0, + partial(self.init_method, use_mup=True), + ) + else: + _initialize_affine_weight_gpu( + self.weight, + partial(self.init_method, use_mup=True), + partition_dim=0, + stride=1, + ) + + def forward(self, input_): + if self.model_parallel_size > 1: + # Build the mask. + input_mask = (input_ < self.vocab_start_index) | ( + input_ >= self.vocab_end_index + ) + # Mask the input. + masked_input = input_.clone() - self.vocab_start_index + masked_input[input_mask] = 0 + else: + masked_input = input_ + # Get the embeddings. + output_parallel = F.embedding( + masked_input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + # Mask the output embedding. + if self.model_parallel_size > 1: + output_parallel[input_mask, :] = 0.0 + # Reduce across all the model parallel GPUs. + output = reduce_from_model_parallel_region(output_parallel) + return output + + +class ParallelRelativePositionBias(torch.nn.Module): + """T5 Relative Position Bias parallelized in the heads dimension + + Based on https://github.com/lucidrains/x-transformers/blob/6b93c21be0d0a679da6f7b9621d9bb638ab18428/x_transformers/x_transformers.py#L106 (14.12.2021) + and adapted for megatron's model parallelism + + Arguments: + scale: scaling factor for the bias + causal: flag for causal/non-causal language modelling. + num_buckets: number of rp buckets. + max_distance: max distance in sequence dim for each bucket. + heads: number of attention heads (total) + """ + + def __init__( + self, + neox_args, + scale, + causal=True, + num_buckets=32, + max_distance=128, + heads=8, + init_method=init.xavier_normal_, + ): + super().__init__() + self.scale = scale + self.causal = causal + self.num_buckets = num_buckets + self.max_distance = max_distance + self.heads = heads + + # Set the defaults for compatibility. + self.padding_idx = None + self.max_norm = None + self.norm_type = 2.0 + self.scale_grad_by_freq = False + self.sparse = False + self._weight = None + self.model_parallel_size = get_model_parallel_world_size() + self.model_parallel_rank = get_model_parallel_rank() + + # Divide the weight matrix along the heads dimension. + self.head_start_index, self.head_end_index = self.get_heads_range( + self.heads, self.model_parallel_rank, self.model_parallel_size + ) + self.num_heads_per_partition = self.head_end_index - self.head_start_index + self.init_method = init_method + + # Allocate weights and initialize. 
+ if neox_args.use_cpu_initialization: + self.weight = Parameter( + torch.empty( + self.num_buckets, + self.num_heads_per_partition, + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.num_buckets, + self.heads, + self.num_heads_per_partition, + partition_dim=1, + init_method=init_method, + ) + else: + self.weight = Parameter( + torch.empty( + self.num_buckets, + self.num_heads_per_partition, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_gpu( + self.weight, init_method, partition_dim=1, stride=1 + ) + self._q_len_cached = None + self._k_len_cached = None + self._rel_pos_bucket_cached = None + + def mup_reinitialize_weights(self, neox_args): + if self.use_cpu_initialization: + _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.num_buckets, + self.heads, + self.num_heads_per_partition, + partition_dim=1, + init_method=partial(self.init_method, use_mup=True), + ) + else: + _initialize_affine_weight_gpu( + self.weight, + partial(self.init_method, use_mup=True), + partition_dim=1, + stride=1, + ) + + @staticmethod + def get_heads_range(global_n_heads, rank, world_size): + per_partition_n_heads = divide(global_n_heads, world_size) + index_f = rank * per_partition_n_heads + index_l = index_f + per_partition_n_heads + return index_f, index_l + + def _relative_position_bucket( + self, relative_position, num_buckets=32, max_distance=128 + ): + ret = 0 + n = -relative_position + if not self.causal: + num_buckets //= 2 + ret += (n < 0).long() * num_buckets + n = torch.abs(n) + else: + n = torch.max(n, torch.zeros_like(n)) + + max_exact = num_buckets // 2 + is_small = n < max_exact + + val_if_large = ( + max_exact + + ( + torch.log(n.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).long() + ) + val_if_large = torch.min( + val_if_large, torch.full_like(val_if_large, num_buckets - 1) + ) + + ret += torch.where(is_small, n, val_if_large) + self._rel_pos_bucket_cached = ret + return self._rel_pos_bucket_cached + + def forward(self, q_len, k_len): + if self._q_len_cached != q_len or self._k_len_cached != k_len: + # cache bucket if first step seq len stays constant + self._q_len_cached, self._k_len_cached = q_len, k_len + q_pos = torch.arange( + q_len, dtype=torch.long, device=torch.cuda.current_device() + ) + k_pos = torch.arange( + k_len, dtype=torch.long, device=torch.cuda.current_device() + ) + rel_pos = k_pos[None, :] - q_pos[:, None] + rp_bucket = self._relative_position_bucket( + rel_pos, num_buckets=self.num_buckets, max_distance=self.max_distance + ) + else: + rp_bucket = self._rel_pos_bucket_cached + values = F.embedding( + rp_bucket, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + bias = values.movedim(2, 0).unsqueeze(0) + return bias * self.scale + + +class ColumnParallelLinear(torch.nn.Module): + """Linear layer with column parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its second dimension as A = [A_1, ..., A_p]. + + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + bias: If true, add bias + gather_output: If true, call all-gather on output and make Y available + to all GPUs, otherwise, every GPU will have its output + which is Y_i = XA_i + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. 
+ keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: This was added to enable performance optimations where bias + can be fused with other elementwise operations. we skip + adding bias but instead return it. + """ + + def __init__( + self, + neox_args, + input_size, + output_size, + bias=True, + gather_output=True, + init_method=init.xavier_normal_, + stride=1, + keep_master_weight_for_test=False, + skip_bias_add=False, + MOE=False, + MoE_mp_size=1, + mup_rescale_parameters=False, + seq_dim=0, # Dimension which is the seq_len dimension. final ParallelLinear overrides this to be 1 ; otherwise, the default is used throughout. + ): + super(ColumnParallelLinear, self).__init__() + + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.gather_output = gather_output + # Divide the weight matrix along the last dimension. + world_size = MoE_mp_size if MOE else get_model_parallel_world_size() + self.output_size_per_partition = divide(output_size, world_size) + self.skip_bias_add = skip_bias_add + + self.sequence_parallel = neox_args.sequence_parallel + self.seq_dim = seq_dim + + self.init_method = init_method + self.stride = stride + self.mup_rescale_parameters = mup_rescale_parameters + self.use_mup = neox_args.use_mup + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. + if neox_args.use_cpu_initialization: + self.weight = Parameter( + torch.empty( + self.output_size_per_partition, + self.input_size, + dtype=neox_args.params_dtype, + ) + ) + self.master_weight = _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.output_size, + self.input_size, + self.output_size_per_partition, + 0, + init_method, + stride=stride, + return_master_weight=keep_master_weight_for_test, + ) + else: + self.weight = Parameter( + torch.empty( + self.output_size_per_partition, + self.input_size, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_gpu( + self.weight, init_method, partition_dim=0, stride=stride + ) + + if bias: + if neox_args.use_cpu_initialization: + self.bias = Parameter( + torch.empty( + self.output_size_per_partition, dtype=neox_args.params_dtype + ) + ) + else: + self.bias = Parameter( + torch.empty( + self.output_size_per_partition, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + self.bias.model_parallel = True + self.bias.partition_dim = 0 + self.bias.stride = stride + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter("bias", None) + + # Copied from Mup + def width_mult(self): + assert hasattr(self.weight, "infshape"), ( + "Please call set_base_shapes(...). If using torch.nn.DataParallel, " + "switch to distributed training with " + "torch.nn.parallel.DistributedDataParallel instead" + ) + return self.weight.infshape.width_mult() + + # Copied from Mup + def _rescale_parameters(self): + """Rescale parameters to convert SP initialization to μP initialization. + Warning: This method is NOT idempotent and should be called only once + unless you know what you are doing. + """ + if hasattr(self, "_has_rescaled_params") and self._has_rescaled_params: + raise RuntimeError( + "`_rescale_parameters` has been called once before already. 
" + "Unless you know what you are doing, usually you should not be calling `_rescale_parameters` more than once.\n" + "If you called `set_base_shapes` on a model loaded from a checkpoint, " + "or just want to re-set the base shapes of an existing model, " + "make sure to set the flag `rescale_params=False`.\n" + "To bypass this error and *still rescale parameters*, set `self._has_rescaled_params=False` before this call." + ) + if self.bias is not None: + self.bias.data *= self.width_mult() ** 0.5 + self.weight.data *= self.width_mult() ** 0.5 + self._has_rescaled_params = True + + def mup_reinitialize_weights(self, neox_args): + if neox_args.use_cpu_initialization: + self.master_weight = _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.output_size, + self.input_size, + self.output_size_per_partition, + 0, + partial(self.init_method, use_mup=True), + stride=self.stride, + return_master_weight=keep_master_weight_for_test, + ) + else: + _initialize_affine_weight_gpu( + self.weight, + partial(self.init_method, use_mup=True), + partition_dim=0, + stride=self.stride, + ) + + def set_parallel_output(self, value: bool): + assert isinstance(value, bool) + self.gather_output = ( + not value + ) # if gather_output is True, parallel output is False, so we set the opposite + + def forward(self, input_): + if self.use_mup and self.mup_rescale_parameters: + input_ /= self.width_mult() + + if self.sequence_parallel: + input_parallel = input_ + else: + # Set up backprop all-reduce. + input_parallel = copy_to_model_parallel_region(input_) + # Matrix multiply. + + if self.sequence_parallel: + # do an AG in the fwd pass, RS in bwd pass. + # gather / scatter portion happens across the sequence dim (self.seq_dim)-- + # almost always is [s, b, h] and so dim 0, but for lm_head ParallelLinear it is seq_dim=1 and [b, s, h] + input_parallel = gather_from_sequence_parallel_region( + input_parallel, seq_dim=self.seq_dim + ) + + bias = self.bias if not self.skip_bias_add else None + output_parallel = F.linear(input_parallel, self.weight, bias) + if self.gather_output: + # All-gather across the partitions. + assert ( + not self.sequence_parallel + ), "sequence_parallel=True and gather_output=True are incompatible!" + output = gather_from_model_parallel_region(output_parallel) + else: + output = output_parallel + output_bias = self.bias if self.skip_bias_add else None + return output, output_bias + + +class RowParallelLinear(torch.nn.Module): + """Linear layer with row parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its first dimension and X along its second dimension as: + - - + | A_1 | + | . | + A = | . | X = [X_1, ..., X_p] + | . | + | A_p | + - - + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + bias: If true, add bias. Note that bias is not parallelized. + input_is_parallel: If true, we assume that the input is already + split across the GPUs and we do not split + again. + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. + keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: This was added to enable performance optimations where bias + can be fused with other elementwise operations. we skip + adding bias but instead return it. 
+ """ + + def __init__( + self, + neox_args, + input_size, + output_size, + bias=True, + input_is_parallel=False, + init_method=init.xavier_normal_, + stride=1, + keep_master_weight_for_test=False, + skip_bias_add=False, + MOE=False, + MoE_mp_size=1, + parallel_output=False, + mup_rescale_parameters=False, + ): + super(RowParallelLinear, self).__init__() + + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.input_is_parallel = input_is_parallel + # Divide the weight matrix along the last dimension. + world_size = MoE_mp_size if MOE else get_model_parallel_world_size() + self.input_size_per_partition = divide(input_size, world_size) + self.skip_bias_add = skip_bias_add + self.parallel_output = parallel_output + + self.sequence_parallel = neox_args.sequence_parallel + assert not ( + self.sequence_parallel and not self.input_is_parallel + ), "Cannot have self.input_is_parallel=False and self.sequence_parallel=True." + + self.init_method = init_method + self.stride = stride + self.keep_master_weight_for_test = keep_master_weight_for_test + self.mup_rescale_parameters = mup_rescale_parameters + self.use_mup = neox_args.use_mup + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. + if neox_args.use_cpu_initialization: + self.weight = Parameter( + torch.empty( + self.output_size, + self.input_size_per_partition, + dtype=neox_args.params_dtype, + ) + ) + self.master_weight = _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.output_size, + self.input_size, + self.input_size_per_partition, + 1, + init_method, + stride=stride, + return_master_weight=keep_master_weight_for_test, + ) + else: + self.weight = Parameter( + torch.empty( + self.output_size, + self.input_size_per_partition, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + _initialize_affine_weight_gpu( + self.weight, init_method, partition_dim=1, stride=stride + ) + if bias: + if neox_args.use_cpu_initialization: + self.bias = Parameter( + torch.empty(self.output_size, dtype=neox_args.params_dtype) + ) + else: + self.bias = Parameter( + torch.empty( + self.output_size, + device=torch.cuda.current_device(), + dtype=neox_args.params_dtype, + ) + ) + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter("bias", None) + + # Copied from Mup + def width_mult(self): + assert hasattr(self.weight, "infshape"), ( + "Please call set_base_shapes(...). If using torch.nn.DataParallel, " + "switch to distributed training with " + "torch.nn.parallel.DistributedDataParallel instead" + ) + return self.weight.infshape.width_mult() + + # Copied from Mup + def _rescale_parameters(self): + """Rescale parameters to convert SP initialization to μP initialization. + Warning: This method is NOT idempotent and should be called only once + unless you know what you are doing. + """ + if hasattr(self, "_has_rescaled_params") and self._has_rescaled_params: + raise RuntimeError( + "`_rescale_parameters` has been called once before already. 
" + "Unless you know what you are doing, usually you should not be calling `_rescale_parameters` more than once.\n" + "If you called `set_base_shapes` on a model loaded from a checkpoint, " + "or just want to re-set the base shapes of an existing model, " + "make sure to set the flag `rescale_params=False`.\n" + "To bypass this error and *still rescale parameters*, set `self._has_rescaled_params=False` before this call." + ) + if self.bias is not None: + self.bias.data *= self.width_mult() ** 0.5 + self.weight.data *= self.width_mult() ** 0.5 + self._has_rescaled_params = True + + def mup_reinitialize_weights(self, neox_args): + if neox_args.use_cpu_initialization: + self.master_weight = _initialize_affine_weight_cpu( + neox_args, + self.weight, + self.output_size, + self.input_size, + self.input_size_per_partition, + 1, + partial(self.init_method, use_mup=True), + stride=self.stride, + return_master_weight=self.keep_master_weight_for_test, + ) + else: + _initialize_affine_weight_gpu( + self.weight, + partial(self.init_method, use_mup=True), + partition_dim=1, + stride=self.stride, + ) + + def set_parallel_output(self, parallel_output: bool): + assert isinstance(parallel_output, bool) + self.parallel_output = parallel_output + + def forward(self, input_): + if self.use_mup and self.mup_rescale_parameters: + input_ /= self.width_mult() + # Set up backprop all-reduce. + if self.input_is_parallel: + input_parallel = input_ + else: + input_parallel = scatter_to_model_parallel_region(input_) + # Matrix multiply. + output_parallel = F.linear(input_parallel, self.weight) + # All-reduce across all the partitions. + if self.sequence_parallel and not self.parallel_output: + # do an RS in the fwd pass, AG in bwd pass. + # skip in the gpt-j parallel sublayer case (self.parallel_output=True) + # (user responsible for calling reduce-scatter) + output_ = reduce_scatter_to_sequence_parallel_region(output_parallel) + elif not self.parallel_output: + output_ = reduce_from_model_parallel_region(output_parallel) + else: + output_ = output_parallel + if not self.skip_bias_add: + output = output_ + self.bias if self.bias is not None else output_ + output_bias = None + else: + output = output_ + output_bias = self.bias + return output, output_bias diff --git a/megatron/mpu/mappings.py b/megatron/mpu/mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb89daa21bb0a011d0d17b6d504df0e7dbb468b --- /dev/null +++ b/megatron/mpu/mappings.py @@ -0,0 +1,344 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import torch
+
+from .initialize import (
+    get_model_parallel_group,
+    get_model_parallel_world_size,
+    get_model_parallel_rank,
+    get_fp32_allreduce,
+)
+from .utils import split_tensor_along_last_dim, split_tensor_along_any_dim
+
+
+def _reduce(input_):
+    """All-reduce the input tensor across the model parallel group."""
+
+    # Bypass the function if we are using only 1 GPU.
+    if get_model_parallel_world_size() == 1:
+        return input_
+
+    # upcast to fp32 if using fp32 allreduce
+    dt = input_.dtype
+    if get_fp32_allreduce():
+        input_ = input_.float()
+
+    # All-reduce.
+    torch.distributed.all_reduce(input_, group=get_model_parallel_group())
+
+    # reconvert to original Bf16/Fp16 dtype
+    if get_fp32_allreduce():
+        input_ = input_.to(dt)
+
+    return input_
+
+
+def _split(input_):
+    """Split the tensor along its last dimension and keep the
+    corresponding slice."""
+
+    world_size = get_model_parallel_world_size()
+    # Bypass the function if we are using only 1 GPU.
+    if world_size == 1:
+        return input_
+
+    # Split along last dimension.
+    input_list = split_tensor_along_last_dim(input_, world_size)
+
+    # Note: torch.split does not create contiguous tensors by default.
+    rank = get_model_parallel_rank()
+    output = input_list[rank].contiguous()
+
+    return output
+
+
+def _gather(input_):
+    """Gather tensors and concatenate along the last dimension."""
+
+    world_size = get_model_parallel_world_size()
+    # Bypass the function if we are using only 1 GPU.
+    if world_size == 1:
+        return input_
+
+    # Size and dimension.
+    last_dim = input_.dim() - 1
+    rank = get_model_parallel_rank()
+
+    tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
+    tensor_list[rank] = input_
+    torch.distributed.all_gather(tensor_list, input_, group=get_model_parallel_group())
+
+    # Note: torch.cat already creates a contiguous tensor.
+    output = torch.cat(tensor_list, dim=last_dim).contiguous()
+
+    return output
+
+
+def _reduce_scatter_along_seq_dim(input_, seq_dim):
+    """Reduce-scatter the input tensor across the model parallel group, scattering across the sequence dim."""
+    world_size = get_model_parallel_world_size()
+    # Bypass the function if we are using only 1 GPU.
+    if world_size == 1:
+        return input_
+
+    # upcast to fp32 if using fp32 allreduce
+    dt = input_.dtype
+    if get_fp32_allreduce():
+        input_ = input_.float()
+
+    dim_size = list(input_.size())
+    assert (
+        isinstance(seq_dim, int) and seq_dim < len(dim_size) and seq_dim >= 0
+    ), "seq_dim must be a valid tensor dim"
+    assert dim_size[seq_dim] % world_size == 0
+
+    if seq_dim == 0:
+        # reduce_scatter_tensor is faster but only works correctly on dimension 0
+        dim_size[seq_dim] = dim_size[seq_dim] // world_size
+        output = torch.empty(
+            dim_size, dtype=input_.dtype, device=torch.cuda.current_device()
+        )
+        torch.distributed.reduce_scatter_tensor(
+            output, input_.contiguous(), group=get_model_parallel_group()
+        )
+    else:
+        tensor_list = list(
+            torch.split(input_, input_.shape[seq_dim] // world_size, seq_dim)
+        )
+        output = torch.empty_like(tensor_list[0])
+        torch.distributed.reduce_scatter(
+            output, tensor_list, group=get_model_parallel_group()
+        )
+
+    # reconvert to original Bf16/Fp16 dtype
+    if get_fp32_allreduce():
+        output = output.to(dt)
+
+    return output
+
+
+def _gather_along_seq_dim(input_, seq_dim):
+    """Gather tensors and concatenate along the (manually-specified) sequence dimension."""
+
+    world_size = get_model_parallel_world_size()
+    # Bypass the function if we are using only 1 GPU.
+ if world_size == 1: + return input_ + + dim_size = list(input_.size()) + assert ( + isinstance(seq_dim, int) and seq_dim < len(dim_size) and seq_dim >= 0 + ), "seq_dim must be a valid tensor dim" + dim_size[seq_dim] = dim_size[seq_dim] * world_size + + if seq_dim == 0: + # reduce_gather_tensor is faster but only works correctly on dimension 0 + output = torch.empty( + dim_size, dtype=input_.dtype, device=torch.cuda.current_device() + ) + torch.distributed.all_gather_into_tensor( + output, input_.contiguous(), group=get_model_parallel_group() + ) + else: + input_ = input_.contiguous() + rank = get_model_parallel_rank() + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather( + tensor_list, input_, group=get_model_parallel_group() + ) + output = torch.cat(tensor_list, dim=seq_dim) + + return output + + +def _split_along_seq_dim(input_, seq_dim): + """Split the tensor along the sequence dimension (as manually selected) and keep the + corresponding slice.""" + + world_size = get_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Split along second dimension. + input_list = split_tensor_along_any_dim(input_, world_size, seq_dim) + + # Note: torch.split does not create contiguous tensors by default. + rank = get_model_parallel_rank() + output = input_list[rank].contiguous() + + return output + + +class _CopyToModelParallelRegion(torch.autograd.Function): + """Pass the input to the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return input_ + + @staticmethod + def forward(ctx, input_): + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output) + + +class _ReduceFromModelParallelRegion(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce(input_) + + @staticmethod + def forward(ctx, input_): + return _reduce(input_) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _ScatterToModelParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split(input_) + + @staticmethod + def forward(ctx, input_): + return _split(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather(grad_output) + + +class _GatherFromModelParallelRegion(torch.autograd.Function): + """Gather the input from model parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_): + return _gather(input_) + + @staticmethod + def forward(ctx, input_): + return _gather(input_) + + @staticmethod + def backward(ctx, grad_output): + return _split(grad_output) + + +class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function): + """Reduce-Scatter across sequence parallel region (same as model parallel region.) 
+ Note: same region as model parallel region + """ + + @staticmethod + def symbolic(graph, input_, seq_dim): + return _reduce_scatter_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def forward(ctx, input_, seq_dim): + ctx.seq_dim = seq_dim + return _reduce_scatter_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def backward(ctx, grad_output): + seq_dim = ctx.seq_dim + return _gather_along_seq_dim(grad_output, seq_dim=seq_dim), None + + +class _GatherFromSequenceParallelRegion(torch.autograd.Function): + """All-Gather across sequence parallel region (same region as model parallel region.)""" + + @staticmethod + def symbolic(graph, input_, seq_dim): + return _gather_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def forward(ctx, input_, seq_dim): + ctx.seq_dim = seq_dim + return _gather_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def backward(ctx, grad_output): + seq_dim = ctx.seq_dim + return _reduce_scatter_along_seq_dim(grad_output, seq_dim=seq_dim), None + + +class _ScatterToSequenceParallelRegion(torch.autograd.Function): + """Scatter (split) sequence length across sequence parallel region (=> same region as model parallel.)""" + + @staticmethod + def symbolic(graph, input_, seq_dim): + return _split_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def forward(ctx, input_, seq_dim): + ctx.seq_dim = seq_dim + return _split_along_seq_dim(input_, seq_dim=seq_dim) + + @staticmethod + def backward(ctx, grad_output): + seq_dim = ctx.seq_dim + return ( + _gather_along_seq_dim(grad_output, seq_dim=seq_dim), + None, + ) + + +# ----------------- +# Helper functions. +# ----------------- + + +def copy_to_model_parallel_region(input_): + return _CopyToModelParallelRegion.apply(input_) + + +def reduce_from_model_parallel_region(input_): + return _ReduceFromModelParallelRegion.apply(input_) + + +def scatter_to_model_parallel_region(input_): + return _ScatterToModelParallelRegion.apply(input_) + + +def gather_from_model_parallel_region(input_): + return _GatherFromModelParallelRegion.apply(input_) + + +def reduce_scatter_to_sequence_parallel_region(input_, seq_dim=0): + return _ReduceScatterToSequenceParallelRegion.apply(input_, seq_dim) + + +def gather_from_sequence_parallel_region(input_, seq_dim=0): + return _GatherFromSequenceParallelRegion.apply(input_, seq_dim) + + +def scatter_to_sequence_parallel_region( + input_, seq_dim=1 +): # use this fn in scattering input embeds across TP ranks. There, shape of inps is [b, s, h] instead of the usual [s, b, h] + return _ScatterToSequenceParallelRegion.apply(input_, seq_dim) diff --git a/megatron/mpu/random.py b/megatron/mpu/random.py new file mode 100644 index 0000000000000000000000000000000000000000..77fe80b3ef54934dca658f73ed5e483fce66308f --- /dev/null +++ b/megatron/mpu/random.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
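# A usage sketch for the RNG tracker re-exported below: layers.py forks it so
# that each model parallel rank initializes its weight partition from an
# independent but reproducible stream. Assumes a CUDA device and that
# model_parallel_cuda_manual_seed(seed) was already called during setup.
import torch
from megatron.mpu.random import get_cuda_rng_tracker

weight = torch.empty(4, 4, device="cuda")
with get_cuda_rng_tracker().fork():
    # Inside the fork, the model-parallel RNG state is swapped in, so ranks
    # holding different partitions draw different (but seeded) values.
    torch.nn.init.xavier_normal_(weight)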
+
+# We have mostly moved to DeepSpeed's activation checkpointing (the code is identical),
+# so this file currently only re-exports it.
+# TODO: should be able to get rid of this file entirely
+
+import deepspeed
+import deepspeed.runtime.activation_checkpointing.checkpointing as checkpointing
+
+# Default name for the model parallel rng tracker.
+_MODEL_PARALLEL_RNG_TRACKER_NAME = (
+    deepspeed.checkpointing._MODEL_PARALLEL_RNG_TRACKER_NAME
+)
+
+# Whether to apply model parallelism to checkpointed hidden states.
+_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
+
+# RNG tracker object.
+_CUDA_RNG_STATE_TRACKER = deepspeed.checkpointing._CUDA_RNG_STATE_TRACKER
+
+# Deepspeed checkpointing functions
+# TODO: replace calls to these in our codebase with calls to the deepspeed ones
+_set_cuda_rng_state = checkpointing._set_cuda_rng_state
+checkpoint = checkpointing.checkpoint
+model_parallel_cuda_manual_seed = checkpointing.model_parallel_cuda_manual_seed
+get_cuda_rng_tracker = checkpointing.get_cuda_rng_tracker
diff --git a/megatron/mpu/utils.py b/megatron/mpu/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f97e0e76abe1dd23b049eb3e854eef6de3705cd
--- /dev/null
+++ b/megatron/mpu/utils.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import torch
+
+
+def ensure_divisibility(numerator, denominator):
+    """Ensure that numerator is divisible by the denominator."""
+    assert numerator % denominator == 0, "{} is not divisible by {}".format(
+        numerator, denominator
+    )
+
+
+def divide(numerator, denominator):
+    """Ensure that numerator is divisible by the denominator and return
+    the division value."""
+    ensure_divisibility(numerator, denominator)
+    return numerator // denominator
+
+
+def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
+    """Split a tensor along its last dimension.
+    Arguments:
+        tensor: input tensor.
+        num_partitions: number of partitions to split the tensor into.
+        contiguous_split_chunks: If True, make each chunk contiguous
+                                 in memory.
+    """
+    # Get the size and dimension.
+    last_dim = tensor.dim() - 1
+    last_dim_size = divide(tensor.size()[last_dim], num_partitions)
+    # Split.
+    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
+    # Note: torch.split does not create contiguous tensors by default.
+    if contiguous_split_chunks:
+        return tuple(chunk.contiguous() for chunk in tensor_list)
+
+    return tensor_list
+
+
+def split_tensor_along_any_dim(
+    tensor, num_partitions, seq_dim, contiguous_split_chunks=False
+):
+    """Split a tensor along a user-specified dimension.
+    Arguments:
+        tensor: input tensor.
+        num_partitions: number of partitions to split the tensor into.
+        seq_dim: dimension along which to split the tensor.
+        contiguous_split_chunks: If True, make each chunk contiguous
+                                 in memory.
+ """ + # Get the size and dimension. + seq_dim_size = divide(tensor.size()[seq_dim], num_partitions) + # Split. + tensor_list = torch.split(tensor, seq_dim_size, dim=seq_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +class VocabUtility: + """Split the vocabulary into `world_size` chunks amd return the + first and last index of the vocabulary belonging to the `rank` + partition: Note that indices in [first, last]""" + + @staticmethod + def vocab_range_from_per_partition_vocab_size( + per_partition_vocab_size, rank, world_size + ): + index_f = rank * per_partition_vocab_size + index_l = index_f + per_partition_vocab_size + return index_f, index_l + + @staticmethod + def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size): + per_partition_vocab_size = divide(global_vocab_size, world_size) + return VocabUtility.vocab_range_from_per_partition_vocab_size( + per_partition_vocab_size, rank, world_size + ) diff --git a/megatron/mup_substitute.py b/megatron/mup_substitute.py new file mode 100644 index 0000000000000000000000000000000000000000..e16a21589590b5429ab40c00cf81511bd500df48 --- /dev/null +++ b/megatron/mup_substitute.py @@ -0,0 +1,212 @@ +""" +Helper functions for performing coord check. +""" +import os +from copy import copy +from itertools import product + +import numpy as np +import pandas as pd +import torch +import torch.nn.functional as F + +from mup import coord_check as mup_coord_check +from megatron.training import train_step + + +def _get_coord_data( + neox_args, + timers, + lr_scheduler, + models, + dataloader, + optcls, + nsteps=3, + dict_in_out=False, + flatten_input=False, + flatten_output=False, + output_name="loss", + lossfn="xent", + filter_module_by_name=None, + fix_data=True, + cuda=True, + nseeds=1, + output_fdict=None, + input_fdict=None, + param_fdict=None, + show_progress=True, + one_hot_target=False, +): + df = [] + + for i in range(nseeds): + torch.manual_seed(i) + for width, model in models.items(): + model = model() + model.train() + optimizer = optcls(model) + for step in range(nsteps + 1): + remove_hooks = [] + # add hooks + for name, module in model.named_modules(): + if filter_module_by_name and not filter_module_by_name(name): + continue + remove_hooks.append( + module.register_forward_hook( + mup_coord_check._record_coords( + df, + width, + name, + step + 1, + output_fdict=output_fdict, + input_fdict=input_fdict, + param_fdict=param_fdict, + ) + ) + ) + + # train for a step + loss_dict, skipped_iter = train_step( + neox_args=neox_args, + timers=timers, + data_iterator=dataloader, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + ) + + # remove hooks + for handle in remove_hooks: + handle.remove() + + import gc + + del model + gc.collect() + + return pd.DataFrame(df) + + +def get_coord_data( + neox_args, + timers, + lr_scheduler, + models, + dataloader, + optimizer="sgd", + lr=None, + mup=True, + filter_trainable_by_name=None, + **kwargs +): + """Get coord data for coord check. + Train the models in `models` with data from `dataloader` and optimizer + specified by `optimizer` and `lr` for `nsteps` steps, and record coordinate + statistics specified by `output_fdict`, `input_fdict`, `param_fdict`. By + default, only `l1` is computed for output activations of each module. 
+ This function wraps around `_get_coord_data`, with the main difference being + user can specify common optimizers via a more convenient interface. + Inputs: + models: + a dict of lazy models, where the keys are numbers indicating width. + Each entry of `models` is a function that instantiates a model given + nothing. + dataloader: + an iterator whose elements are either Huggingface style dicts, if + `dict_in_out` is True, or (input, label). If `fix_data` is True + (which is the default), then only the first element of `dataloader` + is used in a loop and the rest of `dataloder` is ignored. + optimizer: + a string in `['sgd', 'adam', 'adamw']`, with default being `'sgd'`. + lr: + learning rate. By default is 0.1 for `'sgd'` and 1e-3 for others. + mup: + If True, then use the optimizer from `mup.optim`; otherwise, use the + one from `torch.optim`. + filter_trainable_by_name: + a function that returns a bool given module names (from + `model.named_modules()`), or None. If not None, then only modules + whose name yields True will be trained. + nsteps: + number of steps to train the model + dict_in_out: + whether the data loader contains Huggingface-style dict input and + output. Default: False + flatten_input: + if not `dict_in_out`, reshape the input to be + `input.view(input.shape[0], -1)`. Typically used for testing MLPs. + flatten_output: + if not `dict_in_out`, reshape the label to be `label.view(-1, + input.shape[-1])`. + output_name: + if `dict_in_out`, this is the key for the loss value if the output + is a dict. If the output is not a dict, then we assume the first + element of the output is the loss. + lossfn: + loss function to use if not `dict_in_out`. Can be either a string from + [`xent`, 'mse', 'nll', 'l1'] or a python `callable` such that + `lossfn(output, target)` returns the loss value. Examples of valid + `callable`s are `F.cross_entropy`, `F.mse_loss`, etc, where `F` is + `torch.nn.functional`. Default: 'xent' + filter_module_by_name: + a function that returns a bool given module names (from + `model.named_modules()`), or None. If not None, then only modules + whose name yields True will be recorded. + cuda: + whether to use cuda or not. Default: True + nseeds: + number of times to repeat the training, each with different seeds. + output_fdict, input_fdict, param_fdict: + function dicts to be used in `_record_coords`. By default, only `l1` + is computed for output activations of each module. + show_progress: + show progress using tqdm. Default: True + one_hot_target: + convert target label into a one-hot vector. This typically is only + used for `'mse'` or `'l1'` losses in classification tasks. + Default: False + Output: + a pandas DataFrame containing recorded results. The column names are + `'width', 'module', 't'` as well as names of statistics recorded, such + as `'l1'` (see `FDICT` for other premade statistics that can be + collected). + + Breaking Changes: + In v1.0.0, when `lossfn=='mse'`, the target is automatically converted + to a one hot vector before loss computation. Starting in v1.1.0, this + behavior is turned off, and the user needs to explicitly turn on this + behavior by setting `one_hot_target=True`. 
+    """
+    if lr is None:
+        lr = 0.1 if optimizer == "sgd" else 1e-3
+    if mup:
+        from mup.optim import MuAdam as Adam
+        from mup.optim import MuAdamW as AdamW
+        from mup.optim import MuSGD as SGD
+    else:
+        from torch.optim import SGD, Adam, AdamW
+
+    def get_trainable(model):
+        params = model.parameters()
+        if filter_trainable_by_name is not None:
+            params = []
+            for name, p in model.named_parameters():
+                if filter_trainable_by_name(name):
+                    params.append(p)
+        return params
+
+    if optimizer == "sgd":
+        optcls = lambda model: SGD(get_trainable(model), lr=lr)
+    elif optimizer == "adam":
+        optcls = lambda model: Adam(get_trainable(model), lr=lr)
+    elif optimizer == "adamw":
+        optcls = lambda model: AdamW(get_trainable(model), lr=lr)
+    else:
+        raise ValueError("optimizer should be sgd|adam|adamw or a custom function")
+
+    data = _get_coord_data(
+        neox_args, timers, lr_scheduler, models, dataloader, optcls, **kwargs
+    )
+    data["optimizer"] = optimizer
+    data["lr"] = lr
+    return data
diff --git a/megatron/neox_arguments/__init__.py b/megatron/neox_arguments/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..025464cbfcd2aaff334f35d78367899cb1bbd8e2
--- /dev/null
+++ b/megatron/neox_arguments/__init__.py
@@ -0,0 +1,36 @@
+"""
+NeoX Arguments manages all configuration arguments.
+
+**general**
+
+* The implementation makes use of the python dataclass.
+* The main class 'NeoXArgs' (in ./arguments) exposes all configuration attributes that are relevant to GPT NeoX.
+* No attributes are nested (apart from attributes with type dict).
+* Output functions (enable_logging, save_yml, print) are implemented.
+* Instantiation always runs NeoXArgs.__post_init__(), which calculates derived values and performs a validation (values, types, keys).
+* It is possible to set undefined attributes (e.g. the line of code 'NeoXArgs().my_undefined_config = 42' works fine); such attributes are not validated.
+* It is possible to update attributes (e.g. the line of code 'NeoXArgs().do_train = True' works fine); a validation can be performed by calling the validation functions on the class instance.
+* In order to avoid setting undefined attributes, use NeoXArgs().update_value(); this function raises an error if the attribute to be set is not defined.
+
+**instantiation**
+NeoX args can be instantiated with the following options:
+
+* NeoXArgs.from_ymls(["path_to_yaml1", "path_to_yaml2", ...]): load yaml configuration files and instantiate with the values provided; checks for duplicate and unknown arguments are performed.
+* NeoXArgs.from_dict({"num_layers": 12, ...}): load attribute values from a dict; checks for unknown arguments are performed.
+
+* NeoXArgs.consume_deepy_args(): entry point for deepy.py, configuring and consuming command line arguments (i.e. user_script, conf_dir, conf_file, wandb_group, wandb_team); neox_args.get_deepspeed_main_args() produces a list of command line arguments to feed to deepspeed.launcher.runner.main.
+* NeoXArgs.consume_neox_args(): in the call stack deepy.py -> deepspeed -> pretrain_gpt2.py, arguments are passed to pretrain_gpt2.py by neox_args.get_deepspeed_main_args(); the produced arguments can then be read with consume_neox_args() to instantiate a NeoXArgs instance.
+
+
+**code structure**
+
+* NeoX args (in ./arguments) inherits from the following subclasses: NeoXArgsDeepspeedRunner, NeoXArgsDeepspeedConfig, NeoXArgsModel, NeoXArgsTokenizer, NeoXArgsTraining, NeoXArgsParallelism, NeoXArgsLogging, NeoXArgsOther, NeoXArgsTextgen.
+* The subclasses group args according to their purpose.
+* The attributes of NeoXArgsDeepspeedRunner are directly mapped to the expected command line args of deepspeed.launcher.runner.main; no attributes unknown to deepspeed should be included; no arguments relevant for deepspeed should be omitted.
+* The attributes of NeoXArgsDeepspeedConfig are directly mapped to the expected keys of the deepspeed config; no arguments relevant for deepspeed should be omitted.
+* Calculated attributes (decorator '@property') are available as attributes, but are not included in the dataclass fields (e.g. NeoXArgs().__dataclass_fields__.items()).
+* Refer to docstrings in the code for more information.
+"""
+
+
+from .arguments import NeoXArgs
diff --git a/megatron/neox_arguments/arguments.py b/megatron/neox_arguments/arguments.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa475c057e478681d12b88700474dbb824c4efb8
--- /dev/null
+++ b/megatron/neox_arguments/arguments.py
@@ -0,0 +1,1463 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
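# An instantiation sketch for the entry points documented in
# megatron/neox_arguments/__init__.py; the config paths and values here are
# hypothetical placeholders, and __post_init__ validation still applies.
from megatron.neox_arguments import NeoXArgs

# From YAML files: duplicate keys across files raise a ValueError, and
# unknown keys are rejected when the dataclass is instantiated.
neox_args = NeoXArgs.from_ymls(["configs/125M.yml", "configs/local_setup.yml"])

# From a plain dict, e.g. in tests:
small_args = NeoXArgs.from_dict(
    {"num_layers": 12, "hidden_size": 768, "num_attention_heads": 12}
)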
+
+import base64
+import os
+from pathlib import Path
+import yaml
+import json
+import logging
+import copy
+import torch
+import argparse
+from pkg_resources import packaging
+from importlib.metadata import version
+
+from dataclasses import dataclass
+from typing import List, Dict
+from socket import gethostname
+
+try:
+    from typing import Literal, Union
+except ImportError:
+    from typing_extensions import Literal, Union
+from deepspeed.launcher.runner import DLTS_HOSTFILE
+from megatron.logging import Tee
+from megatron.tokenizer import build_tokenizer
+from megatron.utils import obtain_resource_pool, expand_attention_types
+from .deepspeed_args import NeoXArgsDeepspeedConfig, NeoXArgsDeepspeedRunner
+from .neox_args import (
+    NeoXArgsModel,
+    NeoXArgsTokenizer,
+    NeoXArgsTraining,
+    NeoXArgsParallelism,
+    NeoXArgsLogging,
+    NeoXArgsOther,
+    NeoXArgsTextgen,
+    NeoXArgsOptimizer,
+    NeoXArgsLRScheduler,
+    ATTENTION_TYPE_CHOICES,
+)
+
+### Logging colors ###
+GREEN = "\033[92m"
+RED = "\033[91m"
+YELLOW = "\033[93m"
+END = "\033[0m"
+SUCCESS = f"{GREEN} [SUCCESS] {END}"
+OKAY = f"{GREEN}[OKAY]{END}"
+WARNING = f"{YELLOW}[WARNING]{END}"
+FAIL = f"{RED}[FAIL]{END}"
+INFO = "[INFO]"
+
+# ZeRO defaults from deepspeed
+# These values should not be changed unless the defaults in deepspeed change.
+# For all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
+ZERO_DEFAULTS = {
+    "stage": 0,
+    "allgather_partitions": True,
+    "reduce_scatter": True,
+    "allgather_bucket_size": int(5e8),
+    "overlap_comm": False,
+    "reduce_bucket_size": int(5e8),
+    "contiguous_gradients": False,
+}
+
+# NeoX optimizer defaults
+OPT_DEFAULT = "Adam"
+OPT_PARAMS_DEFAULTS = {
+    "lr": 0.001,
+    "betas": [0.9, 0.999],
+    "eps": 1.0e-8,
+    "weight_decay": 0,
+    "freeze_step": 400,
+    "momentum": 0.0,
+    "cuda_aware": False,
+}
+
+
+AUTOTUNING_ARGS = (
+    "train_batch_size",
+    "train_micro_batch_size_per_gpu",
+    "gradient_accumulation_steps",
+    "zero_optimization",
+    "autotuning",
+)
+
+BASE_CLASSES = [
+    NeoXArgsDeepspeedRunner,
+    NeoXArgsDeepspeedConfig,
+    NeoXArgsModel,
+    NeoXArgsLRScheduler,
+    NeoXArgsOptimizer,
+    NeoXArgsTokenizer,
+    NeoXArgsTraining,
+    NeoXArgsParallelism,
+    NeoXArgsLogging,
+    NeoXArgsTextgen,
+    NeoXArgsOther,
+]
+
+DEEPSPEED_ARG_CLASSES = [NeoXArgsDeepspeedRunner, NeoXArgsDeepspeedConfig]
+NEOX_ARG_CLASSES = [i for i in BASE_CLASSES if i not in DEEPSPEED_ARG_CLASSES]
+
+if "DLTS_HOSTFILE" in os.environ:
+    DLTS_HOSTFILE = os.environ["DLTS_HOSTFILE"]
+
+
+@dataclass
+class NeoXArgs(*BASE_CLASSES):
+    """
+    Data class containing all configurations.
+
+    NeoXArgs inherits from a number of small configuration classes.
+    """
+
+    ############################################################################################################################
+    # start of instantiation
+
+    def __post_init__(self):
+        """
+        After initialization of default or loaded values,
+        a number of functions are performed in order to
+        calculate values, assert consistency and do typechecking.
+ """ + if not NeoXArgs.validate_keys(): + raise ValueError( + self.__class__.__name__ + + ".__post_init__() NeoXArgs keys cannot be validated" + ) + + self.enable_logging() + + self.calculate_derived() + + if not self.validate_types(): + raise ValueError( + self.__class__.__name__ + + ".__post_init__() NeoXArgs types cannot be validated" + ) + + if not self.validate_values(): + raise ValueError( + self.__class__.__name__ + + ".__post_init__() NeoXArgs values cannot be validated" + ) + + def build_tokenizer(self): + self.tokenizer = build_tokenizer(self) + + def initialize_tensorboard_writer(self): + if self.tensorboard_dir and self.rank == 0: + try: + from torch.utils.tensorboard import SummaryWriter + + print("> setting up tensorboard ...") + self.tensorboard_writer = SummaryWriter(log_dir=self.tensorboard_dir) + except (ModuleNotFoundError, ImportError): + print( + "WARNING: TensorBoard writing requested but is not " + "available (are you using PyTorch 1.1.0 or later and do you have tensorboard installed?), " + "no TensorBoard logs will be written.", + flush=True, + ) + + def initialize_comet(self): + if self.use_comet and self.rank == 0: + try: + import comet_ml + + # Deactivate output logging to avoid any potential interference with Tee + self.comet_experiment = comet_ml.start( + workspace=self.comet_workspace, + project=self.comet_project, + experiment_config=comet_ml.ExperimentConfig( + auto_output_logging=False + ), + ) + self.comet_experiment.__internal_api__log_parameters__( + self.all_config, + framework="gpt-neox", + source="manual", + flatten_nested=True, + ) + + if self.comet_experiment_name: + self.comet_experiment.set_name(self.comet_experiment_name) + + if self.comet_tags: + self.comet_experiment.add_tags(self.comet_tags) + + if self.comet_others: + self.comet_experiment.log_others(self.comet_others) + + logging.info("> setting up comet ...") + except ImportError as e: + logging.error( + f'{FAIL} importing comet. Comet can be installed with "pip install comet_llm". See https://github.com/comet-ml/comet-llm for more info. Full error is:' + ) + raise e + except Exception as e: + logging.error( + f'{FAIL} Error setting up Comet. Either set "use_comet: False" in your configuration file, or resolve the issue with Comet. Full error is:', + ) + raise e + + @classmethod + def from_ymls(cls, paths_to_yml_files: List[str], overwrite_values: Dict = None): + """ + instantiates NeoXArgs while reading values from yml files + + paths_to_yml_files: list of paths to yml files + + overwrite_values: If provided, overwrite any values in the yamls with these values + """ + + print(cls.__name__ + ".from_ymls() " + str(paths_to_yml_files), flush=True) + + # initialize an empty config dictionary to be filled by yamls + config = dict() + config_files = dict() + # iterate of all to be loaded yaml files + for conf_file_name in paths_to_yml_files: + # load file + with open(conf_file_name) as conf_file: + conf = yaml.load(conf_file, Loader=yaml.FullLoader) + + # check for key duplicates and load values + for conf_key, conf_value in conf.items(): + if conf_key in config: + raise ValueError( + f"Conf file {conf_file_name} has the following duplicate keys with previously loaded file: {conf_key}" + ) + + conf_key_converted = conf_key.replace( + "-", "_" + ) # TODO remove replace and update configuration files? 
+ config[conf_key_converted] = conf_value + + # load original config files to save unchanged with checkpoint + # saving the original config retains comments + filename = os.path.basename(conf_file_name) + assert ( + filename not in config_files + ), "At least two config files have the same filename. This will result in conflicts when saving out configs with the checkpoint in one single directory. Please use unique names for configs." + config_files[filename] = open(conf_file_name).read() + + # add config file content to neox args to make them accessible in code + # this is used when saving checkpoints + config["config_files"] = config_files + + # Configuration parameters not specified + params_not_in_config = sorted( + list(set(cls.__dataclass_fields__.keys()) - set(config.keys())) + ) + if len(params_not_in_config) > 0: + logging.debug( + cls.__name__ + + ".from_ymls() Configuration parameters not specified (using defaults): " + + ", ".join(params_not_in_config) + ) + + if overwrite_values is not None: + for k, v in overwrite_values.items(): + config[k] = v + + # instantiate class and return + # duplicate values and unrecognized keys are again checked upon instantiation + return cls(**config) + + @classmethod + def from_dict(cls, args_dict: Dict): + """ + instantiates NeoXArgs while reading values from input dict + """ + return cls(**args_dict) + + ############################################################################################################################ + # start of command line args interface + + @classmethod + def consume_deepy_args(cls, input_args=None): + """ + entry point for deepy.py configuring and consuming command line arguments. + + We can use `--wandb_group` / `--wandb_team` to overwrite those args from the command line, otherwise the value from the config is taken. + """ + + parser = argparse.ArgumentParser( + description="GPT-NeoX Configuration", allow_abbrev=False + ) + + group = parser.add_argument_group(title="Training Configuration") + + group.add_argument( + "user_script", + type=str, + help="User script to launch, followed by any required " "arguments.", + ) + + group.add_argument( + "--conf_dir", + "-d", + type=str, + default=None, + help="Directory to prefix to all configuration file paths", + ) + + group.add_argument( + "conf_file", + type=str, + nargs="+", + help="Configuration file path. Multiple files can be provided and will be merged.", + ) + + group = parser.add_argument_group(title="Weights and Biases monitoring args") + + group.add_argument( + "--wandb_group", + type=str, + default=None, + help='Weights & Biases group name - used to group together "runs".', + ) + group.add_argument( + "--wandb_team", + type=str, + default=None, + help="Weights & Biases team name.", + ) + + group = parser.add_argument_group(title="Eval args") + + group.add_argument( + "--eval_tasks", + type=str, + nargs="+", + default=None, + help="Optionally overwrite eval tasks to run for eval.py", + ) + group.add_argument( + "--iteration", + type=int, + default=None, + help="Iteration to load checkpoint from in the eval.py and generate.py scripts. 
If None is provided, uses the latest iteration.", + ) + group.add_argument( + "--eval_results_prefix", + type=str, + default=None, + help="prefix to append to eval results file", + ) + parser.add_argument( + "-H", + "--hostfile", + type=str, + help="Hostfile path (in MPI style) that defines the " + "resource pool available to the job (e.g., " + "worker-0 slots=4)", + ) + group = parser.add_argument_group(title="Generation args") + group.add_argument( + "-i", + "--sample_input_file", + type=str, + default=None, + help="Optionally overwrite `sample_input_file` for generate.py", + ) + group.add_argument( + "-o", + "--sample_output_file", + type=str, + default=None, + help="Optionally overwrite `sample_output_file` for generate.py", + ) + + tuning = parser.add_argument_group(title="DeepSpeed Autotuning") + tuning.add_argument( + "--autotuning", + type=str, + default=None, + choices=("tune", "run"), + help="Use DeepSpeed's autotuning feature to optimize certain hyperparameters. For more details refer to documentation here: https://www.deepspeed.ai/tutorials/autotuning/", + ) + args_parsed = parser.parse_args(input_args) + + # Validate user_script exists + assert os.path.exists( + args_parsed.user_script + ), f"User script could not be found: {args_parsed.user_script}" + + # load config files + conf_files = args_parsed.conf_file + if args_parsed.conf_dir: + conf_files = [os.path.join(args_parsed.conf_dir, f) for f in conf_files] + + # enables us to pass in `125M` instead of `125M.yml` + conf_files = [ + (cf if (cf.endswith(".yml") or cf.endswith(".json")) else cf + ".yml") + for cf in conf_files + ] + + # determine overwrite values + overwrite_values = dict() + for k, v in vars(args_parsed).items(): + if k == "autotuning" and v is not None: + overwrite_values["autotuning_run"] = v + elif k not in ["conf_dir", "conf_file"] and v is not None: + overwrite_values[k] = v + + # load args + neox_args = cls.from_ymls( + paths_to_yml_files=conf_files, overwrite_values=overwrite_values + ) + + if neox_args.use_wandb: + try: + import wandb + + # Check if the W&B group name is configured + if neox_args.wandb_group is None: + # Set a randomized string as group name if no group name is provided + neox_args.wandb_group = wandb.sdk.lib.runid.generate_id() + else: + # Concatenate the W&B group name with a randomized string to ensure uniqueness. + neox_args.wandb_group += "_" + wandb.sdk.lib.runid.generate_id() + except ModuleNotFoundError as e: + if e.name == "wandb": + e.msg += "\nWeights & Biases monitoring was requested but `wandb` was not found. Install `wandb` to use Weights & Biases, or set the `use_wandb` configuration option to a boolean false to disable Weights & Biases logging." + raise e + + neox_args.wandb_group += "_" + wandb.util.generate_id() + + neox_args.print() + + return neox_args + + @classmethod + def consume_neox_args(cls, overwrite_values=None, input_args=None): + """ + Deepspeed launcher needs to pass the arguments for `pretrain_gpt2.py` across to all machines. + + In order not to have any problems with different configs being mismatched across machines, we instead read the .yaml configuration file from the main rank, + then serialize the arguments to a dictionary, which the deepspeed launcher broadcasts to all machines (`--megatron_config`). + + We then instantiate a new NeoXArgs from the dictionary (`.from_dict`). This should ensure args are never inconsistent across machines. 
+ """ + + parser = argparse.ArgumentParser( + description="GPT-NeoX Configuration", allow_abbrev=False + ) + parser.add_argument( + "--megatron_config", + type=str, + default=None, + help="json dict dumped as string in NeoXArgs.get_deepspeed_main_args()", + ) + parser.add_argument( + "--deepspeed_config", + type=str, + default=None, + help="Only need this (at this stage) for autotuning", + ) + args_parsed, _ = parser.parse_known_args(input_args) + megatron_config = json.loads( + base64.urlsafe_b64decode(args_parsed.megatron_config).decode("utf-8") + ) + if args_parsed.deepspeed_config is not None: + overwrite_values = cls.set_up_autotuning( + args_parsed.deepspeed_config, overwrite_values + ) + if overwrite_values is not None: + megatron_config.update(overwrite_values) + return cls.from_dict(args_dict=megatron_config) + + @staticmethod + def set_up_autotuning(encoded_config, overwrite_values): + config = json.loads(base64.urlsafe_b64decode(encoded_config).decode("utf-8")) + overwrite_values = overwrite_values if overwrite_values else {} + for tuning_param in AUTOTUNING_ARGS: + # TODO: This is for autotuning specifically, may cause surprises for someone with a weird setup + if tuning_param in config: + overwrite_values[tuning_param] = config[tuning_param] + return overwrite_values + + @staticmethod + def convert_key_value_to_command_line_arg(k, v): + if isinstance(v, bool): + if v: + return [f"--{k}"] + else: + return [] + if v is None: + return [] + return [f"--{k}", str(v)] + + def get_extra_deepspeed_args(self): + """ + Sets up the extra arguments for deepspeed. This is done by reading in the `deepspeed_extra_args` dictionary from + the configuration file, and then adding any arguments where values differ from those specified in the dataclass. + """ + neox_args = self.get_parent_class_value_dict( + *self.__class__.__bases__, only_non_defaults=True + ) + + extra_ds_args = dict() + + for key, value in self.deepspeed_extra_args.items(): + # Check to make sure the key is not already changed from defaults, and raise an exception if it is + # This is to prevent users from accidentally writing arguments both in deepspeed_extra_args and in the base level + # of the configuration file + if hasattr(neox_args, key): + raise ValueError( + f"Key {key} is already specified elsewhere. Reading in a different value from the 'deepspeed_extra_args' option in the configuration file will cause undefined behavior." 
+ ) + extra_ds_args[key] = value + + return extra_ds_args + + def get_deepspeed_main_args(self): + args_list = list() + + if self.autotuning_run is not None: + args_list.extend( + self.convert_key_value_to_command_line_arg( + "autotuning", self.autotuning_run + ) + ) + + # get deepspeed runner args, and only pass them in to deepspeed launcher if they differ from defaults + for key, default_value in NeoXArgsDeepspeedRunner().defaults(): + if key == "autotuning_run": + continue + configured_value = getattr(self, key) + + if key == "force_multi": + if self.deepspeed_slurm or self.deepspeed_mpi: + configured_value = True + if configured_value != default_value: + args_list.extend( + self.convert_key_value_to_command_line_arg(key, configured_value) + ) + + if self.deepspeed_slurm: + comment = getattr(self, "comment") + if comment: + args_list.extend( + self.convert_key_value_to_command_line_arg("comment", comment) + ) + account = getattr(self, "account") + if account: + args_list.extend( + self.convert_key_value_to_command_line_arg("account", account) + ) + + # master_address = os.environ['SLURM_JOB_NODELIST'].split('\n')[0] + # args_list.extend( + # self.convert_key_value_to_command_line_arg('master_addr', master_address) + # ) + + if "DLTS_HOSTFILE" in os.environ: + args_list.extend( + self.convert_key_value_to_command_line_arg( + "hostfile", os.environ["DLTS_HOSTFILE"] + ) + ) + + if "MASTER_ADDR" in os.environ: + args_list.extend( + self.convert_key_value_to_command_line_arg( + "master_addr", os.environ["MASTER_ADDR"] + ) + ) + + if ( + "--include" in args_list or "--exclude" in args_list + ) and "--num_gpus" in args_list: + print( + "WARNING: both --include/--exclude and num_gpus were specified simultaneously - overriding num_gpus with --include/--exclude" + ) + # cannot specify these both simultaneously, remove num_gpus from list + idx = args_list.index("--num_gpus") + # pop twice, once for the arg, once for its value + args_list.pop(idx) + args_list.pop(idx) + + # add user script + args_list.append(self.user_script) + + self.configure_distributed_args() + cwd = Path.cwd() + + # get deepspeed_config + args_list.append("--deepspeed_config") + + if self.autotuning_run is not None: + ds_fp = cwd / Path("ds_config.json") + if self.rank == 0: + with open(ds_fp, mode="w") as ds_file: + json.dump(self.deepspeed_config, ds_file) + args_list.append(str(ds_fp)) + else: + encoded_ds_config = base64.urlsafe_b64encode( + json.dumps(self.deepspeed_config).encode("utf-8") + ).decode("utf-8") + args_list.append(encoded_ds_config) + + # get all config values + args_list.append("--megatron_config") + neox_args = self.get_parent_class_value_dict( + *self.__class__.__bases__, only_non_defaults=True + ) + encoded_mega_config = base64.urlsafe_b64encode( + json.dumps(neox_args).encode("utf-8") + ).decode("utf-8") + args_list.append(str(encoded_mega_config)) + return args_list + + ############################################################################################################################ + # start of calculated properties + + @property + def deepspeed_config(self) -> dict: + """ + returns a dict containing variables within deepspeed config + """ + config = self.get_parent_class_value_dict_extra_ds( + NeoXArgsDeepspeedConfig, only_non_defaults=True + ) + return config + + @property + def deepspeed_runner(self) -> dict: + """ + returns variables within deepspeed runner + """ + return self.get_parent_class_value_dict(NeoXArgsDeepspeedRunner) + + @property + def megatron_config(self) -> dict: + """ + 
returns variables within megatron args + """ + return self.get_parent_class_value_dict(*NEOX_ARG_CLASSES) + + @property + def all_config(self) -> dict: + """ + returns variables of all args + """ + return self.get_parent_class_value_dict(*BASE_CLASSES) + + def get_parent_class_value_dict( + self, *parent_classes, only_non_defaults=False + ) -> dict: + """ + takes a sequence of parent classes and returns corresponding values (with defaults set) + """ + # TODO no Nones or non-defaults + result = dict() + for parent in parent_classes: + for key, default_value in parent().defaults(): + if key in ["tokenizer", "tensorboard_writer", "adlr_autoresume_object"]: + continue + if only_non_defaults: + value = getattr(self, key) + if value == default_value: + continue + result[key] = getattr(self, key) + return result + + def get_parent_class_value_dict_extra_ds( + self, *parent_classes, only_non_defaults=False + ) -> dict: + """ + Takes a sequence of parent classes and returns corresponding values (with defaults set). + Also adds in any extra deepspeed arguments that are specified in the configuration file. + + Args: + parent_classes: sequence of parent classes + only_non_defaults: if True, only returns values that differ from defaults + + Returns: + dict of arguments and values + + """ + # TODO no Nones or non-defaults + result = dict() + for parent in parent_classes: + for key, default_value in parent().defaults(): + if key in [ + "tokenizer", + "tensorboard_writer", + "adlr_autoresume_object", + "deepspeed_extra_args", + ]: + continue + if only_non_defaults: + value = getattr(self, key) + if value == default_value: + continue + result[key] = getattr(self, key) + + if self.deepspeed_extra_args is not None: + extra_ds_args = self.get_extra_deepspeed_args() + result.update(extra_ds_args) + + return result + + @property + def params_dtype(self): + """ + returns the datatype on the basis of configured precision + """ + if self.precision == "fp16": + return torch.half + elif self.precision == "bfloat16": + return torch.bfloat16 + else: + return torch.float + + ############################################################################################################################ + # start of logging and output + + def enable_logging(self): + """ + enable Tee logs based on the configured logdir + """ + if self.log_dir: + os.makedirs(self.log_dir, exist_ok=True) + hostname = gethostname() + file_prefix = os.path.join(self.log_dir, hostname) + Tee(file_prefix + "_stdout.txt", err=False) + Tee(file_prefix + "_stderr.txt", err=True) + + def print(self): + """Print arguments.""" + if self.rank == 0 or self.rank is None: + print("-------------------- arguments --------------------", flush=True) + str_list = [] + for arg in vars(self): + # add arg + value + dots = "." * (32 - len(arg)) + value = getattr(self, arg) + print_str = " {} {} {}".format(arg, dots, value) + + # add info 'default or updated' + field_def = self.__dataclass_fields__.get(arg) + if field_def is not None: + default_info = ( + "default" if value == field_def.default else "updated" + ) + else: + default_info = "" + dots = "." 
* (64 - len(print_str)) + print_str += dots + str_list.append({"print_str": print_str, "default_info": default_info}) + + for arg in sorted( + sorted(str_list, key=lambda x: x["print_str"].lower()), + key=lambda x: x["default_info"], + reverse=True, + ): + print(arg["print_str"] + arg["default_info"], flush=True) + print("---------------- end of arguments ----------------", flush=True) + + ############################################################################################################################ + # start of calculations and derived values + + def configure_distributed_args(self): + """ + Configures distributed training arguments from local variables set by deepspeed launcher. + """ + if self.deepspeed_mpi: + from deepspeed.comm import mpi_discovery + + mpi_discovery() + + if self.deepspeed_slurm: + os.environ["LOCAL_RANK"] = os.environ["SLURM_LOCALID"] + os.environ["RANK"] = os.environ["SLURM_PROCID"] + os.environ["WORLD_SIZE"] = ( + os.environ["SLURM_NTASKS"] + if os.environ.get("SLURM_NTASKS") is not None + else str( + int(os.environ["SLURM_NNODES"]) + * int(os.environ["SLURM_NTASKS_PER_NODE"]) + ) + ) + + self.update_value("local_rank", int(os.getenv("LOCAL_RANK", "0"))) + self.update_value("rank", int(os.getenv("RANK", "0"))) + self.update_value("world_size", int(os.getenv("WORLD_SIZE", "1"))) + + if self.rank == 0: + print( + self.__class__.__name__ + + ".configure_distributed_args() using world size: {} and model-parallel size: {} ".format( + self.world_size, self.model_parallel_size + ), + flush=True, + ) + + @staticmethod + def calculate_batch_parameters( + dp_world_size, train_batch=None, micro_batch=None, grad_acc=None + ): + # all values are provided nothing needs to be set + if train_batch is not None and micro_batch is not None and grad_acc is not None: + return train_batch, micro_batch, grad_acc + + # gradient_accumulation_steps needs to be set + elif train_batch is not None and micro_batch is not None: + grad_acc = train_batch // micro_batch + grad_acc //= dp_world_size + + # micro_batch_per_gpu needs to be set + elif train_batch is not None and grad_acc is not None: + micro_batch = train_batch // dp_world_size + micro_batch //= grad_acc + + # train_batch_size needs to be set + elif micro_batch is not None and grad_acc is not None: + train_batch = micro_batch * grad_acc + train_batch *= dp_world_size + + # gradient_accumulation_steps and micro_batch_per_gpus is set + elif train_batch is not None: + grad_acc = 1 + micro_batch = train_batch // dp_world_size + + # train_batch_size and gradient_accumulation_step is set + elif micro_batch is not None: + train_batch = micro_batch * dp_world_size + grad_acc = 1 + + # either none of the three parameters are provided or just gradient_accumulation_step is provided + else: + assert ( + False + ), "Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + return int(train_batch), int(micro_batch), int(grad_acc) + + @staticmethod + def check_batch_parameters(dp_world_size, train_batch, micro_batch, grad_acc): + assert ( + train_batch > 0 + ), f"Train batch size: {train_batch} has to be greater than 0" + + assert ( + micro_batch > 0 + ), f"Micro batch size per gpu: {micro_batch} has to be greater than 0" + + assert ( + grad_acc > 0 + ), f"Gradient accumulation steps: {grad_acc} has to be greater than 0" + + assert train_batch == micro_batch * grad_acc * dp_world_size, ( + f"Check batch related parameters. 
train_batch_size is not equal" + " to micro_batch_per_gpu * gradient_acc_step * world_size \n" + f"{train_batch} != {micro_batch} * {grad_acc} * {dp_world_size}" + ) + + def calculate_derived(self): + """ + Derives additional configuration values necessary for training from the current config + """ + + # number of gpus + # Get number of GPUs param or hostfile to determine train_batch_size + global_num_gpus = getattr(self, "global_num_gpus", None) + if global_num_gpus is None: + if self.hostfile is not None or os.path.exists(DLTS_HOSTFILE): + hostfile_path = self.hostfile or DLTS_HOSTFILE + resources = obtain_resource_pool( + hostfile_path, self.include or "", self.exclude or "" + ) + if self.num_nodes is not None and self.num_nodes > 0: + resources = { + k: resources[k] + for k in list(resources.keys())[: self.num_nodes] + } + global_num_gpus = sum(map(len, resources.values())) + if self.num_gpus is not None and self.num_gpus > 0: + global_num_gpus = self.num_gpus * len(resources) + else: + global_num_gpus = torch.cuda.device_count() + self.update_value("global_num_gpus", global_num_gpus) + + logging.info( + self.__class__.__name__ + + ".calculate_derived() " + + f"Total number of GPUs determined to be: {global_num_gpus}" + ) + + # get world size in the model/pipe parallel case, the actual `world size` deepspeed uses is the size of the + # data-parallel group, or (num_gpus / mp_size) / pp_size + pp_size = self.pipe_parallel_size + pp_size = pp_size if pp_size >= 1 else 1 + mp_size = self.model_parallel_size + mp_size = mp_size if mp_size >= 1 else 1 + self.update_value("model_parallel_size", mp_size) + + # pp_size and mp_size are only used here to compute dp world size and nowhere else. + dp_world_size = (global_num_gpus / pp_size) / mp_size + if not (dp_world_size % 1 == 0): + error_message = ( + f"{ERROR}" + + self.__class__.__name__ + + ".calculate_derived() " + + f"(global_num_gpus / pp_size) / mp_size [({global_num_gpus} / {pp_size}) / {mp_size}] must be a whole number" + ) + logging.error(error_message) + raise AssertionError(error_message) + + # Automatically derive train_batch_size = train_micro_batch_size_per_gpu*global_num_gpus*gradient_accumulation_steps + ( + train_batch_size, + train_micro_batch_size_per_gpu, + gradient_accumulation_steps, + ) = self.calculate_batch_parameters( + dp_world_size=dp_world_size, + train_batch=self.train_batch_size, + micro_batch=self.train_micro_batch_size_per_gpu, + grad_acc=self.gradient_accumulation_steps, + ) + self.check_batch_parameters( + dp_world_size=dp_world_size, + train_batch=train_batch_size, + micro_batch=train_micro_batch_size_per_gpu, + grad_acc=gradient_accumulation_steps, + ) + self.update_values( + { + # batch size params + "train_batch_size": train_batch_size, + "train_micro_batch_size_per_gpu": train_micro_batch_size_per_gpu, + "gradient_accumulation_steps": gradient_accumulation_steps, + "batch_size": train_micro_batch_size_per_gpu, + # duplicate items + "clip_grad": self.gradient_clipping, + } + ) + + # derive precision + fp16_conflict = "DeepSpeed fp16 field was set but precision conflicts" + if self.fp16 and self.fp16.get("enabled", False): + if self.precision is None: + self.update_value("precision", "fp16") + else: + assert self.precision == "fp16", fp16_conflict + + if self.precision == "fp16": + if isinstance(self.fp16, dict) and len(self.fp16) > 0: + fp16_args = copy.deepcopy(self.fp16) + fp16_args["enabled"] = True + else: + fp16_args = {"type": "fp16", "enabled": True} + self.update_value("fp16", fp16_args) + elif 
self.precision == "bfloat16": + bf_config = {"bf16": {"enabled": True}} + # dt_config = {"grad_accum_dtype": "fp32"} + if self.deepspeed_extra_args is None: + self.update_value("deepspeed_extra_args", bf_config) + else: + extra_args = copy.deepcopy(self.deepspeed_extra_args) + extra_args.update(bf_config) + self.update_value("deepspeed_extra_args", extra_args) + + zero_stage = self.zero_optimization["stage"] + if self.data_types is None: + fp32_grad_accum = False + else: + fp32_grad_accum = self.data_types.get("grad_accum_dtype") == "fp32" + if (zero_stage > 0) and (pp_size > 0) and not fp32_grad_accum: + # Remove this code when this issue is resolved + # https://github.com/microsoft/DeepSpeed/issues/1835 + logging.warn( + "Outstanding DeepSpeed issue means that pp>0, zero1, and bf16 will break without fp32 grads" + ) + else: + self.update_value("precision", "fp32") + + # zero optimization + if self.zero_optimization is None: + self.zero_optimization = copy.deepcopy( + ZERO_DEFAULTS + ) # a dict is overwritten and not updated key by key + try: + stage = self.zero_optimization["stage"] + if stage in (0, 1, 2, 3): + self.update_values( + { + "zero_stage": self.zero_optimization.get( + "stage", ZERO_DEFAULTS["stage"] + ), + "zero_reduce_scatter": self.zero_optimization.get( + "reduce_scatter", ZERO_DEFAULTS["reduce_scatter"] + ), + "zero_contiguous_gradients": self.zero_optimization.get( + "contiguous_gradients", + ZERO_DEFAULTS["contiguous_gradients"], + ), + "zero_reduce_bucket_size": self.zero_optimization.get( + "reduce_bucket_size", ZERO_DEFAULTS["reduce_bucket_size"] + ), + "zero_allgather_bucket_size": self.zero_optimization.get( + "allgather_bucket_size", + ZERO_DEFAULTS["allgather_bucket_size"], + ), + } + ) + else: + assert ( + self.autotuning is not None + ), f"Zero Stage must be an integer unless you are doing autotuning, not {stage}" + except KeyError as ke: + print(f"Zero Optimization config: {self.zero_optimization}") + raise ke + + # optimizer and scheduler + opt_params = self.optimizer or { + "type": OPT_DEFAULT, + "params": OPT_PARAMS_DEFAULTS, + } + self.update_values( + { + "optimizer_type": opt_params.get("type", OPT_DEFAULT), + "lr": opt_params["params"].get("lr", OPT_PARAMS_DEFAULTS["lr"]), + } + ) + + if self.optimizer_type.lower() == "onebitadam": + assert ( + self.train_iters is not None + ), "OneBitAdam requires train_iters to be specified" + + # onebitadam needs to instantiated by deepspeed, and so we need to pass deepspeed scheduler args + # for all other optimizers, the scheduling is handled by megatron + self.scheduler = { + "type": "WarmupDecayLR", # for now this is the only ds scheduler offering decay + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": self.lr, + "warmup_num_steps": int(self.train_iters * self.warmup), + "total_num_steps": self.lr_decay_iters or self.train_iters, + }, + } + + # Fp16 loss scaling. 
+ self.update_value("dynamic_loss_scale", self.loss_scale is None) + + # Update 'is pipe parallel' flag + # if we set pipe_parallel_size to 0, GPT2ModelPipe.to_sequential() is called, and we run training with + # the sequential model without the PipelineModule wrapper to avoid the overhead it incurs + self.update_value("is_pipe_parallel", self.pipe_parallel_size >= 1) + if self.moe_num_experts > 1: + assert not ( + self.is_pipe_parallel or self.pipe_parallel_size > 1 + ), "MoE not supported with pipeline parallelism" + assert self.zero_optimization["stage"] != 3, "MoE not compatible with zero3" + + assert ( + self.sequence_parallel is False + ), "MoE not compatible with Sequence Parallel" + + # Attention config + if self.attention_config is None: + self.update_value("attention_config", [[["global"], self.num_layers]]) + self.update_value( + "attention_config", + expand_attention_types(self.attention_config, self.num_layers), + ) + assert ( + len(self.attention_config) == self.num_layers + ), "Length of attention config list must equal num_layers" + for item in self.attention_config: + assert ( + item in ATTENTION_TYPE_CHOICES + ), f"Attention type {item} not recognized" + if "gmlp" in self.attention_config or "amlp" in self.attention_config: + assert ( + not self.partition_activations + ), "GMLP Blocks are not compatible with partition activations" + if "mamba" in self.attention_config: + if isinstance(self.zero_stage, int): + assert self.zero_stage <= 2, "Zero stage 3 not compatible with Mamba" + assert ( + self.hidden_dropout == 0.0, + ), "Mamba does not yet have dropout implemented" + if "rwkv" in self.attention_config: + assert ( + self.model_parallel_size == 1 + ), "RWKV not currently compatible with model parallelism" + if isinstance(self.zero_stage, int): + assert self.zero_stage <= 2, "Zero stage 3 not compatible with RWKV" + assert ( + self.hidden_dropout == 0.0, + ), "RWKV does not yet have dropout implemented" + + # Sparsity config + if self.sparsity_config is None: + # Can't have a default value as an empty dict so need to set it here + self.update_value("sparsity_config", {}) + + # Multi-query or grouped-query attention settings + if self.num_kv_heads is not None: + # need KV heads <= query heads, and KV heads dividing query heads evenly + assert ( + self.num_attention_heads % self.num_kv_heads == 0 + ), "num_kv_heads must evenly divide num_attention_heads and be no greater than it" + + if self.num_kv_heads < self.num_attention_heads: + # GQA / MQA not compatible with sparse attention configurations + assert ( + not self.sparsity_config + ), "Sparse attention not compatible with GQA or MQA" + assert all( + (attn_type == "flash") or (attn_type == "global") + for attn_type in self.attention_config + ), "GQA / MQA currently only compatible with Flash or standard global/sliding window Attention" + assert ( + self.num_kv_heads % self.model_parallel_size == 0 + ), "Number of KV heads must be at least model_parallel_size for now!" + # Flash attention version >=2.3.0 required to combine Flash + Sliding Window Attention + if "flash" in self.attention_config: + _flash_version = packaging.version.Version(version("flash-attn")) + if self.sliding_window_width is not None: + assert _flash_version >= packaging.version.Version( + "2.3.0" + ), f"Flash-Attention version ({str(_flash_version)}) must be >= 2.3.0 to support sliding window attention." 
+ if self.pos_emb == "alibi": + if not _flash_version >= packaging.version.Version("2.4.0.post1"): + print( + f"Warning: Flash-Attention version ({str(_flash_version)}) must be >= 2.4.0.post1 to support AliBi. Falling back to flash-attn triton backend, but version 2.4.0.post1 or later will be required in future." + ) + + # Adding equal dataset weights if none are provided + if self.train_data_paths and (self.train_data_weights is None): + self.train_data_weights = [1.0] * len(self.train_data_paths) + elif self.pos_train_data_paths and (self.train_data_weights is None): + self.train_data_weights = [1.0] * len(self.pos_train_data_paths) + if self.valid_data_paths and (self.valid_data_weights is None): + self.valid_data_weights = [1.0] * len(self.valid_data_paths) + elif self.pos_valid_data_paths and (self.valid_data_weights is None): + self.valid_data_weights = [1.0] * len(self.pos_valid_data_paths) + if self.test_data_paths and (self.test_data_weights is None): + self.test_data_weights = [1.0] * len(self.test_data_paths) + elif self.pos_test_data_paths and (self.test_data_weights is None): + self.test_data_weights = [1.0] * len(self.pos_test_data_paths) + + if self.train_label_data_paths: + err_str = "Must use `train_label_data_paths` with `train_data_paths`, not `data_path`" + assert self.train_data_paths and not self.data_path, err_str + + # if a sample input file is provided, default text_gen_type type to input-file + if self.text_gen_type is None: + if self.sample_input_file: + self.update_value("text_gen_type", "input-file") + else: + self.update_value("text_gen_type", "unconditional") + + ############################################################################################################################ + # start of validation functions + + @classmethod + def validate_keys(cls): + """ + test that there are no duplicate arguments + """ + source_classes = list(cls.__bases__) + defined_properties = dict() + + for source_class in source_classes: + source_vars = list(source_class.__dataclass_fields__) + for item in source_vars: + if item in defined_properties.keys(): + logging.error( + f"({cls.__name__}) duplicate of item: {item}, in class {source_class.__name__} and {defined_properties[item]}" + ) + return False + else: + defined_properties[item] = source_class.__name__ + return True + + def validate_values(self): + # the current codebase assumes running with deepspeed only + if not self.deepspeed: + return False + + # learning rate + if self.lr is None: + error_message = ( + f"{FAIL} " + self.__class__.__name__ + ".validate_values() lr is None" + ) + logging.error(error_message) + raise ValueError(error_message) + return False + + # required arguments + required_args = [ + "num_layers", + "hidden_size", + "num_attention_heads", + "max_position_embeddings", + ] + for req_arg in required_args: + if getattr(self, req_arg) is None: + error_message = ( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_values() " + + req_arg + + " is None." + ) + logging.error(error_message) + raise ValueError(error_message) + return False + + # Checks. 
+        if self.hidden_size % self.num_attention_heads != 0 and not (
+            "mamba" in self.attention_config
+        ):
+            error_message = (
+                f"{FAIL}"
+                + self.__class__.__name__
+                + ".validate_values() hidden_size must be divisible by num_attention_heads"
+            )
+            logging.error(error_message)
+            raise ValueError(error_message)
+            return False
+
+        if self.seq_length is not None:
+            if not (self.max_position_embeddings >= self.seq_length):
+                error_message = (
+                    f"{FAIL}"
+                    + self.__class__.__name__
+                    + ".validate_values() max_position_embeddings must be greater than or equal to seq_length"
+                )
+                logging.error(error_message)
+                raise ValueError(error_message)
+                return False
+
+        if not (self.min_lr <= self.lr):
+            error_message = (
+                f"{FAIL}"
+                + self.__class__.__name__
+                + ".validate_values() min_lr must be less than or equal to lr"
+            )
+            logging.error(error_message)
+            raise ValueError(error_message)
+            return False
+
+        if (
+            self.save is not None
+            and self.checkpoint_factor is None
+            and self.extra_save_iters is None
+        ):
+            error_message = (
+                f"{FAIL}"
+                + self.__class__.__name__
+                + ".validate_values() checkpoint_factor or extra_save_iters must be defined if save is defined"
+            )
+            logging.error(error_message)
+            raise ValueError(error_message)
+            return False
+
+        # Parameters sharing does not work with torch DDP.
+        if (self.num_unique_layers is not None) and (self.num_layers is not None):
+            if not (self.num_unique_layers <= self.num_layers):
+                error_message = (
+                    f"{FAIL}"
+                    + self.__class__.__name__
+                    + ".validate_values() num-unique-layers must be less than or equal to num_layers"
+                )
+                logging.error(error_message)
+                raise ValueError(error_message)
+                return False
+
+            if not (self.num_layers % self.num_unique_layers == 0):
+                error_message = (
+                    f"{FAIL}"
+                    + self.__class__.__name__
+                    + ".validate_values() num-layers should be divisible by num-unique-layers"
+                )
+                logging.error(error_message)
+                raise ValueError(error_message)
+                return False
+
+        if self.fp16_lm_cross_entropy and self.precision != "fp16":
+            error_message = (
+                f"{FAIL}"
+                + self.__class__.__name__
+                + ".validate_values() lm cross entropy in fp16 is only supported in fp16 mode."
+ ) + logging.error(error_message) + raise ValueError(error_message) + return False + + # assert that if one of train/test/valid_data_path are provided, data_path should not be + has_separate_path = [ + data_path is not None + for data_path in [ + self.train_data_paths, + self.valid_data_paths, + self.test_data_paths, + ] + ] + if all(has_separate_path): + assert self.data_path is None, ( + f"{FAIL} Please provide *either* `data_path` or `train/valid/test_data_path` " + "in args " + ) + + # assert that if one of train/test/valid_data_path are provided, all should be + assert_error_mess = ( + f"{FAIL} One or more of train/valid/test data_path are not provided:\n\t" + ) + assert_error_mess += "\n\t".join( + [ + f"{name} data paths: {data_path}," + for name, data_path in [ + ["train", self.train_data_paths], + ["valid", self.valid_data_paths], + ["test", self.test_data_paths], + ] + ] + ) + assert any(has_separate_path) == all(has_separate_path), assert_error_mess + + # assert that if train / valid / test data path(s) and weights are provided, that the paths and the weights should be equal length + if self.train_data_paths is not None: + assert len(self.train_data_paths) == len(self.train_data_weights) + if self.valid_data_paths is not None: + assert len(self.valid_data_paths) == len(self.valid_data_weights) + if self.test_data_paths is not None: + assert len(self.test_data_paths) == len(self.test_data_weights) + + return True + + def validate_types(self): + """ + At runtime, checks types are actually the type specified. + """ + for field_name, field_def in self.__dataclass_fields__.items(): + actual_value = getattr(self, field_name) + if actual_value is None: + continue # we allow for some values not to be configured + + if self.autotuning is not None and actual_value == "auto": + continue + + actual_type = type(actual_value) + if actual_type != field_def.type: + if ( + actual_type == int and field_def.type == float + ): # floats should be able to be configured as ints + continue + + # for typing.Literal (i.e a list of choices) - checks that actual value is in accepted values + elif field_def.type.__origin__ == Literal: + accepted_values = field_def.type.__args__ + if actual_value in accepted_values: + continue + elif type(actual_value) == str: + # case insensitive checking + lowercase_accepted_values = [ + i.lower() for i in accepted_values if isinstance(i, str) + ] + if actual_value.lower() in lowercase_accepted_values: + continue + logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: '{actual_value}' Not in accepted values: '{accepted_values}'" + ) + return False + elif field_def.type.__origin__ == Union: + accepted_types = field_def.type.__args__ + if actual_type in accepted_types: + continue + else: + logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: '{actual_type}' not in {accepted_types}" + ) + return False + + logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: '{actual_type}' instead of '{field_def.type}'" + ) + return False + + # validate deepspeed dicts + for field_name in ["optimizer", "scheduler"]: + value = getattr(self, field_name) + if isinstance( + value, dict + ): # dict is checked above, only fields are checked here + if "type" in value: + if not isinstance(value["type"], str): + logging.error( + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: key 'type' must be a string" + ) + return False + else: + 
logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: must contain key 'type'" + ) + return False + if "params" in value: + if not isinstance(value["params"], dict): + logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: key 'params' must be a dict" + ) + return False + else: + logging.error( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: must contain key 'params'" + ) + return False + + for field_name in ["fp16", "amp", "flops_profiler"]: + value = getattr(self, field_name) + if isinstance(value, dict): + if not "enabled" in value: + error_message = ( + f"{FAIL}" + + self.__class__.__name__ + + ".validate_types() " + + f"{field_name}: must contain key 'enabled'" + ) + logging.error(error_message) + return False + + return True diff --git a/megatron/neox_arguments/deepspeed_args.py b/megatron/neox_arguments/deepspeed_args.py new file mode 100644 index 0000000000000000000000000000000000000000..270e67f8ce876553a16c1eb03c6fe5cb3eb68190 --- /dev/null +++ b/megatron/neox_arguments/deepspeed_args.py @@ -0,0 +1,369 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +try: + from .template import NeoXArgsTemplate +except ImportError: + from template import NeoXArgsTemplate + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + + +@dataclass +class NeoXArgsDeepspeedConfig(NeoXArgsTemplate): + """ + Args for deepspeed config + Every argument included here will be included in deepspeed config json + As of Mar 8 2023, up to date compared to https://www.deepspeed.ai/docs/config-json/ + """ + + deepspeed: bool = True + """boolean flag to enable DeepSpeed (Always True)""" + + train_batch_size: int = None + """ + The effective training batch size. This is the amount of data samples that leads to one step of model update. train_batch_size is aggregated by the batch size that a single GPU processes in one forward/backward pass (a.k.a., train_step_batch_size), the gradient accumulation steps (a.k.a., gradient_accumulation_steps), and the number of GPUs. + """ + + train_micro_batch_size_per_gpu: int = None + """ + Batch size to be processed by one GPU in one step (without gradient accumulation). When specified, gradient_accumulation_steps is automatically calculated using train_batch_size and number of GPUs. Should not be concurrently specified with gradient_accumulation_steps in the configuration JSON. + """ + + gradient_accumulation_steps: int = 1 + """ + Number of training steps to accumulate gradients before averaging and applying them. This feature is sometimes useful to improve scalability since it results in less frequent communication of gradients between steps. Another impact of this feature is the ability to train with larger batch sizes per GPU. When specified, train_step_batch_size is automatically calculated using train_batch_size and number of GPUs. 
Should not be concurrently specified with train_step_batch_size in the configuration JSON. + """ + + optimizer: dict = None + """ + dict containing the keys type and params + + type: The optimizer name. DeepSpeed natively supports Adam, AdamW, OneBitAdam, Lamb, and OneBitLamb optimizers (See here for details) and will import other optimizers from torch. + + params: Dictionary of parameters to instantiate optimizer. The parameter names must match the optimizer constructor signature (e.g., for Adam). + """ + + scheduler: dict = None + """ + dict containing the keys type and params + + type: The scheduler name. See here (https://deepspeed.readthedocs.io/en/latest/schedulers.html) for list of support schedulers. + + params: Dictionary of parameters to instantiate scheduler. The parameter names should match scheduler constructor signature. + """ + + fp32_allreduce: bool = False + """ + During gradient averaging perform allreduce with 32 bit values + """ + + prescale_gradients: bool = False + """ + Scale gradients before doing allreduce + """ + + gradient_predivide_factor: float = 1.0 + """ + Before gradient averaging predivide gradients by a specified factor, can sometimes help with fp16 stability when scaling to large numbers of GPUs + """ + + sparse_gradients: bool = False + """ + Enable sparse compression of torch.nn.Embedding gradients. + """ + + # ---FP16 Training Options--- + + fp16: dict = None + """ + Configuration for using mixed precision/FP16 training that leverages NVIDIA’s Apex package. + + Dictionary options as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#fp16-training-options + """ + + bf16: dict = None + """ + Configuration for using bfloat16 floating-point format as an alternative to FP16. BFLOAT16 requires hardware support (e.g., NVIDIA A100). + + Dictionary options as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#bfloat16-training-options + """ + + # ---Automatic Mixed Precision (AMP) Training Options--- + + amp: dict = None + """ + Configuration for using automatic mixed precision (AMP) training that leverages NVIDIA’s Apex AMP package. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options + """ + + gradient_clipping: float = 1.0 + """ + Enable gradient clipping with provided value + """ + + # ---ZeRO Optimization Options--- + + zero_optimization: dict = None + """ + Configuration for using ZeRO optimization. + + Multi-level dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#zero-optimization-options + """ + + # ---Logging Options--- + + curriculum_learning: dict = None + """""" + + curriculum_seqlen: int = 0 + """ + Internal var for tracking the current seqlen + """ + + steps_per_print: int = 10 + """ + Print train loss every N steps. + """ + + wall_clock_breakdown: bool = False + """ + Enable timing of the latency of forward/backward/update training phases. + """ + + dump_state: bool = False + """ + Print out state information of DeepSpeed object after initialization. + """ + + # ---FLOPS Profiler Options--- + + flops_profiler: dict = None + """ + Configuration for using FLOPS profiler. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#flops-profiler + """ + + # ---Communication Options--- + + communication_data_type: bool = None + """ + During gradient averaging, perform communication with selected data type. 
By default it will be determined by selected regime + """ + + # ---Autotuning Options--- + autotuning: dict = None + """ + Configuration for using autotuning. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#autotuning + """ + + # ---Activation Checkpointing Options--- + + activation_checkpointing: dict = None + """ + Configuration for using activation checkpointing. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#activation-checkpointing + """ + + # ---Sparse Attention Options--- + + sparse_attention: dict = None + """ + Configuration for using sparse attention. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#sparse-attention + + """ + + # ---Data Efficiency Options--- + + data_efficiency: dict = None + """ + Configuration for using data efficiency. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#data-efficiency + """ + + # ---Monitoring Module Options--- + + tensorboard: dict = None + """ + Configuration for using tensorboard. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#monitoring-module-tensorboard-wandb-csv + """ + + wandb: dict = None + """ + Configuration for using wandb. + """ + + csv_monitor: dict = None + """ + Configuration for using csv_monitor. + """ + + # ---Elastic Training Options--- + + elasticity: dict = None + """ + Configuration for using elastic training. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#elastic-training-config-v01-and-v02 + """ + + # ---Communication Logging Options--- + + comms_logger: dict = None + """ + Configuration for using communication logger. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#communication-logging + """ + + # ---Compression Options--- + + compression_training: dict = None + """ + Configuration for using compression training. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#compression + """ + + # ---Checkpointing Options--- + + checkpoint: dict = None + """ + Configuration for using checkpointing. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#checkpoint-options + """ + + # ---Data Type Options--- + + data_types: dict = None + """ + Configuration for using data types. + + Dictionary as described in Deepspeed documentation: https://www.deepspeed.ai/docs/config-json/#data-type-options + """ + + # ---EXTRA ARGUMENTS--- + + deepspeed_extra_args: dict = None + """ + Dictionary of extra arguments to be included in the yaml config file. This can be used for any argument not included in the above list. + """ + + autotuning: dict = None + """Dictionary as described in DeepSpeed autotuning documentation: https://github.com/microsoft/DeepSpeed/tree/master/deepspeed/autotuning""" + + +@dataclass +class NeoXArgsDeepspeedRunner(NeoXArgsTemplate): + """ + Args for deepspeed runner (deepspeed.launcher.runner). 
+    Every argument included here will be passed as a command line argument to deepspeed.launcher.runner
+    """
+
+    hostfile: str = None
+    """
+    list of hostnames / ssh aliases and the number of GPUs per host
+
+    example file contents:
+    worker-1 slots=4
+    worker-2 slots=4
+    127.0.0 slots=4
+    127.0.1 slots=4
+    """
+
+    include: str = None
+    """
+    Specify hardware resources to use during execution. String format is `NODE_SPEC[@NODE_SPEC ...]` where `NODE_SPEC=NAME[:SLOT[,SLOT ...]]`. If `:SLOT` is omitted, include all slots on that host. Example: `"worker-0@worker-1:0,2"` will use all slots on `worker-0` and slots `[0, 2]` on `worker-1`.
+    """
+
+    exclude: str = None
+    """
+    Specify hardware resources to NOT use during execution. Same format as include
+    """
+
+    num_nodes: int = -1
+    """
+    Total number of worker nodes to run on, this will use the top N hosts from the given hostfile. -1 will use all.
+    """
+
+    num_gpus: int = None
+    """
+    Max number of GPUs to use on each node, will use [0:N) GPU ids on each node. None / not specifying a value will use all.
+    """
+
+    master_port: int = 29500
+    """
+    Port used by PyTorch distributed for communication during training.
+    """
+
+    master_addr: str = None
+    """
+    IP address of node 0, will be inferred via 'hostname -I' if not specified.
+    """
+
+    launcher: Literal["pdsh", "openmpi", "mvapich", "slurm"] = "pdsh"
+    """
+    Launcher backend for multi-node training. Options currently include PDSH, OpenMPI, MVAPICH, and Slurm.
+    """
+
+    force_multi: bool = False
+    """
+    Force multi-node launcher mode even if only one node is specified. Helps in cases where the user wants to launch on a single remote node.
+    """
+
+    detect_nvlink_pairs: bool = False
+    """
+    If true, autodetects nvlink pairs and remaps cuda visible devices to place them next to each other. This is an Eleuther addition to deepspeed, and should speed up model parallel training on setups with nvlink pairs when mp=2.
+    """
+
+    autotuning_run: str = None
+    """
+    Either "tune", "run", or `None`.
+    """
+
+    no_ssh_check: bool = False
+    """
+    If true, overrides the default check where DeepSpeed confirms that the headnode is accessible via ssh.
+    """
+
+    comment: str = None
+    """
+    Adds a `--comment` to the DeepSpeed launch command. In DeeperSpeed this is passed on to the SlurmLauncher as well. Sometimes necessary for cluster rules, or so I've heard.
+    """
+
+    account: str = None
+    """
+    Adds a `--account` to the DeepSpeed launch command. In DeeperSpeed this is passed on to the SlurmLauncher as well. Sometimes necessary for cluster rules, or so I've heard.
+    """
diff --git a/megatron/neox_arguments/neox_args.py b/megatron/neox_arguments/neox_args.py
new file mode 100644
index 0000000000000000000000000000000000000000..c877c6c7881fe1268590cc18480a8d45ad0c662b
--- /dev/null
+++ b/megatron/neox_arguments/neox_args.py
@@ -0,0 +1,1570 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
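To make the MPI-style hostfile format documented for `NeoXArgsDeepspeedRunner.hostfile` above concrete: each line names a host (or ssh alias) and its GPU slot count, and the launcher derives its resource pool from it. A minimal sketch of the assumed parsing (hypothetical helper; the actual work is done by `obtain_resource_pool` and DeepSpeed's runner, not shown in this diff):

```python
def parse_hostfile_sketch(text: str) -> dict:
    """Map MPI-style 'host slots=N' lines to {host: N}."""
    pool = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip blank lines and comments
        host, slots_spec = line.split()
        pool[host] = int(slots_spec.split("=")[1])  # "slots=4" -> 4
    return pool

assert parse_hostfile_sketch("worker-1 slots=4\nworker-2 slots=4") == {
    "worker-1": 4,
    "worker-2": 4,
}
```

`num_nodes` then keeps only the first N hosts of this pool, and `calculate_derived` sums the remaining per-host slots to obtain `global_num_gpus`.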
+ +import subprocess +from dataclasses import dataclass + +try: + from .template import NeoXArgsTemplate +except ImportError: + from template import NeoXArgsTemplate + +try: + from typing import List, Literal, Union, Optional, Any +except ImportError: + from typing_extensions import List, Literal, Union, Optional + + +ATTENTION_TYPE_CHOICES = [ + "global", + "local", + "sparse_fixed", + "sparse_variable", + "bigbird", + "bslongformer", + "gmlp", + "amlp", + "flash", + "rwkv", + "mamba", +] + + +def get_git_commit_hash(): + """Gets the git commit hash of your current repo (if it exists)""" + try: + git_hash = subprocess.check_output(["git", "describe", "--always"]).strip() + git_hash = git_hash.decode() + except (subprocess.CalledProcessError, FileNotFoundError): + git_hash = None + return git_hash + + +@dataclass +class NeoXArgsParallelism(NeoXArgsTemplate): + """ + Parallelism Arguments + """ + + pipe_parallel_size: int = 0 + """ + Number of pipeline parallel stages. Disable with 0. + """ + + model_parallel_size: int = 1 + """ + Size of the model parallelism. + """ + + pipe_partition_method: str = "type:transformer|mlp" + """ + method used to distribute model layers across pipeline stages. Choose from "parameters", which balances the number + of parameters on each pipeline stage, "uniform", which naively balances the number of layers per stage, or + "type:[regex]", which balances layers whose class names match [regex] + """ + + world_size: int = None + """ + Total world size (i.e number of gpus in cluster). Configured post-launch using distributed launcher + """ + + is_pipe_parallel: bool = False + """ + flag to determine whether pipeline parallelism is on - shouldn't be set by user, is automatically determined + according to pipeline parallel size. + """ + + sequence_parallel: bool = False + """ + flag to determine whether Megatron-style Sequence Parallelism (https://arxiv.org/abs/2205.05198) + (Layernorm inputs and activations are sharded across model parallel group) will be used. Has no effect when model_parallel_size is 1. + **Set by user, in contrast to neox_args.is_pipe_parallel.** + """ + + expert_interval: int = 2 + """ + Have one MoE layer every expert_interval layers + """ + + +@dataclass +class NeoXArgsModel(NeoXArgsTemplate): + """ + Model Arguments + """ + + precision: Literal["fp16", "fp32", "bfloat16"] = None + """ + description of the used precision, either one of fp16 or fp32 (and in the future bf16). + """ + + num_layers: int = None + """ + Number of transformer layers. + """ + + hidden_size: int = None + """ + Transformer hidden size. + """ + + intermediate_size: int = None + """ + Transformer intermediate size. Default = 4h + """ + + mlp_multiple_of: int = 1 + """ + force mlp size to be a multiple of this value + """ + + expansion_factor: float = None + """ + Transformer intermediate size. Default = 4 + """ + + num_attention_heads: int = None + """ + Number of transformer attention heads. + + If num_kv_heads is set, will control only number of query heads. + """ + + num_kv_heads: int = None + """ + Number of transformer key/value attention heads. + + If set to None or the same value as num_attention_heads, will perform multi-head attention (MHA). + If set to < num_attention_heads but > 1, will perform grouped-query attention (GQA) (https://arxiv.org/pdf/2305.13245.pdf) + If set to 1, will perform multi-query attention. + + Must be < num_attention_heads and divide num_attention_heads evenly. + """ + + seq_length: int = None + """ + Maximum sequence length to process. 
+ """ + + sliding_window_width: int = None + """ + Width of the attention sliding window. Only supported with Flash Attention 2. + """ + + max_position_embeddings: int = None + """ + Maximum number of position embeddings to use. This is the size of position embedding. + """ + + norm: Literal[ + "layernorm", "rmsnorm", "scalenorm", "te_rmsnorm", "te_layernorm" + ] = "layernorm" + """ + Normalization layer to use. Choose from "layernorm", "rmsnorm", "scalenorm", "te_rmsnorm", "te_layernorm". + """ + + layernorm_fusion: bool = False + """ + Use fused layer norm kernel (if `norm` is `layernorm`). + """ + + rmsnorm_fusion: bool = False + """ + Use fused RMS norm kernel (if `norm` is `rmsnorm`). + """ + + use_qk_layernorm: bool = False + """ + Use QK Normalization + """ + + layernorm_epsilon: float = 1.0e-5 + """ + Layer norm epsilon. + """ + + rms_norm_epsilon: float = 1.0e-8 + """ + Root mean squared norm epsilon + """ + + scalenorm_epsilon: float = 1.0e-8 + """ + Scalenorm epsilon + """ + + pos_emb: Literal[ + "learned", "rotary", "sinusoidal", "rpe", "alibi", "none" + ] = "learned" + """ + Type of positional embedding to use - choose from 'learned', 'rotary', 'sinusoidal', 'rpe', 'none' + """ + + rpe_num_buckets: int = 32 + """ + T5 relative positional encoding number of buckets, default 32. + """ + + rpe_max_distance: int = 128 + """ + T5 relative positional encoding max distance, default 128. + """ + + opt_pos_emb_offset: int = 0 + """ + Learned position embedding offset (only used by OPT, where it should be set to 2). + """ + + no_weight_tying: bool = False + """ + Disables weight tying between embedding weights and final Linear layer + """ + + attention_config: list = None + + """ + Attention configuration for gpt-neox + + The first item in the list specifies the attention type(s), and should be a list of strings. The second item + specifies the number of times to repeat those attention types in the full list. + + attention type choices: [global, local, sparse_fixed, sparse_variable, bslongformer, bigbird, "gmlp", "amlp", "flash", "mamba", "rwkv"] + + So a 12 layer network with only global attention could be specified like: + [[[`global`], 12]] + + or a 12 layer network with alternating global / local like: + [[[`global`, `local`], 6]] + + If none is specified, this defaults to + [[[`global`], n_layers]] + """ + + sparsity_config: dict = None + + """ + Sparsity configuration dict as defined in https://www.deepspeed.ai/docs/config-json/#sparse-attention + + Note that since neox is autoregressive, attention is always "unidirectional" and `horizontal_global_attention` is + always false. + + The main difference between our sparsity config and deepspeed's is that `mode` is ignored - since it is instead + specified in attention_config defining each layer. + + An example config is given below: + "sparse_attention": { + "block": 16, + "different_layout_per_head": true, + "num_local_blocks": 4, + "num_global_blocks": 1, + "num_different_global_patterns": 4, + "num_random_blocks": 0, + "local_window_blocks": [4], + "global_block_indices": [0], + "global_block_end_indices": None, + "num_sliding_window_blocks": 3 + } + """ + + num_unique_layers: int = None + """ + Number of unique transformer layers. num-layers should be divisible by this value. Currently only has an effect when pipe_parallel_size=0. + """ + + param_sharing_style: str = "grouped" + """ + Ordering of the shared parameters. 
For example, for a num-layers=4 and --num-unique-layers=2, we will have the following ordering for two unique layers 1 and 2-: grouped: [1, 2, 1, 2] and spaced: [1, 1, 2, 2]. + """ + + make_vocab_size_divisible_by: int = 128 + """ + Pad the vocab size to be divisible by this value. This is added for computational efficiency reasons. + """ + + activation: Literal[ + "gelu", + "geglu", + "relu", + "softsign", + "swish", + "mish", + "silu", + "reglu", + "swiglu", + "bilinear", + "glu", + ] = "gelu" + """ + Activation function to use - choose from ["gelu", "geglu", "relu", "softsign", "swish", "mish", "silu", "reglu", "swiglu", "bilinear", "glu"] + """ + + use_flashattn_swiglu: bool = False + """ + Use flash attention's version of swiglu + """ + + scaled_upper_triang_masked_softmax_fusion: bool = False + """ + Enable fusion of query_key_value_scaling time (upper diagonal) masking and softmax. + """ + + scaled_masked_softmax_fusion: bool = False + """ + Enable fusion of query_key_value_scaling general masking and softmax. + """ + + bias_gelu_fusion: bool = False + """ + Enable bias and gelu fusion. + """ + + bias_dropout_fusion: bool = False + """ + Enable bias and dropout fusion. + """ + + rope_fusion: bool = False + """ + Enable rotary embedding fusion. + """ + + fp16_lm_cross_entropy: bool = False + """ + Move the cross entropy unreduced loss calculation for lm head to fp16. + """ + + init_method_std: float = 0.02 + """ + Standard deviation of the zero mean normal distribution used for weight initialization. + """ + + apply_query_key_layer_scaling: bool = False + """ + Scale Q * K^T by 1 / layer-number. If this flag is set, then it will automatically set attention-softmax-in-fp32 to true + """ + + use_cpu_initialization: bool = False + """ + If set, affine parallel weights initialization uses CPU + """ + + attention_softmax_in_fp32: bool = False + """ + Run attention masking and softmax in fp32. + """ + + rotary_pct: float = 1.0 + """ + pct of hidden dims to apply rotary positional embedding to + """ + + rotary_emb_base: int = 10000 + """ + Base for rotary positional embedding + """ + + rotary_save_freqs_buffer: bool = False + """ + Used to control whether the `inv_freqs` buffer in rotary embeddings + will be stored in checkpoints (persistent=True) or not. + + Defaults to false, but is left configurable to maintain backward-compatibility + with GPT-NeoX checkpoints that were trained with this flag. + """ + + init_method: Literal[ + "normal", + "scaled_normal", + "orthogonal", + "scaled_orthogonal", + "xavier_uniform", + "xavier_normal", + "wang_init", + "small_init", + "single_residual_scaled_normal", + ] = "normal" + """ + Init function used on all layers except ff residual outputs - choose from + ["normal", "scaled_normal", "orthogonal", "scaled_orthogonal", "xavier_uniform", "xavier_normal", "wang_init", "small_init"] + """ + + output_layer_init_method: Literal[ + "normal", + "scaled_normal", + "orthogonal", + "scaled_orthogonal", + "xavier_uniform", + "xavier_normal", + "wang_init", + "small_init", + "single_residual_scaled_normal", + ] = "scaled_normal" + """ + Init function used for ff residual outputs - choose from + ["normal", "scaled_normal", "orthogonal", "scaled_orthogonal", "xavier_uniform", "xavier_normal", "wang_init", "small_init"] + """ + + gmlp_attn_dim: int = 64 + """ + the dimension of the single head self attention in gmlp model (not used in gpt models). + If None - gmlp model doesn't use attention. 
+ """ + + gpt_j_residual: bool = False + """ + If false, we use the conventional residual path: + x = x + attn(ln1(x)) + x = x + mlp(ln2(x)) + Otherwise, we use the residual path from GPT-J, which offers a slight speedup: + x = ln(x) + x = x + attn(x) + mlp(x) + """ + + gpt_j_tied: bool = False + """ + If false, we use + x = x + attn(ln1(x)) + mlp(ln2(x)) + Otherwise, we tie the layer norms + y = ln(x) + x = x + attn(y) + mlp(y) + """ + + use_bias_in_norms: bool = True + """ + If false, norms (e.g. LayerNorm) will not have bias terms + """ + use_bias_in_attn_linear: bool = True + """ + If false, attn_linear (e.g. QKVO) will not have bias terms + """ + use_bias_in_mlp: bool = True + """ + If false, mlps will not have bias terms + """ + + soft_prompt_tuning: dict = None + """ + Dictionary configuring the soft prompt tuning parameters. + If enabled, will train *only* the soft prompt, and freezes the rest of the model. + parameters in the dict are: + 'enabled': bool = True # enables soft prompting + 'num_tokens': int = 10 # length of the soft prompt in tokens + 'init_string': str = '' # if provided, initialize the soft prompt with the word embeddings of this string + 'init_range': float = 0.5 # if no init string is provided, initialize the soft prompt with a uniform distribution between -init_range and init_rang + """ + + mamba_selective_scan_fusion: bool = False + """ + Enable fused kernels for Mamba selective scan. + """ + + mamba_causal_conv_fusion: bool = False + """ + Enable fused kernels for Mamba causal Conv1d. + """ + + mamba_inner_func_fusion: bool = False + """ + Enable fused inner operator for Mamba. (Supersedes conv. and selective scan fusion flags, requires each of those kernels to be installed.) + """ + + mamba_selective_fp32_params: bool = True + """ + Keep selected parameters in fp32 for Mamba (A and D). + Requires https://github.com/EleutherAI/DeeperSpeed/pull/61 . + """ + + mamba_use_bias_in_conv: bool = True + """ + If false, conv1d in mamba block will not have bias term + """ + + mamba_use_bias_in_linears: bool = False + """ + Enable bias terms in mamba block up- and down- projections (in_proj and out_proj). + """ + + # Output layer parallelism over the hidden dim is currently broken (https://github.com/EleutherAI/gpt-neox/issues/905) + output_layer_parallelism: Literal["column"] = "column" + + """ + Parameter controlling whether the output layer is parallelized over the hidden dim (row) or the vocab dim (column) + """ + + dim_att: int = None + """ + Total dimension of the attention mechanism for RWKV. If not set, defaults to hidden_size. + """ + + head_size: int = None + """ + Size of each attention head for RWKV. Calculated as dim_att // num_attention_heads. + """ + + ffn_dim: int = None + """ + Dimension of the feed-forward network for RWKV. If not set, calculated based on hidden_size and expansion_factor. + """ + + +@dataclass +class NeoXArgsOptimizer(NeoXArgsTemplate): + """ + Optimizer Arguments + """ + + optimizer_type: Literal[ + "adam", + "onebitadam", + "cpu_adam", + "cpu_torch_adam", + "sm3", + "madgrad_wd", + "sgd", + "lion", + ] = "adam" + """ + Type of optimizer to use. Choose from ['adam', 'onebitadam', 'cpu_adam', 'cpu_torch_adam', 'sm3', 'madgrad_wd', 'sgd', 'lion'] + NOTE: sgd will use MuSGD from Mup. Mup must be enabled for this optimizer. 
+ """ + + use_bnb_optimizer: bool = False + """ + Whether to enable the bitsandbytes optimizers + """ + + zero_stage: Union[int, List[int], Literal["all"]] = None + """ + Zero Optimizer stage + """ + + zero_reduce_scatter: bool = None + """ + Zero: Uses reduce or reduce scatter instead of allreduce to average gradients + """ + + zero_contiguous_gradients: bool = None + """ + Zero: Copies the gradients to a contiguous buffer as they are produced. Avoids memory fragmentation during backward pass. Only useful when running very large models. + """ + + zero_reduce_bucket_size: int = None + """ + Zero: Number of elements reduced/allreduced at a time. Limits the memory required for the allgather for large model sizes + """ + + zero_allgather_bucket_size: int = None + """ + Zero: Number of elements allgathered at a time. Limits the memory required for the allgather for large model sizes + """ + + lr: float = None + """ + Max Learning rate during training + """ + + +@dataclass +class NeoXArgsLRScheduler(NeoXArgsTemplate): + """ + LR Scheduler Arguments + """ + + lr_decay_style: Literal["constant", "linear", "cosine", "exponential"] = "linear" + """ + Learning rate decay function. Choose from 'constant', 'linear', 'cosine', 'exponential'. + """ + + lr_decay_iters: int = None + """ + Number of iterations to decay learning rate over, If None defaults to + --train-iters or the equivalent inferred valued from train_epochs. + """ + + lr_decay_fraction: float = None + """ + Effective fraction of training over which to decay lr, overrides lr_decay_iters, useful when specifying train_epochs + """ + + min_lr: float = 0.0 + """ + Minimum value for learning rate. The scheduler clips values below this threshold. + """ + + warmup: float = 0.01 + """ + Percentage of total iterations to warmup on (.01 = 1 percent of all training iters). + """ + + override_lr_scheduler: bool = False + """ + Reset the values of the scheduler (learning rate,warmup iterations, minimum learning rate, maximum number of iterations, and decay style from input arguments and ignore values from checkpoints. Note that all the above values will be reset. + """ + + use_checkpoint_lr_scheduler: bool = False + """ + Use checkpoint to set the values of the scheduler (learning rate, warmup iterations, minimum learning rate, maximum number of iterations, and decay style from checkpoint and ignore input arguments. + """ + + +@dataclass +class NeoXArgsLogging(NeoXArgsTemplate): + """ + Logging Arguments + """ + + use_wandb: bool = None + """Flag indicating if wandb is to be used.""" + + wandb_group: str = None + """Weights and Biases group name - used to group together "runs".""" + + wandb_team: str = None + """Team name for Weights and Biases.""" + + wandb_project: str = "neox" + """wandb project name""" + + wandb_host: str = "https://api.wandb.ai" + """url of the wandb host""" + + wandb_init_all_ranks: bool = False + """Initialize wandb on all ranks.""" + + git_hash: str = get_git_commit_hash() + """current git hash of repository""" + + log_dir: str = None + """ + Directory to save logs to. + """ + + tensorboard_writer = None + """ + initialized tensorboard writer + """ + + tensorboard_dir: str = None + """ + Write TensorBoard logs to this directory. + """ + + use_comet: bool = None + """Flag indicating if comet is to be used.""" + + comet_workspace: Optional[str] = None + """ + Comet workspace name, if not configured Comet Experiments will be created in the user configured default workspace. 
+ """ + + comet_project: Optional[str] = None + """ + Comet project name, if not configured Comet Experiments will be created in the Uncategorized Experiments project. + """ + + comet_experiment_name: Optional[str] = None + """ + Custom name for the Comet experiment. If not provided, a random name is used. + """ + + comet_tags: Optional[list] = None + """ + List of tags to attach to the created Comet Experiment. + """ + + comet_others: Optional[dict] = None + """ + Custom metadata to attach to the created Comet Experiment. + """ + + comet_experiment: Any = None + """ + Initialized comet experiment object used to log data + """ + + log_interval: int = 100 + """ + Interval between logging. + """ + + log_grad_pct_zeros: bool = False + """ + Log the percentage of zeros for the gradient of each parameter to wandb / tensorboard (useful for debugging). Needs wandb_init_all_ranks set to True if using pipeline parallelism to log all ranks. + """ + + log_param_norm: bool = False + """ + Log the frob norm of the parameters to wandb / tensorboard (useful for debugging). Needs wandb_init_all_ranks set to True if using pipeline parallelism to log all ranks. + """ + + log_grad_norm: bool = False + """ + Log the frob norm of the gradients to wandb / tensorboard (useful for debugging). + (N.B - this will only work with pp = 0 for now, as we don't have access to the gradients of the model because + deepspeed.) + """ + + log_optimizer_states: bool = False + """ + Log the frob norm of the optimizer states to wandb / tensorboard (useful for debugging). + """ + + log_gradient_noise_scale: bool = False + """ + Whether to log the gradient noise scale when training (cf. https://arxiv.org/abs/1812.06162 for explanation) + """ + + gradient_noise_scale_n_batches: int = 5 + """ + Number of batches to accumulate gradients for in the gradient noise scale logger. + """ + + gradient_noise_scale_cpu_offload: bool = False + """ + Whether to offload the buffered gradients to cpu when measuring gradient noise scale. + """ + + memory_profiling: bool = False + """ + Whether to take a memory snapshot of the model. Useful for debugging memory issues. + """ + + memory_profiling_path: str = None + """ + Path to save memory snapshot to. + """ + + profile: bool = False + """ + Enable nsys and pytorch profiling. When using this option with nsys, + nsys options should be directly specified in commandline. + An example nsys commandline is + ``` + nsys profile -s none -t nvtx,cuda -o + --force-overwrite true + --capture-range=cudaProfilerApi + --capture-range-end=stop + ``` + """ + + profile_step_start: int = 10 + """ + Step to start profiling at. + """ + + profile_step_stop: int = 12 + """ + Step to stop profiling at. + """ + + +@dataclass +class NeoXArgsOther(NeoXArgsTemplate): + """ + Misc. Arguments + """ + + distributed_backend: str = "nccl" + """ + Which backend to use for distributed training. + """ + + local_rank: int = None + """ + local rank passed from distributed launcher. + """ + + rank: int = None + """ + global rank of process being run (passed in via distributed launcher) + """ + + lazy_mpu_init: bool = False + """ + If set to True, initialize_megatron() skips DDP initialization and returns function to complete it instead. Also turns on use-cpu-initialization flag. This is for external DDP manager. + """ + + short_seq_prob: float = 0.1 + """ + Probability of producing a short sequence. + """ + + eod_mask_loss: bool = False + """ + Mask loss for the end of document tokens. 
+ """ + + adlr_autoresume: bool = False + """ + Enable auto-resume on adlr cluster. + """ + + adlr_autoresume_object = None + """ + imported autoresume + """ + + adlr_autoresume_interval: int = 1000 + """ + Intervals over which check for auto-resume termination signal + """ + + seed: int = 1234 + """ + Random seed used for python, numpy, pytorch, and cuda. + """ + + onnx_safe: bool = False + """ + Use workarounds for known problems with Torch ONNX exporter + """ + + deepscale: bool = False + """ + (Deprecated) enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)' + """ + + deepscale_config: str = None + """(Deprecated) deepscale json configuration file.""" + + deepspeed_mpi: bool = False + """ + Run via MPI, this will attempt to discover the necessary variables to initialize torch distributed from the MPI environment + """ + + deepspeed_slurm: bool = False + """ + Run via SLURM, this will attempt to discover the necessary variables to initialize torch distributed from the SLURM environment + """ + + user_script: str = None + """ + user script to be run + """ + + iteration: int = None + """ + Set during training + """ + + do_train: bool = None + """ + Set during training + """ + + do_valid: bool = None + """ + Set during training + """ + + do_test: bool = None + """ + Set during training + """ + + global_num_gpus: int = None + """ + Set during launching + """ + + +@dataclass +class NeoXArgsTokenizer(NeoXArgsTemplate): + """ + Tokenizer Arguments + """ + + tokenizer_type: Literal[ + "GPT2BPETokenizer", + "HFTokenizer", + "HFGPT2Tokenizer", + "SPMTokenizer", + "CharLevelTokenizer", + "TiktokenTokenizer", + ] = "GPT2BPETokenizer" + """ + Type of tokenizer to use - should be one of ["GPT2BPETokenizer", "HFTokenizer", "HFGPT2Tokenizer", "SPMTokenizer", "CharLevelTokenizer", "TiktokenTokenizer"] + """ + + padded_vocab_size: int = None + """ + Total (padded) vocabulary size of tokenizer. Configured after launching of training, + as it's dependent on the parallelism size. + """ + + tokenizer = None + """ + tokenizer object loaded into memory and accessible by other functions + """ + + +@dataclass +class NeoXArgsTraining(NeoXArgsTemplate): + """ + Training Arguments + """ + + data_path: str = None + """ + Path to combined dataset to split. + """ + + use_shared_fs: bool = True + """ + Whether to use a shared filesystem for data loading. If False, local rank 0 on all nodes will preprocess the data, + otherwise only global rank 0 will preprocess the data. This is implemented in megatron/data/gpt2_dataset.py::_build_index_mappings. + """ + + train_data_paths: list = None + """ + List of paths to train datasets. + """ + + train_label_data_paths: list = None + """ + List of paths to train label datasets (not shifted by 1 yet!). + """ + + train_reward_data_paths: list = None + """ + List of paths to train reward datasets + """ + + test_data_paths: list = None + """ + List of paths to test datasets. + """ + + test_label_data_paths: list = None + """ + List of paths to test label datasets (not shifted by 1 yet!). + """ + + test_reward_data_paths: list = None + """ + List of paths to test reward datasets + """ + + valid_data_paths: list = None + """ + List of paths to validation datasets. + """ + + valid_label_data_paths: list = None + """ + List of paths to validation label datasets (not shifted by 1 yet!). 
+ """ + + valid_reward_data_paths: list = None + """ + List of paths to validation reward datasets + """ + + pos_train_data_paths: list = None + neg_train_data_paths: list = None + """ + List of paths to positive and negative training datasets. + """ + + pos_train_label_data_paths: list = None + neg_train_label_data_paths: list = None + """ + List of paths to positive and negative training label datasets (not shifted by 1 yet!). + """ + + pos_valid_data_paths: list = None + neg_valid_data_paths: list = None + """ + List of paths to positive and negative validation datasets. + """ + + pos_valid_label_data_paths: list = None + neg_valid_label_data_paths: list = None + """ + List of paths to positive and negative validation label datasets (not shifted by 1 yet!). + """ + + pos_test_data_paths: list = None + neg_test_data_paths: list = None + """ + List of paths to positive and negative test datasets. + """ + + pos_test_label_data_paths: list = None + neg_test_label_data_paths: list = None + """ + List of paths to positive and negative test label datasets (not shifted by 1 yet!). + """ + + train_data_weights: list = None + """ + List of 'weights' that decide how often to sample from each training dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `train_data_paths` + """ + + valid_data_weights: list = None + """ + List of 'weights' that decide how often to sample from each validation dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `valid_data_paths` + """ + + test_data_weights: list = None + """ + List of 'weights' that decide how often to sample from each test dataset when blending datasets. If None, defaults to equal weighting. + Should be a list the same length as `test_data_paths` + """ + + weight_by_num_documents: bool = False + """ + If True, Builds dataset weights from a multinomial distribution over groups of data according to the number of + documents in each group. + + WARNING: setting this to True will override any user provided weights + + We sample from a group according to the probability p(L) ∝ |L| ** α, + where p(L) is the probability of sampling from a given group, + |L| is the number of examples in that datapoint, + and α is a coefficient that acts to upsample data from underrepresented groups + + Hence α (`alpha`) allows us to control how much to 'boost' the probability of training on low-resource groups. + + See https://arxiv.org/abs/1911.02116 for more details + """ + + weighted_sampler_alpha: float = 1.0 + """ + Alpha value for `weight_by_num_documents`. Only has an effect if `weight_by_num_documents` = True. + + when alpha = 1, the probability of sampling from a given group = n_samples / total_samples + as alpha -> 0, the probability of sampling from all groups becomes equal, and number of documents has no effect + as alpha -> inf, the probability of sampling from the groups with *the most samples* -> 1 + """ + + data_impl: Literal["infer", "mmap", "cached"] = "infer" + """ + Implementation of indexed datasets, can be one of "infer", "cached", or "mmap" + """ + + pack_impl: Literal["packed", "pack_until_overflow", "unpacked"] = "packed" + """ + Packing implementation, can be one of "packed", "pack_until_overflow", or "unpacked". 
+ + warning: pack_until_overflow is very naive and will likely have issues with pretraining scale datasets + """ + + dataset_impl: Literal["gpt2", "pairwise"] = "gpt2" + """ + Dataset implementation, can be one of "gpt2" or "pairwise" + """ + + train_impl: Literal["normal", "dpo", "rm", "kto"] = "normal" + """ + Training implementation, can be one of "normal", "dpo", "kto", or "rm" + """ + + dpo_fp32: bool = True + """ + Whether to cast logits to fp32 for DPO loss calculation. + """ + + dpo_reference_free: bool = False + """ + Whether to use reference-free DPO. + """ + + dpo_beta: float = 0.1 + """ + Beta value for DPO + """ + + kto_fp32: bool = True + """ + Whether to cast logits to fp32 for KTO loss calculation. + """ + + kto_desirable_weight: float = 1.0 + """ + Weight for desirable loss in KTO. Might help if you have unbalanced desirable and undesirable classes. + """ + + kto_undesirable_weight: float = 1.0 + """ + Weight for undesirable loss in KTO. Might help if you have unbalanced desirable and undesirable classes. + """ + + z_loss: float = 0.0 + """ + Z-loss parameter, only implemented for RM training currently. + https://arxiv.org/pdf/2204.02311 + https://arxiv.org/pdf/2309.10305 + """ + + kto_beta: float = 0.1 + """ + Beta value for KTO + """ + + allow_chopped: bool = True + """ + WARNING: if your packing impl is packed, this is ignored. + + Allow chopped samples in the dataset. + (e.g if your sequence length is 1024 and you have a sample of length 1026, it will be chopped to 1024) + """ + + mmap_warmup: bool = False + """ + Warm up mmap files. + """ + + save: str = None + """ + Output directory to save checkpoints to. + """ + + s3_path: str = None + """ + Path to s3 bucket for saving checkpoints. + """ + + s3_chunk_size: int = 104_857_600 + """ + The number of bytes in each file chunk when uploading to s3. Defaults to 100MiB. + """ + + config_files: dict = None + """ + Store of original config files mapping config filename to file contents + """ + + load: str = None + """ + Directory containing a model checkpoint. + """ + + checkpoint_validation_with_forward_pass: bool = False + """ + save input and output of a forward pass with the checkpoint and validate after load + """ + + checkpoint_scale: Literal["linear", "log"] = "linear" + """ + How step at which checkpoints are saved should scale. "linear" implies 1 checkpoint will be saved at every multiple of `checkpoint-factor`, + while "log" implies that the number of steps between each checkpoint will be multiplied by `checkpoint-factor` at each step, starting from step 1. + """ + + checkpoint_factor: Union[int, float] = None + """ + Acts as a multiplier on either the "log" or "linear" checkpoint spacing. + + With `checkpoint-scale="linear"`, `checkpoint-factor=20`, and `train-iters=100`, checkpoints will be saved at + steps [20, 40, 60, 80, 100]. + + With `checkpoint-scale="log"`, `checkpoint-factor=2`, and `train-iters=100`, checkpoints will be saved at + steps [1, 2, 4, 8, 16, 32, 64, 100]. + + Note that the last checkpoint step is always saved. + """ + + extra_save_iters: list = None + """ + Additional iterations when a checkpoint should be saved. + Must be a list of ints or `None`. + """ + + no_save_optim: bool = False + """ + Do not save current optimizer. + """ + + no_save_rng: bool = False + """ + Do not save current rng state. + """ + + no_load_optim: bool = False + """ + Do not load optimizer when loading checkpoint. + """ + + no_load_rng: bool = False + """ + Do not load rng state when loading checkpoint. 
+ """ + + finetune: bool = False + """ + Load model for finetuning. Do not load optimizer or rng state from checkpoint and set iteration to 0. Assumed when loading a release checkpoint. + """ + + batch_size: int = None + """ + training microbatch size per gpu + """ + + train_iters: int = None + """ + Number of iterations to run for training. + """ + + train_epochs: int = None + """ + Number of epochs to run for training. Do not specify both train_epochs and train_iters. + Not currently compatible with data reweighing, pairwise datasets, and packing other than 'packed' + """ + + eval_iters: int = 100 + """ + Number of iterations to run for evaluation validation/test for. + """ + + keep_last_n_checkpoints: int = None + """ + Number of last checkpoints to keep + """ + + eval_interval: int = 1000 + """ + Interval between running evaluation on validation set. + """ + + split: str = "969, 30, 1" + """ + Comma_separated list of proportions for training, validation, and test split. For example the split 90,5,5 will use 90% of data for training, 5% for validation and 5% for test. + """ + + vocab_file: str = None + """ + Path to the vocab file. + """ + + merge_file: str = None + """ + Path to the BPE merge file. + """ + + num_workers: int = 2 + """ + Dataloader number of workers. + """ + + exit_interval: int = None + """ + Exit the program after the iteration is divisible by this value. + """ + + attention_dropout: float = 0.0 + """ + Post attention dropout probability. + """ + + hidden_dropout: float = 0.0 + """ + Dropout probability for hidden state transformer. + """ + + weight_decay: float = 0.1 + """ + Weight decay coefficient for L2 regularization. + """ + + checkpoint_activations: bool = False + """ + Checkpoint activation to allow for training with larger models, sequences, and batch sizes. + """ + + checkpoint_num_layers: int = 1 + """ + Chunk size (number of layers) for checkpointing. + """ + + deepspeed_activation_checkpointing: bool = True + """ + DEPRECATED - TODO: remove + Uses activation checkpointing from deepspeed + """ + + contiguous_checkpointing: bool = False + """ + Contiguous memory checkpointing for activations. + """ + + checkpoint_in_cpu: bool = False + """ + Move the activation checkpoints to CPU. + """ + + synchronize_each_layer: bool = False + """ + does a synchronize at the beginning and end of each checkpointed layer. + """ + + profile_backward: bool = False + """ + Enables backward pass profiling for checkpointed layers. + """ + + partition_activations: bool = False + """ + Partition Activations across GPUs before checkpointing. + """ + + clip_grad: float = 1.0 + """ + Gradient clipping based on global L2 norm. + """ + + hysteresis: int = 2 + """ + hysteresis for dynamic loss scaling + """ + + dynamic_loss_scale: bool = None + """ + flag indicating whether dynamic loss scale is used + """ + + loss_scale: float = None + """ + Static loss scaling, positive power of 2 + values can improve fp16 convergence. If None, dynamic loss scaling is used. + """ + + loss_scale_window: float = 1000.0 + """ + Window over which to raise/lower dynamic scale. + """ + + min_scale: float = 1.0 + """ + Minimum loss scale for dynamic loss scale. + """ + + char_level_ppl: bool = False + """ + Whether to calculate character level perplexity as well as token level perplexity. 
(may incur a time cost) + """ + + use_mup: bool = False + """ + Whether to use Microsoft's Mup https://github.com/microsoft/mup + """ + + coord_check: bool = False + """ + Whether to generate a "coord check" plot to verify mup's implementation in neox + """ + + save_base_shapes: bool = False + """ + Whether to save base shapes for mup. This will save the shapes to the path specified in base-shapes-file. + """ + + base_shapes_file: str = None + """ + Path to the base shapes to save to/load from + """ + + mup_init_scale: float = 1.0 + """ + Initialization scale: All the parameters are multiplied by this value + """ + + mup_attn_temp: float = 1.0 + """ + Attention temperature: Reciprocal of the multiplier applied to the input to attention softmax + """ + + mup_output_temp: float = 1.0 + """ + Output temperature: Reciprocal of the multiplier applied to the input to softmax that + produces the distribution over output tokens. + """ + + mup_embedding_mult: float = 1.0 + """ + Scalar by which we multiply the output of the embedding layer + """ + + mup_rp_embedding_mult: float = 1.0 + """ + Scalar by which we multiply vectors representing relative position + """ + + mup_width_scale: int = 2 + """ + What to scale width by when creating the delta model for mup + """ + + +@dataclass +class NeoXArgsTextgen(NeoXArgsTemplate): + """ + Text Generation arguments + """ + + text_gen_type: str = None + """ + How to generate text/sample the model. + Options: `unconditional`, `input-file`, `interactive`, `precompute` + """ + + precompute_model_name: str = None + """ + Model name to use for saving precomputed logprobs + """ + + temperature: float = 0.0 + """ + exponential scaling output distribution ("higher == more risk") + """ + + top_p: float = 0.0 + """ + Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. + """ + + top_k: int = 0 + """ + integer between 0 and the models vocab size. Filters out any logits with a probability less than that of the top_kth token. + """ + + return_logits: bool = False + """ + Boolean for whether to return the logits for generated tokens + """ + + maximum_tokens: int = 64 + """ + maximum number of tokens to be generated + """ + + prompt_end: str = "\n" + """ + a single prompt's end. Defaults to newline + """ + + sample_input_file: str = None + """ + Get input from file instead of interactive mode, each line is an input. + """ + + sample_output_file: str = "samples.txt" + """ + Output file + """ + + num_samples: int = 1 + """ + Number of samples to generate unconditionally, defaults to 1 and interactive conditional sampling + """ + + recompute: bool = False + """ + During generation recompute all attention instead of using previously computed keys/values. 
+ Should be set to true for sparse attention models + """ + + eval_results_prefix: str = "" + """ + prefix to which to save evaluation results - final fp will be {eval_results_prefix}_eval_results_yy-mm-dd-HH-MM.json + """ + + eval_tasks: list = None + """ + Tasks to evaluate on using lm_eval_harness + + NOTE: Requires internet connection + """ + + moe_top_k: int = 1 + """ + Activate top K experts in MoE + """ + + use_tutel: bool = False + """ + Use Tutel optimizations in MoE + """ + + moe_num_experts: int = 1 + """ + Number of MoE experts + """ + + moe_loss_coeff: float = 0.1 + """ + Coefficient for MoE loss + """ + + moe_train_capacity_factor: float = 1.0 + """ + The capacity of the expert at train time + """ + + moe_eval_capacity_factor: float = 1.0 + """ + The capacity of the expert at eval time + """ + + moe_min_capacity: int = 4 + """ + The minimum capacity per expert regardless of the capacity_factor + """ + + moe_token_dropping: bool = False + """ + Whether to drop tokens when exceeding capacity + """ + + create_moe_param_group: bool = True + """ + Whether to create a separate parameter group for MoE parameters + """ + + moe_use_residual: bool = True + """ + Whether to use residual in MoE + """ + + moe_expert_parallel_size: int = 1 + """ + Number of parallel experts in MoE + """ + + moe_type: str = "megablocks" + """ + Either `deepspeed` or `megablocks` + """ + + moe_glu: bool = False + """ + Use gated linear units in MoE + """ + + moe_lbl_in_fp32: bool = False + """ + Whether to compute the load balancing loss in fp32. + """ + + moe_jitter_eps: float = None + """ + Coefficient for MoE routing jitter. Jitter is + not used if set to None + """ + + enable_expert_tensor_parallelism: bool = False + """ + Enable expert tensor parallelism + """ diff --git a/megatron/neox_arguments/template.py b/megatron/neox_arguments/template.py new file mode 100644 index 0000000000000000000000000000000000000000..2e83419906f99f4fd88d55437395c5f429cfd136 --- /dev/null +++ b/megatron/neox_arguments/template.py @@ -0,0 +1,51 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +import logging + + +@dataclass +class NeoXArgsTemplate: + def defaults(self): + """ + generator for getting default values. + """ + for key, field_def in self.__dataclass_fields__.items(): + yield key, field_def.default + + def update_value(self, key: str, value): + """ + updates a property value if the key already exists + + Problem: a previously non-existing property can be added to the class instance without error. 
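+ + Example (illustrative; assumes `args` is an instance of a NeoXArgsTemplate subclass): + >>> args.update_value("seed", 42)  # succeeds if 'seed' is an existing property + >>> args.update_value("sede", 42)  # logs an error and raises ValueError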
+ """ + if hasattr(self, key): + setattr(self, key, value) + else: + error_message = ( + self.__class__.__name__ + + ".update_value() to be updated property " + + str(key) + + " does not exist" + ) + logging.error(error_message) + raise ValueError(error_message) + + def update_values(self, d): + """ + Updates multiple values in self if the keys already exists + """ + for k, v in d.items(): + self.update_value(k, v) diff --git a/megatron/optimizers.py b/megatron/optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..93515ed1432c50dadda700b5c61ddf9cedb1c851 --- /dev/null +++ b/megatron/optimizers.py @@ -0,0 +1,497 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +from torch.optim import Optimizer + + +class SM3(Optimizer): + """Implements SM3 algorithm. + It has been proposed in `Memory-Efficient Adaptive Optimization`_. + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): coefficient that scale delta before it is applied + to the parameters (default: 0.1) + momentum (float, optional): coefficient used to scale prior updates + before adding. This drastically increases memory usage if + `momentum > 0.0`. This is ignored if the parameter's gradient + is sparse. (default: 0.0) + beta (float, optional): coefficient used for exponential moving + averages (default: 0.0) + eps (float, optional): Term added to square-root in denominator to + improve numerical stability (default: 1e-30) + .. _Memory-Efficient Adaptive Optimization: + https://arxiv.org/abs/1901.11150 + """ + + def __init__(self, params, lr=0.1, momentum=0.0, beta=0.0, eps=1e-30): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {0}".format(lr)) + if not 0.0 <= momentum < 1.0: + raise ValueError("Invalid momentum: {0}".format(momentum)) + if not 0.0 <= beta < 1.0: + raise ValueError("Invalid beta: {0}".format(beta)) + if not 0.0 <= eps: + raise ValueError("Invalid eps: {0}".format(eps)) + + defaults = {"lr": lr, "momentum": momentum, "beta": beta, "eps": eps} + super(SM3, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + momentum = group["momentum"] + beta = group["beta"] + eps = group["eps"] + for p in group["params"]: + if p is None: + continue + grad = p.grad + + state = self.state[p] + shape = grad.shape + rank = len(shape) + + # State initialization + if len(state) == 0: + state["step"] = 0 + state["momentum_buffer"] = 0.0 + _add_initial_accumulators(state, grad) + + if grad.is_sparse: + # the update is non-linear so indices must be unique + grad.coalesce() + grad_indices = grad._indices() + grad_values = grad._values() + + # Transform update_values into sparse tensor + def make_sparse(values): + constructor = grad.new + if grad_indices.dim() == 0 or values.dim() == 0: + return constructor().resize_as_(grad) + return constructor(grad_indices, values, grad.size()) + + acc = state[_key(0)] + update_values = _compute_sparse_update( + beta, acc, grad_values, grad_indices + ) + + self._update_sparse_accumulator( + beta, acc, make_sparse(update_values) + ) + + # Add small amount for numerical stability + update_values.add_(eps).rsqrt_().mul_(grad_values) + + update = make_sparse(update_values) + else: + # Get previous accumulators mu_{t-1} + if rank > 1: + acc_list = [state[_key(i)] for i in range(rank)] + else: + acc_list = [state[_key(0)]] + + # Get update from accumulators and gradients + update = _compute_update(beta, acc_list, grad) + + # Update accumulators. + self._update_accumulator(beta, acc_list, update) + + # Add small amount for numerical stability + update.add_(eps).rsqrt_().mul_(grad) + + if momentum > 0.0: + m = state["momentum_buffer"] + update.mul_(1.0 - momentum).add_(m, alpha=momentum) + state["momentum_buffer"] = update.detach() + + p.sub_(update, alpha=group["lr"]) + state["step"] += 1 + return loss + + @staticmethod + def _update_accumulator(beta, acc_list, update): + for i, acc in enumerate(acc_list): + nu_max = _max_reduce_except_dim(update, i) + if beta > 0.0: + torch.max(acc, nu_max, out=acc) + else: + # No need to compare - nu_max is bigger because of grad ** 2 + acc.copy_(nu_max) + + @staticmethod + def _update_sparse_accumulator(beta, acc, update): + nu_max = _max_reduce_except_dim(update.to_dense(), 0).squeeze() + if beta > 0.0: + torch.max(acc, nu_max, out=acc) + else: + # No need to compare - nu_max is bigger because of grad ** 2 + acc.copy_(nu_max) + + +def _compute_sparse_update(beta, acc, grad_values, grad_indices): + # In the sparse case, a single accumulator is used. + update_values = torch.gather(acc, 0, grad_indices[0]) + if beta > 0.0: + update_values.mul_(beta) + update_values.addcmul_(grad_values, grad_values, value=1.0 - beta) + return update_values + + +def _compute_update(beta, acc_list, grad): + rank = len(acc_list) + update = acc_list[0].clone() + for i in range(1, rank): + # We rely on broadcasting to get the proper end shape. + update = torch.min(update, acc_list[i]) + if beta > 0.0: + update.mul_(beta) + update.addcmul_(grad, grad, value=1.0 - beta) + + return update + + +def _key(i): + # Returns key used for accessing accumulators + return "accumulator_" + str(i) + + +def _add_initial_accumulators(state, grad): + # Creates initial accumulators. For a dense tensor of shape (n1, n2, n3), + # then our initial accumulators are of shape (n1, 1, 1), (1, n2, 1) and + # (1, 1, n3). For a sparse tensor of shape (n, *), we use a single + # accumulator of shape (n,). 
+ shape = grad.shape + rank = len(shape) + defaults = {"device": grad.device, "dtype": grad.dtype} + acc = {} + + if grad.is_sparse: + acc[_key(0)] = torch.zeros(shape[0], **defaults) + elif rank == 0: + # The scalar case is handled separately + acc[_key(0)] = torch.zeros(shape, **defaults) + else: + for i in range(rank): + acc_shape = [1] * i + [shape[i]] + [1] * (rank - 1 - i) + acc[_key(i)] = torch.zeros(acc_shape, **defaults) + + state.update(acc) + + +def _max_reduce_except_dim(tensor, dim): + # Computes max along all dimensions except the given dim. + # If tensor is a scalar, it returns tensor. + rank = len(tensor.shape) + result = tensor + if rank > 0: + assert dim < rank + for d in range(rank): + if d != dim: + result = result.max(dim=d, keepdim=True).values + return result + + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +# modifications - 4/4/2021 @lessw2020 (decay issue spotted by @nestordemeure ) +# weight decay has been implemented AdamW style instead of the original madgrad Adam style. +# in initial image classification testing, this outperformed 0 weight decay or original style weight decay. + +# closure is checked if callable or not since some code passes loss directly, rather than in closure param + +import math +from typing import Collection, TYPE_CHECKING, Any, Callable, Optional, Tuple + +import torch +import torch.optim +import collections + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class madgrad_wd(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam. It may converge faster and generalize better. Currently GPU-only. + Typically, the same learning rate schedule that is used for SGD or Adam may + be used. The overall learning rate is not comparable to either method and + should be determined by a hyper-parameter sweep. + + MADGRAD requires less weight decay than other methods, often as little as + zero. Momentum values used for SGD or Adam's beta1 should work here also. + + On sparse problems both weight_decay and momentum should be set to 0. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. an L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
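+ + Example (an illustrative sketch; `model` is a placeholder): + >>> optimizer = madgrad_wd(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.01) + >>> # then run the usual PyTorch loop: zero_grad(), backward(), step()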
+ """ + + def __init__( + self, + params: _params_t, + lr: float = 1e-2, + momentum: float = 0.9, + weight_decay: float = 0, + eps: float = 1e-6, + ): + if momentum < 0 or momentum >= 1: + raise ValueError(f"Momentum {momentum} must be in the range [0,1]") + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if eps < 0: + raise ValueError(f"Eps must be non-negative") + + defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay) + super().__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self) -> bool: + return False + + @property + def supports_flat_params(self) -> bool: + return True + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None and isinstance(closure, collections.Callable): + loss = closure() + + # step counter must be stored in state to ensure correct behavior under + # optimizer sharding + if "k" not in self.state: + self.state["k"] = torch.tensor([0], dtype=torch.long) + k = self.state["k"].item() + + for group in self.param_groups: + eps = group["eps"] + lr = group["lr"] + eps + decay = group["weight_decay"] + momentum = group["momentum"] + + ck = 1 - momentum + lamb = lr * math.pow(k + 1, 0.5) + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + state = self.state[p] + + if "grad_sum_sq" not in state: + state["grad_sum_sq"] = torch.zeros_like(p.data).detach() + state["s"] = torch.zeros_like(p.data).detach() + if momentum != 0: + state["x0"] = torch.clone(p.data).detach() + + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError( + "momentum != 0 is not compatible with sparse gradients" + ) + + grad_sum_sq = state["grad_sum_sq"] + s = state["s"] + + # Apply weight decay - L2 / AdamW style + if decay: + p.data.mul_(1 - lr * decay) + + """ original impl: + if decay != 0: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + + grad.add_(p.data, alpha=decay) + """ + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv( + s_masked._values(), rms_masked_vals, value=1 + ) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = x0_masked_vals.addcdiv( + s_masked._values(), rms_masked_vals, value=-1 + ) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, alpha=-1) + p.data.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.data.addcdiv(s, rms, value=1) + else: + x0 = state["x0"] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 
3).add_(eps) + + # Update s + s.data.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.data.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.data.mul_(1 - ck).add_(z, alpha=ck) + + self.state["k"] += 1 + return loss + + +class Lion(Optimizer): + """ + Implements the Lion Algorithm. + + .. _Lion: https://arxiv.org/abs/2302.06675 + + Compared to AdamW and various adaptive optimizers that need to save both first and second moments, + Lion only needs the momentum, halving the additional memory footprint. This is beneficial when training large models + and / or with a large batch size. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-4). + betas (Tuple[float, float]): + coefficients used for computing the update direction and the running average of the gradient (default: (0.9, 0.99)) + weight_decay (float): + Weight decay, i.e. an L2 penalty (default: 0). + + """ + + def __init__( + self, + params, + lr: float = 1e-4, + betas: Tuple[float, float] = (0.9, 0.99), + weight_decay: float = 0.0, + ): + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if not (0 <= betas[0] < 1 and 0 <= betas[1] < 1): + raise ValueError(f"Betas {betas} must be in range [0, 1)") + + defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay) + super().__init__(params, defaults) + + def update(self, p, grad, exp_avg, lr, wd, beta1, beta2): + """https://arxiv.org/pdf/2302.06675.pdf#appendix.A""" + + # update model parameters + p.mul_(1 - lr * wd) + sign = exp_avg.clone().mul_(beta1).add(grad, alpha=1 - beta1).sign_() + p.add_(sign, alpha=-lr) + + # update EMA + exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2) + + @torch.no_grad() + def step(self, closure: Optional[Callable] = None): + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + + state = self.state[p] + + # init state - exponential moving average of gradient values + if len(state) == 0: + state["exp_avg"] = torch.zeros_like(p.data).detach() + + self.update( + p, + p.grad, + state["exp_avg"], + group["lr"], + group["weight_decay"], + group["betas"][0], + group["betas"][1], + ) + + return loss diff --git a/megatron/text_generation_utils.py b/megatron/text_generation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..293cbaabc0b37ca9d3e6266e361e4f1325f5089b --- /dev/null +++ b/megatron/text_generation_utils.py @@ -0,0 +1,1000 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +"""Utilities for generating text.""" + +import copy +import json +import math +import os +import time +from typing import List, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from megatron import print_rank_0 +from megatron import mpu +from megatron.utils import get_ltor_masks_and_position_ids, is_mp_rank_0 +from megatron.data.indexed_dataset import make_builder, make_dataset +from megatron.mpu.mappings import gather_from_model_parallel_region + + +def get_batch(neox_args, context_tokens: torch.Tensor): + """ + Generate batch from context tokens. Attention mask and position ids are created. Returned tensors will be on CUDA. + + neox_args: NeoXArgs. + context_tokens: torch tensor with dimensions [batch, context_size] + + returns: tuple of torch tensors (tokens, attention_mask, position_ids) on CUDA + """ + + # Move to GPU. + tokens = context_tokens.contiguous().cuda() + # Get the attention mask and position ids. + attention_mask, _, position_ids = get_ltor_masks_and_position_ids( + data=tokens, + eod_token=neox_args.tokenizer.eod, + eod_mask_loss=neox_args.eod_mask_loss, + ) + return tokens, attention_mask, position_ids + + +def pad_batch( + context_tokens: List[List[int]], pad_id: int, pad_len: int, truncate: bool = False +): + """ + pads context lengths in context_tokens with pad_id to equal neox_args.seq_length, + and returns the padded batch and the new lengths. + + context_tokens: list of lists of tokens + pad_id: int, integer to use as padding token + pad_len: int, context length to be padded; all batch items will be padded to the same length + truncate: bool, if True, truncate context tokens to pad_len if they are longer than pad_len + + returns: tuple of padded context tokens and a list of unpadded token count + """ + + context_lengths = [] + for i, tokens in enumerate(context_tokens): + context_length = len(tokens) + if context_length < pad_len: + tokens.extend([pad_id] * (pad_len - context_length)) + elif context_length > pad_len: + if not truncate: + raise ValueError("context_length is bigger than to be padded length") + context_tokens[i] = tokens[:pad_len] + context_length = pad_len + context_lengths.append(context_length) + return context_tokens, context_lengths + + +def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")): + """ + Filters the logits using top_k / top_p, filling any filtered vocab items with filter_value (defaults to -inf). + + This function has been mostly taken from huggingface conversational ai code at + https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313 + + When both top_k and top_p are specified, tokens are first filtered according to top_k, renormalized, and then filtered according to top_p. + + logits: torch.Tensor -> logits of megatron model. + top_k: integer -> integer between 0 and the models vocab size. Filters out any logits with a probability less than that of the top_kth token. + top_p: float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. 
+ + returns: (filtered) logits""" + + if top_k > 0: + # Remove all tokens with a probability less than the + # last token of the top-k + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits[indices_to_remove] = filter_value + + if top_p > 0.0: + # convert to 1D + sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1) + cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + + # Remove tokens with cumulative probability above the threshold + sorted_indices_to_remove = cumulative_probs > top_p + # Shift the indices to the right to keep also the first token + # above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + for i in range(sorted_indices.size(0)): + indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]] + logits[i][indices_to_remove] = filter_value + + return logits + + +def switch(val1, val2, boolean): + """ + replaces items in val1 with items in val2 where boolean = True + """ + boolean = boolean.type_as(val1) + return (1 - boolean) * val1 + boolean * val2 + + +def forward_model(model, model_inputs, is_pipe_parallel=False) -> torch.Tensor: + """ + Runs model.forward(model_inputs) + + We need to create a wrapper for this function because deepspeed pipe parallel modules operate differently to normal models. + + model: a Megatron model. + model_inputs: tuple containing model args + + returns: torch.Tensor containing the logits of the model + """ + # because someone at deepspeed decided pipeline modules couldn't use kwargs, + # we need to forward a pipe model differently to a normal model + if not is_pipe_parallel: + return model.module(model_inputs) + else: + # we need to format inputs this way because: + # a) deepspeed pipeline only accepts iterables + # b) deepspeed pipeline *requires* that you pass in labels for the loss, it's not easy to get around this + # so we wrap the inputs in an iterable, and pad them (because internally, we get labels as inputs[:, 1:] and inputs as inputs[:, :-1]) + model_inputs = iter([{"text": F.pad(model_inputs[0], pad=(0, 1))}]) + + # set num microbatches to 1 at inference time + micro_batches_before = model.micro_batches + model.micro_batches = 1 + + # deepspeed sends metadata across pipeline stages only once in the first step, then assumes it will stay + # constant. In inference, the metadata of the tensors being sent across pipe stages may change, so we need to set + # these two flags in order for deepspeed to send the metadata every step, otherwise torch.distributed hangs + # silently. Fun stuff. 
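+ # (first_output_send=True makes this stage re-send tensor metadata, and + # pipe_recv_buf=None makes the receiving stage re-allocate its receive buffer)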
+ model.first_output_send = True + model.pipe_recv_buf = None + + loss, logits = model.eval_batch(model_inputs, return_logits=True) + model.micro_batches = micro_batches_before + return logits + + +def broadcast_terminate_signal(terminate_runs: int): + """Send signal to all workers to terminate if we've finished the process""" + terminate_runs_tensor = torch.cuda.LongTensor([terminate_runs]) + torch.distributed.broadcast( + terminate_runs_tensor, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + return terminate_runs_tensor[0].item() + + +def stop_tokens_in_completion(stop_tokens, context_tokens, batch_index, current_index): + if stop_tokens is None: + return False + res = [] + for token_group in stop_tokens: + context = context_tokens[batch_index, : current_index + 1] + context = context[-len(token_group) :] + if context.shape[0] == token_group.shape[0]: + res.append(all(token_group == context)) + else: + res.append(False) + return any(res) + + +def stream_tokens( + neox_args, + model, + context_tokens: List[List[int]], + eos_token_id: int = None, + maximum_tokens: int = None, + recompute: bool = False, + temperature: float = 0.0, + top_k: int = 0, + top_p: float = 0.0, + stop_tokens=None, +): + """ + iterator producing text completions + + neox_args: NeoXArgs. + model: a Megatron model. + context_tokens: the prompt to complete; unpadded list of lists of token ids + context_lengths (computed internally): lengths of context tokens of dimension [batch]; the context length records for each batch item how many non-padded tokens are provided + eos_token_id: end of text token at which completion is terminated, even if max_tokens count has not been reached + attention_mask (computed internally): attention mask for megatron model. + position_ids (computed internally): position ids for positional encoding. + maximum_tokens: maximum number of tokens to be generated; careful! if a batch input is provided, maximum_tokens specifies the maximum number of forwards. + longer batch items get fewer generated tokens. + recompute: flag indicating whether a cache is used for already forwarded tokens (true) or whether all tokens are recomputed at every iteration (false) + temperature (default 0.0): exponential scaling output distribution ("higher == more risk") + top_k (default 0): integer -> integer between 0 and the model's vocab size. Filters out any logits with a probability less than that of the top_kth token. + top_p (default 0.0): float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
+ note: greedy decoding is used if temperature is 0.0, top_k is 0 and top_p is 0.0 + yields: ( + tokens (completions from model), + token_generation_start_index (token index per batch item for the first generated token), + token_generation_end_index (token index per batch item for the last generated token), + logits (logits which are so far computed, zeros otherwise), + is_done (flag for each bach item indicating whether an eod token was generated) + ) + + * each iteration adds a generated token to the context_tokens + * output contains both context_tokens from input and generated tokens + * if batch items have different lengths, the iterator will start at the first completion and return the unchanged input context token otherwise + """ + + model.eval() + + # pad batch in order to allow conversion to tensor + context_tokens, context_lengths = pad_batch( + copy.deepcopy(context_tokens), + pad_id=neox_args.tokenizer.eod, + pad_len=neox_args.seq_length, + ) + + # convert to tensor and broadcast + context_tokens = torch.cuda.LongTensor(context_tokens) + if stop_tokens: + if len(stop_tokens) > 0 and type(stop_tokens[0]) is not list: + stop_tokens = [stop_tokens] + for i in range(0, len(stop_tokens)): + stop_tokens[i] = torch.cuda.LongTensor(stop_tokens[i]) + + # Make sure context tokens + start tokens are the same across all ranks + token_generation_start_index = torch.cuda.LongTensor(context_lengths) + torch.distributed.broadcast( + context_tokens, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + torch.distributed.broadcast( + token_generation_start_index, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + + # get attention mask / position ids + context_tokens, attention_mask, position_ids = get_batch(neox_args, context_tokens) + + # set variables + eos_token_id = eos_token_id or neox_args.tokenizer.eod + maximum_tokens = maximum_tokens or ( + neox_args.seq_length - token_generation_start_index.max().item() - 1 + ) + batch_size = context_tokens.size(0) + + # get the context_index at which generation is to start + # we start generation at the position where the smallest context ends + token_index_to_generate = token_generation_start_index.min().item() + first_token_index_to_generate = token_index_to_generate + last_token_index_to_generate = min( + neox_args.seq_length + - 1, # never generate more than the model's sequence length + token_index_to_generate + maximum_tokens - 1, + ) + + with torch.no_grad(): + # initialize generation variables + state_is_done = torch.zeros([batch_size]).byte().cuda() + token_generation_end_index = torch.ones([batch_size]).long().cuda() * (-1) + generation_logits = ( + torch.empty(maximum_tokens, neox_args.padded_vocab_size).float().cuda() + ) + + while token_index_to_generate <= last_token_index_to_generate: + if recompute: # recompute all tokens + model_inputs = ( + context_tokens, + position_ids, + attention_mask, + ) + logits = forward_model(model, model_inputs, neox_args.is_pipe_parallel) + if logits is not None: # if pipe parallel, not all ranks return logits + generated_token_logits = logits[ + :, token_index_to_generate - 1, : + ] # [bs, seq, vocab_size] -> [bs, vocab_size] + else: # use kv cache + if token_index_to_generate == first_token_index_to_generate: + tokens_to_use = context_tokens[:, :token_index_to_generate] + positions_to_use = position_ids[:, :token_index_to_generate] + else: + tokens_to_use = context_tokens[:, token_index_to_generate - 1].view( + batch_size, -1 + ) + 
positions_to_use = position_ids[ + :, token_index_to_generate - 1 + ].view(batch_size, -1) + + model_inputs = ( + tokens_to_use, # input_ids + positions_to_use, # position_ids + attention_mask, # attention_mask + ) + + logits = forward_model(model, model_inputs, neox_args.is_pipe_parallel) + if logits is not None: # if pipe parallel, not all ranks return logits + generated_token_logits = ( + logits[:, -1].view(batch_size, -1).contiguous() + ) # [bs, seq, vocab_size] -> [bs, vocab_size] + + if logits is not None: + # sample token id of the to be generated token + if temperature == 0.0 and top_k == 0 and top_p == 0.0: + generated_tokens = torch.argmax( + generated_token_logits, dim=-1 + ).view(-1) + else: + generated_token_logits = generated_token_logits.float() + if temperature > 0.0: + generated_token_logits /= temperature + generated_token_logits = filter_logits( + generated_token_logits, top_k=top_k, top_p=top_p + ) + next_token_log_probs = F.softmax(generated_token_logits, dim=-1) + generated_tokens = torch.multinomial( + next_token_log_probs, num_samples=1 + ).view(-1) + + if neox_args.return_logits: + generation_logits[ + token_index_to_generate - 1 + ] = generated_token_logits[0] + + if neox_args.is_pipe_parallel: + # broadcast generated tokens to pipe parallel group + src_rank = model.grid.stage_to_global(model.num_stages - 1) + generated_tokens = ( + generated_tokens + if logits is not None + else torch.zeros(batch_size, dtype=torch.long).cuda() + ) + torch.distributed.broadcast( + tensor=generated_tokens, + src=src_rank, + group=mpu.get_pipe_parallel_group(), + ) + + # determine if state has started for each batch item + state_started = ( + token_generation_start_index <= token_index_to_generate + ) # check which batch items have been started + + # switch out padding tokens for generated tokens + context_tokens[:, token_index_to_generate] = switch( + context_tokens[:, token_index_to_generate].view(-1), + generated_tokens, + state_started, + ) + + # determine if state has finished for each batch item + state_done = ( + generated_tokens == eos_token_id + ).byte() & state_started.byte() # check which batch items produce an eos_token in the current iteration + state_just_finished = (state_done & ~state_is_done).bool() + state_is_done = state_is_done | state_done + stop_tokens_produced = torch.zeros_like(state_is_done) + for batch_idx, ctx in enumerate(context_tokens): + stop_tokens_produced[batch_idx] = stop_tokens_in_completion( + stop_tokens, context_tokens, batch_idx, token_index_to_generate + ) + state_is_done = state_is_done | stop_tokens_produced + + token_generation_end_index[ + (state_started.byte() & ~state_is_done).bool() + ] = token_index_to_generate + + token_index_to_generate += 1 + + yield context_tokens, token_generation_start_index, token_generation_end_index, generation_logits, state_is_done.bool() + if torch.all(state_is_done): + break + + +def generate_samples_from_prompt( + neox_args, + model, + text: Union[List[str], str], + eos_token_id: int = None, + maximum_tokens: int = 64, + recompute: bool = False, + temperature: float = 0.0, + top_k: int = 0, + top_p: float = 0.0, + stop_tokens=None, +): + """ + Generates samples from raw text and returns them in a dictionary. + + neox_args: NeoXArgs. + model: a Megatron model + text: either a single prompt (str) or a list of prompts (List[str]). 
+def generate_samples_from_prompt(
+    neox_args,
+    model,
+    text: Union[List[str], str],
+    eos_token_id: int = None,
+    maximum_tokens: int = 64,
+    recompute: bool = False,
+    temperature: float = 0.0,
+    top_k: int = 0,
+    top_p: float = 0.0,
+    stop_tokens=None,
+):
+    """
+    Generates samples from raw text and returns them as a list of dictionaries.
+
+    neox_args: NeoXArgs.
+    model: a Megatron model
+    text: either a single prompt (str) or a list of prompts (List[str]).
+
+    eos_token_id: end of text token at which completion is terminated, even if maximum_tokens count has not been reached
+    maximum_tokens: maximum number of tokens to be generated
+
+    recompute: if True, all tokens are recomputed at every iteration; if False, a kv cache is used for already forwarded tokens
+
+    temperature (default 0.0): sampling temperature; logits are divided by this value before sampling ("higher == more risk")
+    top_k (default 0): integer -> integer between 0 and the model's vocab size. Filters out any logits with a probability less than that of the top_kth token.
+    top_p (default 0.0): float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
+    stop_tokens: optional stop-token sequence(s); generation for a batch item is marked finished once one is produced
+    note: greedy decoding is used if temperature is 0.0, top_k is 0 and top_p is 0.0
+
+    returns: List[dict] -> a list of dicts containing the following fields:
+        - 'context' (the input)
+        - 'text' (the completion)
+        - 'length' (the length of the completion in number of tokens)
+        - 'finished' (whether generation stopped because an eos or stop token was produced)
+        - 'message' (a message associated with the generation procedure, can be a warning or error)
+        - 'duration_seconds' (duration of the generation in seconds)
+
+    """
+    eos_token_id = eos_token_id or neox_args.tokenizer.eod
+
+    # type check
+    assert any(
+        [isinstance(text, str), isinstance(text, list)]
+    ), "Text should be in string or list form"
+    if isinstance(text, str):
+        text = [text]
+
+    input_count = len(text)
+    input_pos = 0
+
+    # generate completions
+    generated_texts = []
+    while True:
+
+        start_time = time.time()
+        # Tokenize text, and check whether we should terminate process
+        terminate_runs = 0
+        if input_pos == input_count:
+            terminate_runs = 1
+        else:
+            raw_text = text[input_pos]
+            input_pos += 1
+
+            if raw_text == "":
+                context_tokens = [eos_token_id]
+            else:
+                context_tokens = neox_args.tokenizer.tokenize(raw_text)
+            context_length = len(context_tokens)
+
+            if context_length >= (neox_args.seq_length // 2):
+                print_rank_0(
+                    "\nWarning! Context length",
+                    context_length,
+                    "\nPlease give a smaller context (e.g. half of the "
+                    "max sequence length)!",
+                )
half of the " + "max sequence length)!", + ) + if not is_mp_rank_0(): + context_tokens = neox_args.tokenizer.tokenize("EMPTY TEXT") + context_length = len(context_tokens) + terminate_runs = 0 + + terminate_runs = broadcast_terminate_signal(terminate_runs) + if terminate_runs == 1: + return generated_texts + + for ( + batch_context_tokens, + batch_token_generation_start_index, + batch_token_generation_end_index, + batch_generated_token_logits, + is_done, + ) in stream_tokens( + neox_args=neox_args, + model=model, + context_tokens=[context_tokens], + eos_token_id=eos_token_id, + maximum_tokens=maximum_tokens, + recompute=recompute, + temperature=temperature, + top_k=top_k, + top_p=top_p, + stop_tokens=stop_tokens, + ): + pass # finish generation and use all results below + + batch_context_tokens = batch_context_tokens.cpu().numpy().tolist() + batch_token_generation_start_index = ( + batch_token_generation_start_index.cpu().numpy().tolist() + ) + batch_token_generation_end_index = ( + batch_token_generation_end_index.cpu().numpy().tolist() + ) + batch_is_done = is_done.cpu().numpy().tolist() + + for tokens, start_index, end_index, is_done in zip( + batch_context_tokens, + batch_token_generation_start_index, + batch_token_generation_end_index, + batch_is_done, + ): + + if end_index >= start_index: + generated_tokens = tokens[start_index : end_index + 1] + try: + generated_text = neox_args.tokenizer.detokenize(generated_tokens) + message = None + except KeyError: + generated_text = None + message = "WARNING: generated token which doesn't exist." + else: + generated_text = None + generated_tokens = [] + # this will happen if the first generated token is a stop token or eos token + message = "WARNING: text generation did not start; try different batching or adjust parameters" + if is_mp_rank_0(): + data = { + "context": raw_text, + "text": generated_text, + "length": len(generated_tokens), + "finished": is_done, + "message": message, + "duration_seconds": float(time.time() - start_time), + } + + if neox_args.return_logits: + data["logits"] = batch_generated_token_logits.cpu().numpy().tolist() + + generated_texts.append(data) + + return generated_texts + + +def generate_samples_input_from_file( + neox_args, + model, + input_file, + output_file=None, + eos_token_id: int = None, + maximum_tokens: int = 64, + prompt_end: str = "\n", + recompute: bool = False, + temperature: float = 0.0, + top_k: int = 0, + top_p: float = 0.0, +): + """ + Generates samples from an input file and writes them to an output file. + + Reads prompts from neox_args.sample_input_file and writes completions to neox_args.sample_output_file + + neox_args: NeoXArgs. + model: a Megatron model + + input_file: path to input file. Each line in the input file will be treated as separate prompt. The line break at the end of the line is not included in the prompt. + output_file: file where generation results are to be stored in jsonl format. defaults to input_file+'.output.jsonl' if not defined + + eos_token_id: end of text token at which completion is terminated, even if max_tokes count has not been reached + maximum_tokens: maximum number of tokens to be generated + prompt_end: end of a single input prompt. Defaults to newline character '\n'. Other prompt-end sequences may be useful when generating indent-aware completions (e.g. 
+def generate_samples_input_from_file(
+    neox_args,
+    model,
+    input_file,
+    output_file=None,
+    eos_token_id: int = None,
+    maximum_tokens: int = 64,
+    prompt_end: str = "\n",
+    recompute: bool = False,
+    temperature: float = 0.0,
+    top_k: int = 0,
+    top_p: float = 0.0,
+):
+    """
+    Generates samples from an input file and writes them to an output file.
+
+    Reads prompts from input_file (typically neox_args.sample_input_file) and writes completions to output_file (typically neox_args.sample_output_file)
+
+    neox_args: NeoXArgs.
+    model: a Megatron model
+
+    input_file: path to input file. Each prompt in the input file is delimited by prompt_end (a line break by default); the delimiter is not included in the prompt.
+    output_file: file where generation results are to be stored in jsonl format. defaults to input_file+'.output.jsonl' if not defined
+
+    eos_token_id: end of text token at which completion is terminated, even if maximum_tokens count has not been reached
+    maximum_tokens: maximum number of tokens to be generated
+    prompt_end: end of a single input prompt. Defaults to newline character '\n'. Other prompt-end sequences may be useful when generating indent-aware completions (e.g. code)
+
+    recompute: if True, all tokens are recomputed at every iteration; if False, a kv cache is used for already forwarded tokens
+
+    temperature (default 0.0): sampling temperature; logits are divided by this value before sampling ("higher == more risk")
+    top_k (default 0): integer -> integer between 0 and the model's vocab size. Filters out any logits with a probability less than that of the top_kth token.
+    top_p (default 0.0): float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
+
+    note: greedy decoding is used if temperature is 0.0, top_k is 0 and top_p is 0.0
+
+    returns: List[dict] -> a list of dicts containing the following fields:
+        - 'context' (the input)
+        - 'text' (the completion)
+        - 'length' (the length of the completion in number of tokens)
+        - 'finished' (whether generation stopped because an eos or stop token was produced)
+        - 'message' (a message associated with the generation procedure, can be a warning or error)
+        - 'duration_seconds' (duration of the generation in seconds)
+    """
+    # Read the sample file
+    print_rank_0(
+        "generate_samples_input_from_file() loading input from {}".format(input_file)
+    )
+    with open(input_file, "r", encoding="utf-8") as f:
+        prompts = f.read()
+    prompts = prompts.split(prompt_end)
+    prompts = [p.strip() for p in prompts]
+    prompts = [p for p in prompts if len(p) > 0]
+    print_rank_0(
+        "generate_samples_input_from_file() prompts loaded: {}".format(len(prompts))
+    )
+
+    if is_mp_rank_0():
+        if output_file is None:
+            output_file = str(input_file) + ".output.jsonl"
+            print_rank_0(
+                "generate_samples_input_from_file() setting default output file to {}".format(
+                    output_file
+                )
+            )
+
+    print_rank_0("generate_samples_input_from_file() generating...")
+    generated_texts = generate_samples_from_prompt(
+        neox_args=neox_args,
+        model=model,
+        text=prompts,
+        eos_token_id=eos_token_id,
+        maximum_tokens=maximum_tokens,
+        recompute=recompute,
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+    )
+
+    if is_mp_rank_0():
+        with open(output_file, "w") as f_out:
+            for item in generated_texts:
+                f_out.write(json.dumps(item) + "\n")
+    print_rank_0("generate_samples_input_from_file() done")
+    return generated_texts
+
+
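To make the prompt-file convention concrete, here is a minimal standalone sketch of the same split/strip/filter logic with the default prompt_end of '\n'; the file name is hypothetical and the snippet is not part of the patch.

```python
# Standalone sketch of the prompt-file handling above, using prompt_end="\n".
with open("prompts.txt", "w", encoding="utf-8") as f:
    f.write("Once upon a time\n\nThe three laws of robotics are\n")

with open("prompts.txt", "r", encoding="utf-8") as f:
    prompts = f.read().split("\n")
prompts = [p.strip() for p in prompts]
prompts = [p for p in prompts if len(p) > 0]  # empty chunks are dropped
assert prompts == ["Once upon a time", "The three laws of robotics are"]
```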
+def generate_samples_unconditional(
+    neox_args,
+    model,
+    number_of_samples: int = 10,
+    output_file=None,
+    eos_token_id: int = None,
+    maximum_tokens: int = 64,
+    recompute: bool = False,
+    temperature: float = 0.0,
+    top_k: int = 0,
+    top_p: float = 0.0,
+):
+    """
+    Generates samples unconditionally (no prompt) and returns them as a list of dictionaries.
+
+    neox_args: NeoXArgs.
+    model: a Megatron model
+
+    number_of_samples (default 10): number of unconditional samples to be generated
+
+    output_file: file where generation results are to be stored in jsonl format. no file will be stored if omitted
+
+    eos_token_id: end of text token at which completion is terminated, even if maximum_tokens count has not been reached
+    maximum_tokens: maximum number of tokens to be generated
+
+    recompute: if True, all tokens are recomputed at every iteration; if False, a kv cache is used for already forwarded tokens
+
+    temperature (default 0.0): sampling temperature; logits are divided by this value before sampling ("higher == more risk")
+    top_k (default 0): integer -> integer between 0 and the model's vocab size. Filters out any logits with a probability less than that of the top_kth token.
+    top_p (default 0.0): float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
+
+    note: greedy decoding is used if temperature is 0.0, top_k is 0 and top_p is 0.0
+
+    returns: List[dict] -> a list of dicts containing the following fields:
+        - 'context' (the input; empty for unconditional generation)
+        - 'text' (the completion)
+        - 'length' (the length of the completion in number of tokens)
+        - 'finished' (whether generation stopped because an eos or stop token was produced)
+        - 'message' (a message associated with the generation procedure, can be a warning or error)
+        - 'duration_seconds' (duration of the generation in seconds)
+    """
+
+    print_rank_0("generate_samples_unconditional() generating...")
+    assert number_of_samples > 0, "number_of_samples must be > 0"
+    generated_texts = generate_samples_from_prompt(
+        neox_args=neox_args,
+        model=model,
+        text=["" for _ in range(number_of_samples)],
+        eos_token_id=eos_token_id,
+        maximum_tokens=maximum_tokens,
+        recompute=recompute,
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+    )
+
+    if is_mp_rank_0():
+        if output_file is not None:
+            with open(output_file, "w") as f_out:
+                for item in generated_texts:
+                    f_out.write(json.dumps(item) + "\n")
+    print_rank_0("generate_samples_unconditional() done")
+    return generated_texts
+
+
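For reference, one element of the returned list (and one line of the optional .jsonl output) has the following shape; the values below are illustrative only.

```python
# Illustrative only: the shape of one generation record.
record = {
    "context": "",                       # empty prompt for unconditional samples
    "text": "The quick brown fox ...",   # the generated completion
    "length": 32,                        # completion length in tokens
    "finished": True,                    # an eos or stop token was produced
    "message": None,                     # warning/error string, if any
    "duration_seconds": 1.73,
}
```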
+def generate_samples_interactive(
+    neox_args,
+    model,
+    maximum_tokens: int = 64,
+    prompt_end: str = "\n",
+    eos_token_id: int = None,
+    recompute: bool = False,
+    temperature: float = 0.0,
+    top_k: int = 0,
+    top_p: float = 0.0,
+):
+    """
+    Generates samples interactively: reads prompts from stdin, streams tokens, and prints the completions.
+
+    neox_args: NeoXArgs.
+    model: a Megatron model
+
+    maximum_tokens: maximum number of tokens to be generated
+    prompt_end: end of a single input prompt. Defaults to newline character '\n'. Other prompt-end sequences may be useful when generating indent-aware completions (e.g. code). The interactive mode keeps reading user input until prompt_end is encountered.
+    eos_token_id: end of text token at which completion is terminated, even if maximum_tokens count has not been reached
+
+    recompute: if True, all tokens are recomputed at every iteration; if False, a kv cache is used for already forwarded tokens
+
+    temperature (default 0.0): sampling temperature; logits are divided by this value before sampling ("higher == more risk")
+    top_k (default 0): integer -> integer between 0 and the model's vocab size. Filters out any logits with a probability less than that of the top_kth token.
+    top_p (default 0.0): float -> Top-p (nucleus) sampling chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p.
+
+    note: greedy decoding is used if temperature is 0.0, top_k is 0 and top_p is 0.0
+
+    returns: None; the generated text is printed to stdout after each prompt.
+    """
+
+    while True:
+        model.module.clear_cache()  # clear kv cache between batches
+        torch.distributed.barrier(group=mpu.get_model_parallel_group())
+        terminate_runs = 0
+
+        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
+            os.system("clear")
+            raw_text = ""
+            while True:
+                current_input = input("Context prompt >>> ")
+                if (
+                    prompt_end == "\n"
+                ):  # we need to handle '\n' case as 'input' strips it and leads to lines being squashed
+                    raw_text += current_input
+                    break
+                if prompt_end in current_input:
+                    raw_text += current_input.split(prompt_end)[0]
+                    break
+                raw_text += (
+                    current_input + "\n"
+                )  # re-add newline since we stripped it on input
+            context_tokens = neox_args.tokenizer.tokenize(raw_text)
+            if len(context_tokens) == 0:
+                context_tokens = [neox_args.tokenizer.eod]
+            context_length = len(context_tokens)
+            if context_length >= (neox_args.seq_length - 1):
+                print_rank_0(
+                    "\nContext length "
+                    + str(context_length)
+                    + "\nReached max sequence length!"
+                )
+                terminate_runs = 1
+        else:
+            context_tokens = neox_args.tokenizer.tokenize("EMPTY TEXT")
+            context_length = len(context_tokens)
+
+        terminate_runs = broadcast_terminate_signal(terminate_runs)
+        if terminate_runs == 1:
+            return
+        for (
+            batch_context_tokens,
+            batch_token_generation_start_index,
+            batch_token_generation_end_index,
+            batch_generated_token_logits,
+            is_done,
+        ) in stream_tokens(
+            neox_args=neox_args,
+            model=model,
+            context_tokens=[context_tokens],
+            eos_token_id=eos_token_id,
+            maximum_tokens=maximum_tokens,
+            recompute=recompute,
+            temperature=temperature,
+            top_k=top_k,
+            top_p=top_p,
+        ):
+            if mpu.get_model_parallel_rank() == 0:
+                generated_tokens = (
+                    batch_context_tokens[0]
+                    .cpu()
+                    .numpy()
+                    .tolist()[
+                        batch_token_generation_start_index[0]
+                        .item() : batch_token_generation_end_index[0]
+                        .item()
+                        + 1
+                    ]
+                )
+                generated_text = neox_args.tokenizer.detokenize(generated_tokens)
+                print_rank_0("Generated Text: " + generated_text)
+        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
+            _ = input("\n")
+
+
+def get_logp(logits, labels, force_fp32=False):
+    if force_fp32:
+        logits = logits.float()
+    logp = logits.log_softmax(dim=-1)
+    return torch.gather(logp, dim=2, index=labels.unsqueeze(2)).squeeze(2)
+
+
+def precompute_logits(neox_args, model):
+    """
+    Precomputes logprobs from training/testing/validation datasets
+
+    Saves them to the same directory as each dataset, with the model name appended to the file name
+
+    neox_args: NeoXArgs.
+    model: a Megatron model
+    """
+    if neox_args.precompute_model_name is None:
+        mdl_name = str(hash(neox_args.load))
+    else:
+        mdl_name = neox_args.precompute_model_name
+    print_rank_0("Precomputing logprobs...")
+    model.eval()
+    data_paths = list()
+    if neox_args.train_data_paths is not None:
+        for path in neox_args.train_data_paths:
+            data_paths.append(path)
+        for path in neox_args.test_data_paths:
+            data_paths.append(path)
+        for path in neox_args.valid_data_paths:
+            data_paths.append(path)
+    elif neox_args.pos_train_data_paths is not None:
+        # Pairwise data...
+ for path in neox_args.pos_train_data_paths: + data_paths.append(path) + for path in neox_args.neg_train_data_paths: + data_paths.append(path) + for path in neox_args.pos_valid_data_paths: + data_paths.append(path) + for path in neox_args.neg_valid_data_paths: + data_paths.append(path) + for path in neox_args.pos_test_data_paths: + data_paths.append(path) + for path in neox_args.neg_test_data_paths: + data_paths.append(path) + for path in data_paths: + print_rank_0(f"Precomputing logits for {path}") + # Add hash to path... + out_path = path + f"_{mdl_name}" + if os.path.exists(out_path + ".idx"): + continue + dataset = make_dataset(path, neox_args.data_impl, not neox_args.mmap_warmup) + if is_mp_rank_0(): + out_dataset = make_builder(out_path + ".bin", neox_args.data_impl) + out_dataset._dtype = np.float32 + i = 0 + + # TODO: Not sure why this requires a multiple of 8? Investigate later. + while i < int(math.ceil(len(dataset) / 8.0) * 8): + start = time.time() + model.module.clear_cache() # clear kv cache between batches + if is_mp_rank_0(): + offset = ( + mpu.get_data_parallel_rank() + * neox_args.train_micro_batch_size_per_gpu + ) + context_tokens = [ + [int(x) for x in dataset.get(j % len(dataset)).tolist()] + for j in range( + i + offset, + i + (neox_args.train_micro_batch_size_per_gpu + offset), + ) + ] + # grab microbatch + # pad batch in order to allow conversion to tensor + context_tokens, context_lengths = pad_batch( + copy.deepcopy(context_tokens), + pad_id=0, + pad_len=neox_args.seq_length + 1, + truncate=True, + ) + # print(context_tokens) + label_tokens = [tokens[1:] for tokens in context_tokens] + context_tokens = [tokens[:-1] for tokens in context_tokens] + else: + context_tokens = [ + [0 for _ in range(neox_args.seq_length)] + for _ in range(neox_args.batch_size) + ] + label_tokens = [ + [0 for _ in range(neox_args.seq_length)] + for _ in range(neox_args.batch_size) + ] + context_lengths = [0 for _ in range(neox_args.batch_size)] + i += ( + neox_args.train_micro_batch_size_per_gpu + * mpu.get_data_parallel_world_size() + ) + # print(context_tokens) + # convert to tensor and broadcast + context_tokens = torch.cuda.LongTensor(context_tokens) + label_tokens = torch.cuda.LongTensor(label_tokens) + # Make sure context tokens + start tokens are the same across all ranks + token_generation_start_index = torch.cuda.LongTensor(context_lengths) + torch.distributed.broadcast( + context_tokens, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + torch.distributed.broadcast( + token_generation_start_index, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + torch.distributed.broadcast( + label_tokens, + mpu.get_model_parallel_src_rank(), + group=mpu.get_model_parallel_group(), + ) + # context_tokens = context_tokens[:, :chop_len].contiguous() + # label_tokens = label_tokens[:, :chop_len].contiguous() + with torch.no_grad(): + # get attention mask / position ids + context_tokens, attention_mask, position_ids = get_batch( + neox_args, context_tokens + ) + model_inputs = ( + context_tokens, + position_ids, + attention_mask, + ) + maybe_tuple = forward_model( + model, model_inputs, neox_args.is_pipe_parallel + ) + if isinstance(maybe_tuple, tuple): + logits, _ = maybe_tuple + else: + logits = maybe_tuple + if logits is not None: # if pipe parallel, not all ranks return logits + logits = gather_from_model_parallel_region(logits) + logp = get_logp(logits, label_tokens, True).squeeze() + if neox_args.is_pipe_parallel: + # broadcast 
generated tokens to pipe parallel group + src_rank = model.grid.stage_to_global(model.num_stages - 1) + logp = ( + logp + if logits is not None + else torch.zeros( + neox_args.batch_size, dtype=torch.float32 + ).cuda() + ) + torch.distributed.broadcast( + tensor=logp, + src=src_rank, + group=mpu.get_pipe_parallel_group(), + ) + logp = logp.squeeze() + logp_list = [ + torch.zeros_like(logp) + for _ in range(mpu.get_data_parallel_world_size()) + ] + torch.distributed.all_gather( + logp_list, logp, group=mpu.get_data_parallel_group() + ) + logp = torch.cat(logp_list, dim=0).cpu().numpy() + if (mpu.get_model_parallel_rank() == 0) and ( + mpu.get_data_parallel_rank() == 0 + ): + for j in range(logp.shape[0]): + out_dataset.add_item(logp[j]) + out_dataset.end_document() + print_rank_0(f"Processed {i} / {len(dataset)} in {time.time() - start}") + if is_mp_rank_0(): + out_dataset.finalize( + out_path + ".idx", + ) + torch.distributed.barrier() diff --git a/megatron/tokenizer/__init__.py b/megatron/tokenizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b53bbb828901e9c80f57b88bbf3661a9a60ff679 --- /dev/null +++ b/megatron/tokenizer/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .tokenizer import build_tokenizer diff --git a/megatron/tokenizer/tokenizer.py b/megatron/tokenizer/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..d39e18243e2d20257b5cd17f03a851363f992b3b --- /dev/null +++ b/megatron/tokenizer/tokenizer.py @@ -0,0 +1,404 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Megatron tokenizers.""" + +from abc import ABC +from abc import abstractmethod + +from tokenizers import Tokenizer +from transformers import GPT2Tokenizer, GPT2TokenizerFast +import numpy as np +import sentencepiece as spm +from typing import List, Union + + +def build_tokenizer(args): + """Initialize tokenizer.""" + if args.rank == 0: + print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True) + + assert ( + args.tokenizer_type is not None + ), "tokenizer_type must be specified in the .yml config" + + # Select and instantiate the tokenizer. 
+ if args.tokenizer_type.lower() == "GPT2BPETokenizer".lower(): + assert args.vocab_file is not None + assert args.merge_file is not None + tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file) + elif args.tokenizer_type.lower() == "SPMTokenizer".lower(): + assert args.vocab_file is not None + tokenizer = SentencePieceTokenizer(args.vocab_file) + elif args.tokenizer_type.lower() == "HFTokenizer".lower(): + assert args.vocab_file is not None + tokenizer = HFTokenizer(args.vocab_file) + elif args.tokenizer_type.lower() == "HFGPT2Tokenizer".lower(): + if args.vocab_file is None: + print( + "WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer" + ) + tokenizer = HFGPT2Tokenizer(args.vocab_file) + elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower(): + tokenizer = CharLevelTokenizer(vocab_size=512) + elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower(): + assert args.vocab_file is not None + tokenizer = TiktokenTokenizer(args.vocab_file) + else: + raise NotImplementedError( + "{} tokenizer is not " "implemented.".format(args.tokenizer_type) + ) + + # Add vocab size. + args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args) + + return tokenizer + + +def _vocab_size_with_padding(orig_vocab_size, args): + """Pad vocab size so it is divisible by model parallel size and + still having GPU friendly size.""" + + after = orig_vocab_size + multiple = args.make_vocab_size_divisible_by * args.model_parallel_size + while (after % multiple) != 0: + after += 1 + if args.rank == 0: + print( + " > padded vocab (size: {}) with {} dummy tokens " + "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after), + flush=True, + ) + return after + + +class AbstractTokenizer(ABC): + """Abstract class for tokenizer.""" + + def __init__(self, name): + self.name = name + super().__init__() + + @property + @abstractmethod + def vocab_size(self): + pass + + @property + @abstractmethod + def vocab(self): + """Dictionary from vocab text token to id token.""" + pass + + @property + @abstractmethod + def inv_vocab(self): + """Dictionary from vocab id token to text token.""" + pass + + @abstractmethod + def tokenize(self, text): + pass + + def detokenize(self, token_ids): + raise NotImplementedError( + "detokenizer is not implemented for {} " "tokenizer".format(self.name) + ) + + @property + def cls(self): + raise NotImplementedError( + "CLS is not provided for {} " "tokenizer".format(self.name) + ) + + @property + def sep(self): + raise NotImplementedError( + "SEP is not provided for {} " "tokenizer".format(self.name) + ) + + @property + def pad(self): + raise NotImplementedError( + "PAD is not provided for {} " "tokenizer".format(self.name) + ) + + @property + def eod(self): + raise NotImplementedError( + "EOD is not provided for {} " "tokenizer".format(self.name) + ) + + @property + def mask(self): + raise NotImplementedError( + "MASK is not provided for {} " "tokenizer".format(self.name) + ) + + +class _GPT2BPETokenizer(AbstractTokenizer): + """Original GPT2 BPE tokenizer.""" + + def __init__(self, vocab_file, merge_file): + name = "GPT2 BPE" + super().__init__(name) + + self.tokenizer = GPT2Tokenizer( + vocab_file, merge_file, errors="replace", special_tokens=[], max_len=None + ) + self.eod_id = self.tokenizer.encoder["<|endoftext|>"] + + @property + def vocab_size(self): + return len(self.tokenizer.encoder) + + @property + def vocab(self): + return self.tokenizer.encoder + + @property + def inv_vocab(self): + return 
self.tokenizer.decoder + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class SentencePieceTokenizer(AbstractTokenizer): + """Designed to Integrate SP's Tokenizer.""" + + def __init__(self, vocab_file): + name = "SPM" + super().__init__(name) + + self.tokenizer = spm.SentencePieceProcessor(model_file=vocab_file) + self.eod_id = self.tokenizer.piece_to_id("<|endoftext|>") + + @property + def vocab_size(self): + return self.tokenizer.get_piece_size() + + @property + def vocab(self): + return { + self.tokenizer.id_to_piece(idx): idx + for idx in range(self.tokenizer.get_piece_size()) + } + + @property + def inv_vocab(self): + return { + idx: self.tokenizer.id_to_piece(idx) + for idx in range(self.tokenizer.get_piece_size()) + } + + def tokenize(self, text): + return self.tokenizer.encode(text) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class HFTokenizer(AbstractTokenizer): + """Designed to Integrate HF's Tokenizer library.""" + + def __init__(self, vocab_file): + name = "HFTokenizer" + super().__init__(name) + self.tokenizer = Tokenizer.from_file(vocab_file) + self.eod_id = self.tokenizer.token_to_id("<|endoftext|>") + self.pad_id = self.tokenizer.token_to_id("<|padding|>") + + @property + def vocab_size(self): + return self.tokenizer.get_vocab_size() + + @property + def vocab(self): + return self.tokenizer.get_vocab() + + @property + def inv_vocab(self): + return self.tokenizer.decoder + + def tokenize(self, text: str): + return self.tokenizer.encode(text).ids + + def tokenize_batch(self, text_batch: Union[List[str], str]): + return self.tokenizer.encode_batch(text_batch) + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class HFGPT2Tokenizer(AbstractTokenizer): + """Designed to Integrate the pretrained OpenAI GPT2 Tokenizers from HF""" + + def __init__(self, vocab_file=None, fast=True): + name = "HFGPT2Tokenizer" + if fast: + name += "Fast" + super().__init__(name) + if vocab_file is None: + vocab_file = "gpt2" + if fast: + self.tokenizer = GPT2TokenizerFast.from_pretrained(vocab_file) + else: + self.tokenizer = GPT2Tokenizer.from_pretrained(vocab_file) + + self.tokenizer.add_special_tokens({"pad_token": "<|padding|>"}) + self.eod_id = self.tokenizer.eos_token_id + self.pad_id = self.tokenizer.pad_token_id + + @property + def vocab_size(self): + return len(self.tokenizer) + + @property + def vocab(self): + return self.tokenizer.get_vocab() + + @property + def inv_vocab(self): + return self.tokenizer._tokenizer.decoder + + def tokenize(self, text: str): + return self.tokenizer.encode(text) + + def tokenize_batch(self, text_batch: Union[List[str], str]): + if isinstance(text_batch, str): + text_batch = [text_batch] + return [self.tokenize(t) for t in text_batch] + + def detokenize(self, token_ids): + return self.tokenizer.decode(token_ids) + + @property + def eod(self): + return self.eod_id + + +class CharLevelTokenizer(AbstractTokenizer): + """Character Level Tokenizer""" + + def __init__(self, vocab_size): + name = "CharLevelTokenizer" + super().__init__(name) + self._vocab_size = vocab_size + self.eod_id = 0 + self.pad_id = 1 + + def clamp(self, n): + return max(32, min(n, self.vocab_size)) + + @property + def vocab_size(self): + return self._vocab_size + + 
    @property
+    def vocab(self):
+        raise NotImplementedError
+
+    @property
+    def inv_vocab(self):
+        raise NotImplementedError
+
+    def decode_token(self, token: int):
+        return str(chr(self.clamp(token)))
+
+    def tokenize(self, text: str):
+        # np.fromstring is deprecated (and removed in modern NumPy); go through
+        # the UTF-8 byte representation explicitly instead.
+        return list(np.frombuffer(text.encode("utf-8"), dtype=np.uint8))
+
+    def tokenize_batch(self, text_batch: Union[List[str], str]):
+        if isinstance(text_batch, list):
+            return [self.tokenize(s) for s in text_batch]
+        else:
+            return self.tokenize(text_batch)
+
+    def detokenize(self, token_ids):
+        return "".join(list(map(self.decode_token, token_ids)))
+
+    @property
+    def eod(self):
+        return self.eod_id
+
+
+class TiktokenTokenizer(AbstractTokenizer):
+    """Tokenizer from OpenAI's tiktoken implementation"""
+
+    def __init__(self, vocab_file):
+        try:
+            import tiktoken
+        except ModuleNotFoundError:
+            print("Please install tiktoken: (https://github.com/openai/tiktoken)")
+            raise Exception
+
+        name = "TiktokenTokenizer"
+        super().__init__(name)
+
+        self.tokenizer = tiktoken.get_encoding(vocab_file)
+        self.eod_id = self.tokenizer.eot_token
+        self.pad_id = None
+
+    @property
+    def vocab_size(self):
+        return self.tokenizer.n_vocab
+
+    @property
+    def vocab(self):
+        raise NotImplementedError(
+            "TiktokenTokenizer does not implement vocabulary access."
+        )
+
+    @property
+    def inv_vocab(self):
+        raise NotImplementedError(
+            "TiktokenTokenizer does not implement vocabulary access. \
+                To get the idx-th token in vocabulary, use tokenizer.decode([idx]) ."
+        )
+
+    def tokenize(self, text: str):
+        return self.tokenizer.encode(text)  # , allowed_special="all")
+
+    def tokenize_batch(self, text_batch: List[str]):
+        return self.tokenizer.encode_batch(text_batch, allowed_special="all")
+
+    def detokenize(self, token_ids):
+        return self.tokenizer.decode(tokens=token_ids, errors="strict")
+
+    @property
+    def eod(self):
+        return self.eod_id
+
+    @property
+    def pad(self):
+        raise NotImplementedError
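A quick illustrative round trip through CharLevelTokenizer (not part of the patch): printable ASCII byte values pass through clamp() unchanged, so tokenize/detokenize invert each other for such input.

```python
# Illustrative round trip; assumes the byte-level tokenize fix above.
tok = CharLevelTokenizer(vocab_size=512)
ids = tok.tokenize("Hello!")  # UTF-8 byte values: [72, 101, 108, 108, 111, 33]
assert tok.detokenize(ids) == "Hello!"
```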
+ """ + data = [] + with open(input_path, "r", encoding="utf-8") as f: + for line in f: + data.append(json.loads(line.rstrip("\n|\r"))) + if not quiet: + print("Loaded {} records from {}".format(len(data), input_path)) + return data + + +def json_iterator(input_dir, text_key="text"): + all_jsonls = glob(f"{input_dir}/*.jsonl") + glob(f"{input_dir}/*.json") + for j in all_jsonls: + data = load_jsonl(j) + for doc in data: + yield doc[text_key] + + +def train_tokenizer( + input_dir: str, save_path: str, tokenizer_type: str = "BPE", vocab_size: int = 52000 +): + """ + Trains a tokenizer on all the json files in `input_dir` and saves it to `save_path` + + :param input_dir: input directory containing jsonl files + :param save_path: path to save tokenizer to + :param tokenizer_type: type of tokenizer to train. + :param vocab_size: int, size of tokenizer's vocab + :return: + """ + + if tokenizer_type == "BPE": + model = models.BPE() + else: + raise NotImplementedError(f"Tokenizer type {tokenizer_type} not implemented") + tokenizer = Tokenizer(model) + + # Customize pre-tokenization and decoding + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) + tokenizer.normalizer = NFKC() + + # And then train + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, special_tokens=["<|endoftext|>", "<|padding|>"] + ) + tokenizer.train_from_iterator(json_iterator(input_dir), trainer) + + # And Save it + if save_path: + tokenizer.save(save_path, pretty=True) + print(f"Tokenizer saved at {save_path}") + + +def parse_args(input_args=None): + parser = argparse.ArgumentParser( + description="script for training a multilingual " + "HF tokenizer on CC dumps with upweighting for low resource languages" + ) + parser.add_argument( + "--json_input_dir", + type=str, + help="Path to folder containing tokenizer training data in jsonl format", + ) + parser.add_argument( + "--tokenizer_output_path", + type=str, + help="Path to which your trained tokenizer will be saved (should end in .json)", + ) + parser.add_argument( + "--tokenizer_type", + type=str, + help="type of tokenizer to train, currently only BPE is supported", + choices=["BPE"], + default="BPE", + ) + parser.add_argument( + "-v", + "--vocab_size", + help="vocabulary size of tokenizer, default=52k", + type=int, + default=52000, + ) + args_parsed = parser.parse_args(input_args) + return args_parsed + + +def main(args): + train_tokenizer( + args.json_input_dir, + save_path=args.tokenizer_output_path, + tokenizer_type=args.tokenizer_type, + vocab_size=args.vocab_size, + ) + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/megatron/training.py b/megatron/training.py new file mode 100644 index 0000000000000000000000000000000000000000..1965faea8cf38ebb4ac7451ce893de04feb6345b --- /dev/null +++ b/megatron/training.py @@ -0,0 +1,1720 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This file has been modified from its original version +# + +"""Pretrain utilities.""" +from datetime import datetime +from functools import partial +from collections import defaultdict + +import math +import sys +from contextlib import nullcontext + +import torch +import torch.nn.functional as F +import deepspeed +from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler +import numpy as np + +from megatron.utils import ( + Timers, + init_wandb, + get_ltor_masks_and_position_ids, + reduce_losses, +) + +from megatron import print_rank_0, mpu +from megatron.model import ( + GPT2ModelPipe, + SoftEmbedding, + get_params_for_weight_decay_optimization, + mark_norms_for_sequence_parallel_grad_sync, +) +from megatron.mpu.mappings import gather_from_model_parallel_region +from megatron.checkpointing import load_checkpoint, save_checkpoint +from megatron.data.data_utils import ( + build_train_valid_test_data_loaders, + shift_and_wrap_data_loaders, +) +from megatron.initialize import initialize_megatron +from megatron.learning_rates import AnnealingLR +from megatron.logging import tb_wandb_log, training_log +from megatron.utils import ( + OverflowMonitor, + get_noise_scale_logger, + get_total_params, + CharCounter, +) +from megatron.model.gpt2_model import cross_entropy +from megatron.mpu import vocab_parallel_cross_entropy + +from pickle import dump +import os + + +def mup_weights_reinit(neox_args, model): + def has_method(o, name): + return callable(getattr(o, name, None)) + + for layer in model.modules(): + # This normally would happen in set_base_shapes if we actually were able to use the MuReadout class + if hasattr(layer, "mup_rescale_parameters") and layer.mup_rescale_parameters: + layer._rescale_parameters() + + if has_method(layer, "mup_reinitialize_weights"): + layer.mup_reinitialize_weights(neox_args) + + +def save_base_shapes(neox_args, base_shapes, use_cache): + + # Instantiation of the base model fails in the init function (init_functions.py) because we haven't called set_base_shapes on it at this point, so disable it temporarily here + neox_args.use_mup = False + + base_model = GPT2ModelPipe( + neox_args=neox_args, + num_tokentypes=0, + parallel_output=True if neox_args.train_impl != "rm" else False, + topology=mpu.get_topology(), + use_cache=use_cache, + ) + + if not neox_args.is_pipe_parallel: + base_model = base_model.to_sequential() + + try: + import mup + except ModuleNotFoundError: + print("Please install mup https://github.com/microsoft/mup") + raise Exception + + base_shapes = mup.get_shapes(base_model) + + del base_model + + old_hidden_size = neox_args.hidden_size + neox_args.hidden_size = neox_args.hidden_size * neox_args.mup_width_scale + + delta_model = GPT2ModelPipe( + neox_args=neox_args, + num_tokentypes=0, + parallel_output=True if neox_args.train_impl != "rm" else False, + topology=mpu.get_topology(), + use_cache=use_cache, + ) + + if not neox_args.is_pipe_parallel: + delta_model = delta_model.to_sequential() + + delta_shapes = mup.get_shapes(delta_model) + + # change back + neox_args.use_mup = True + neox_args.hidden_size = 
old_hidden_size + + save_shapes = f"{neox_args.base_shapes_file}.{torch.distributed.get_rank()}" + print(f"saving base shapes at {save_shapes}") + mup.make_base_shapes(base_shapes, delta_shapes, savefile=save_shapes) + print(f"base shapes saved...exiting") + sys.exit(1) + + +def mup_coord_check(neox_args, timers, lr_scheduler, train_data_iterator): + from megatron.mup_substitute import get_coord_data + from mup.coord_check import plot_coord_data + + def lazy_model(hidden_size): + def gen(): + old_hidden_size = neox_args.hidden_size + neox_args.hidden_size = hidden_size + + model, optimizer, _, _ = setup_model_and_optimizer( + neox_args=neox_args, use_cache=False + ) + + neox_args.hidden_size = old_hidden_size + + return model + + return gen + + models = {} + + # Hidden size needs to be divisible by num attention heads + for hidden_size in (neox_args.num_attention_heads * (2**p) for p in range(2, 9)): + models[hidden_size] = lazy_model(hidden_size) + + neox_args.use_mup = True + df_up = get_coord_data( + neox_args, timers, lr_scheduler, models, train_data_iterator, mup=True + ) + neox_args.use_mup = False + df_sp = get_coord_data( + neox_args, timers, lr_scheduler, models, train_data_iterator, mup=False + ) + + plot_coord_data(df_up, save_to=f"coord_check_up.{torch.distributed.get_rank()}.jpg") + plot_coord_data(df_sp, save_to=f"coord_check_sp.{torch.distributed.get_rank()}.jpg") + + print_rank_0("Saved coord check plots... exiting") + sys.exit(1) + + +def update_iterations(neox_args, data_loaders): + """ + Compute the number of train iterations if not specified and num_epochs, updates the neox_args object. + Note that if len(train_dataloader) % gradient_accumulation_steps != 0, this will configure neox + to do as many iterations as possible while ensuring that each example is seen *at most* train_epochs + times. + """ + if (not neox_args.do_train) or (neox_args.train_iters is not None): + pass + elif neox_args.train_iters is None and neox_args.train_epochs is None: + print_rank_0( + "ERROR:Failed to specify either train_epochs or train_iters in config file" + ) + else: + global_rank = torch.distributed.get_rank() + + if global_rank == 0: + train_dataloader = data_loaders["train"] + train_epochs = neox_args.train_epochs + gradient_accumulation_steps = neox_args.gradient_accumulation_steps + + train_dataloader_len = len(train_dataloader) + train_iterations = ( + train_dataloader_len * train_epochs + ) // gradient_accumulation_steps + + train_iters_tensor = torch.cuda.LongTensor([train_iterations]) + else: + train_iters_tensor = torch.cuda.LongTensor([0]) + + torch.distributed.broadcast(train_iters_tensor, src=0) + + neox_args.train_iters = train_iters_tensor[0].item() + + print_rank_0( + f"Training for a total of {neox_args.train_iters} iterations, corresponding to {neox_args.train_epochs} epochs." + ) + + +def pretrain(neox_args): + """Main training program. + + This function will run the following in the order provided: + 1) initialize Megatron. + 2) get train/val/test datasets. + 3) setup model, optimizer and lr schedule. + 4) configure data loading + 5) train the model. + + Arguments: + neox_args: an instance of NeoXArgs containing the configuration for pretrain + + """ + # setup logging and timers + init_wandb(neox_args=neox_args) + timers = Timers( + use_wandb=neox_args.use_wandb, + tensorboard_writer=neox_args.tensorboard_writer, + comet_experiment=neox_args.comet_experiment, + ) + + # Initialize and get arguments, timers, and Tensorboard writer. 
+ initialize_megatron(neox_args=neox_args) + + # Create data loaders + timers("train/valid/test data loaders").start() + data_loaders = build_train_valid_test_data_loaders(neox_args=neox_args) + update_iterations(neox_args=neox_args, data_loaders=data_loaders) + timers("train/valid/test data loaders").stop() + + # Model, optimizer, and learning rate. + timers("model and optimizer").start() + model, optimizer, lr_scheduler, reference_model = setup_model_and_optimizer( + neox_args=neox_args, use_cache=False, iteration=neox_args.iteration + ) + timers("model and optimizer").stop() + + # Make and configure iterators + timers("train/valid/test data iterators").start() + ( + train_data_iterator, + valid_data_iterator, + test_data_iterator, + ) = shift_and_wrap_data_loaders(neox_args=neox_args, data_loaders=data_loaders) + timers("train/valid/test data iterators").stop() + + if neox_args.use_mup and neox_args.coord_check: + mup_coord_check(neox_args, timers, lr_scheduler, train_data_iterator) + + # Print setup timing. + print_rank_0("done with setups ...") + timers.log( + [ + "train/valid/test data loaders", + "model and optimizer", + "train/valid/test data iterators", + ] + ) + print_rank_0("training ...") + + iteration = neox_args.iteration + # edge case: save step 0 checkpoint if requested and we're starting from step 0 + if ( + neox_args.save + and neox_args.extra_save_iters + and 0 in neox_args.extra_save_iters + and iteration == 0 + ): + save_checkpoint( + neox_args=neox_args, + iteration=iteration, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + ) + + if neox_args.do_train and neox_args.train_iters > 0: + iteration = train( + neox_args=neox_args, + timers=timers, + model=model, + reference_model=reference_model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + train_data_iterator=train_data_iterator, + valid_data_iterator=valid_data_iterator, + ) + + if neox_args.do_valid: + prefix = "the end of training for val data" + evaluate_and_print_results( + neox_args=neox_args, + prefix=prefix, + forward_step_func=forward_step, + data_iterator=valid_data_iterator, + model=model, + iteration=iteration, + verbose=False, + timers=timers, + reference_model=reference_model, + ) + + if neox_args.save and iteration != 0: + save_checkpoint( + neox_args=neox_args, + iteration=iteration, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + ) + + if neox_args.do_test: + # Run on test data. + prefix = "the end of training for test data" + evaluate_and_print_results( + neox_args=neox_args, + prefix=prefix, + forward_step_func=forward_step, + data_iterator=test_data_iterator, + model=model, + iteration=iteration, + verbose=True, + timers=timers, + chart_name="test", + reference_model=reference_model, + ) + + +def _get_batch(neox_args, tokenizer, keys, data, datatype, label_mask_zero=False): + """Support function for get_batch / get_batch pipe (to avoid code repetition)""" + data_b = mpu.broadcast_data(keys, data, datatype) + token_key = keys[0] + label_key = keys[1] if len(keys) > 1 else None + # Unpack. 
+    tokens_ = data_b[token_key].long()
+    if label_key in data_b:
+        label_mask = (data_b[label_key].long() >= 0)[:, 1:].contiguous()
+        labels = torch.where(
+            data_b[label_key].long() >= 0,
+            data_b[label_key].long(),
+            torch.zeros_like(data_b[label_key].long()),
+        )[:, 1:].contiguous()
+    else:
+        label_mask = (tokens_.long() >= 0)[:, 1:].contiguous()
+        labels = tokens_[:, 1:].contiguous()
+    if label_mask_zero:
+        labels = labels * label_mask
+    tokens = tokens_[:, :-1].contiguous()
+
+    # Get the masks and position ids.
+    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
+        data=tokens,
+        eod_token=neox_args.tokenizer.eod,
+        eod_mask_loss=neox_args.eod_mask_loss,
+        sliding_window_width=neox_args.sliding_window_width,
+    )
+
+    # combine loss masks from get_ltor_masks_and_position_ids with loss masks from data
+    loss_mask = label_mask.to(loss_mask.dtype) * loss_mask
+    return tokens, labels, loss_mask, attention_mask, position_ids
+
+
+def get_batch(neox_args, data_iterator):
+    """Generate a batch"""
+
+    # Items and their type.
+    if neox_args.train_impl in ["normal", "kto"]:
+        keys = ["text", "label"] if neox_args.train_label_data_paths else ["text"]
+    elif neox_args.train_impl in ["dpo", "rm"]:
+        keys = (
+            [["pos", "pos_label"], ["neg", "neg_label"]]
+            if neox_args.pos_train_label_data_paths
+            else [["pos"], ["neg"]]
+        )
+    datatype = torch.int64
+
+    # Broadcast data.
+    if data_iterator is not None:
+        data = next(data_iterator)
+    else:
+        data = None
+    if neox_args.train_impl == "normal":
+        return _get_batch(
+            neox_args=neox_args,
+            tokenizer=neox_args.tokenizer,
+            keys=keys,
+            data=data,
+            datatype=datatype,
+        )
+    elif neox_args.train_impl == "kto":
+        assert (
+            neox_args.train_micro_batch_size_per_gpu > 1
+        ), "For KTO training, the train_micro_batch_size_per_gpu must be greater than 1."
+        tup = _get_batch(
+            neox_args=neox_args,
+            tokenizer=neox_args.tokenizer,
+            keys=keys,
+            data=data,
+            datatype=datatype,
+        )
+        # Remove the last token from the reward since we predict the next token, so
+        # the reward of token i will be based on the label of token i+1
+        rw_data = mpu.broadcast_data(["reward"], data, torch.float)["reward"][
+            :, :-1
+        ].contiguous()
+        ref_data = (
+            mpu.broadcast_data(["ref"], data, torch.float)["ref"][:, :-1].contiguous()
+            if neox_args.precompute_model_name
+            else None
+        )
+        return tup + (rw_data, ref_data)
+    elif neox_args.train_impl in ["dpo", "rm"]:
+        pos_tup = _get_batch(
+            neox_args=neox_args,
+            tokenizer=neox_args.tokenizer,
+            keys=keys[0],
+            data=data,
+            datatype=datatype,
+            label_mask_zero=True,
+        )
+        neg_tup = _get_batch(
+            neox_args=neox_args,
+            tokenizer=neox_args.tokenizer,
+            keys=keys[1],
+            data=data,
+            datatype=datatype,
+            label_mask_zero=True,
+        )
+        if neox_args.precompute_model_name:
+            ref_data = mpu.broadcast_data(["pos_ref", "neg_ref"], data, torch.float)
+        else:
+            ref_data = {"pos_ref": None}
+        return [
+            torch.cat((pos_item, neg_item), dim=0)
+            for pos_item, neg_item in zip(pos_tup, neg_tup)
+        ] + [
+            torch.cat((ref_data["pos_ref"], ref_data["neg_ref"]), dim=0)[
+                :, :-1
+            ].contiguous()
+            if ref_data["pos_ref"] is not None
+            else None
+        ]
+
+
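To make the dpo/rm batch layout concrete: chosen ("pos") and rejected ("neg") sequences are concatenated along the batch dimension, so a micro-batch of B pairs becomes 2B rows that downstream code recovers with torch.chunk. A tiny self-contained sketch, with made-up shapes:

```python
import torch

B, T = 4, 16  # illustrative micro-batch size and sequence length
pos = torch.randint(0, 100, (B, T))
neg = torch.randint(0, 100, (B, T))

batch = torch.cat((pos, neg), dim=0)       # [2B, T], as built in get_batch
chosen, rejected = torch.chunk(batch, 2, dim=0)  # as split in forward_step
assert torch.equal(chosen, pos) and torch.equal(rejected, neg)
```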
+ keys = ["text", "label"] if neox_args.train_label_data_paths else ["text"] + datatype = torch.int64 + + tokens, labels, loss_mask, attention_mask, position_ids = _get_batch( + neox_args, neox_args.tokenizer, keys, data, datatype + ) + if curr_scheduler is not None: + # iteration + 1 to align with how/when DeepSpeed updates the buffers + curriculum_seqlen = curr_scheduler.update_difficulty(neox_args.iteration + 1) + if curriculum_seqlen < tokens.size()[1]: + # seqlen-based curriculum learning + # input_ids, position_ids, labels have size [batch size, seqlen] + # input_ids = input_ids[:, :curriculum_seqlen].contiguous() + tokens = tokens[:, :curriculum_seqlen].contiguous() + position_ids = position_ids[:, :curriculum_seqlen].contiguous() + if labels is not None: + labels = labels[:, :curriculum_seqlen].contiguous() + if loss_mask is not None: + loss_mask = loss_mask[:, :curriculum_seqlen].contiguous() + # attention_mask has size [1, 1, seqlen, seqlen] + attention_mask = attention_mask[ + :, :, :curriculum_seqlen, :curriculum_seqlen + ].contiguous() + + # unpack data + return (tokens, position_ids, attention_mask), (labels, loss_mask) + + +def get_batch_sequential(forward_input, neox_args): + """A modification of get_batch() to work with the latest batch instead of an iterator.""" + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + data=forward_input[0], + eod_token=neox_args.tokenizer.eod, + eod_mask_loss=neox_args.eod_mask_loss, + ) + return (forward_input[0], forward_input[1], attention_mask) + + +def average_losses_across_data_parallel_group(losses): + """Reduce a tensor of losses across all GPUs.""" + averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses]) + torch.distributed.all_reduce(averaged_losses, group=mpu.get_data_parallel_group()) + averaged_losses = averaged_losses / torch.distributed.get_world_size( + group=mpu.get_data_parallel_group() + ) + + return averaged_losses + + +def mb_moe_loss_func(args, loss_mask, output_tensor=None): + from megatron.model import megablocks_utils + from megatron.model.megablocks_utils import moe + + # NOTE: For pipeline parallelism this function will be run on the + # non-final stages to calculate load balancing loss contribution + # for the MoE layers within the stage. For these cases, output_tensor + # will be None. + loss, loss_dict = (None, {}) + if False: + assert output_tensor is not None + loss, loss_dict = loss_func(loss_mask, output_tensor) + assert loss.numel() == 1 + + # NOTE: If recompute is enabled we will collect duplicate load + # balancing loss contributions. Prune these before calculating + # the load balancing loss. + if args.checkpoint_activations: + # Ignore load balancing loss contributions compute during + # the forward pass if recompute is turned on. + load_balancing_loss_data = moe.get_load_balancing_loss() + if args.num_layers * 2 == len(load_balancing_loss_data): + load_balancing_loss_data = load_balancing_loss_data[args.num_layers :] + moe.clear_load_balancing_loss() + for x in load_balancing_loss_data: + moe.save_load_balancing_loss(x) + + # Compute the load balancing loss for all MoE layers. + megablocks_args = args = megablocks_utils.as_megablocks_args(args) + lbl = moe.batched_load_balancing_loss(megablocks_args) + moe.clear_load_balancing_loss() + + # Average the load balancing loss across data parallel + # replicas and save for logging. 
+ averaged_lbl = average_losses_across_data_parallel_group([lbl]) + loss_dict["load balancing loss"] = averaged_lbl[0] + return averaged_lbl, loss_dict + + +def get_logp(logits, labels, force_fp32=False): + # Rather than reimplementing logp, cross entropy loss is actually logp, just inverted. + if force_fp32: + logits = logits.float() + return -vocab_parallel_cross_entropy(logits, labels) + + +def get_pos_neg_logp(logits, labels, force_fp32=False): + # Rather than reimplementing logp, cross entropy loss is actually logp, just inverted. + if force_fp32: + logits = logits.float() + return torch.chunk(-vocab_parallel_cross_entropy(logits, labels), 2, 0) + + +def forward_step( + data_iterator, + model, + neox_args, + timers, + return_logits=False, + is_train=False, + reference_model=None, +): + """Forward step.""" + if neox_args.is_pipe_parallel: + return model.eval_batch(data_iterator, return_logits=return_logits) + + # Get the batch. + if neox_args.memory_profiling and neox_args.iteration: + torch.cuda.nvtx.range_push(f"Get batch") + if timers is not None: + timers("batch generator").start() + if neox_args.train_impl == "normal": + tokens, labels, loss_mask, attention_mask, position_ids = get_batch( + neox_args=neox_args, data_iterator=data_iterator + ) + elif neox_args.train_impl == "kto": + ( + tokens, + labels, + loss_mask, + attention_mask, + position_ids, + rewards, + ref_logp, + ) = get_batch(neox_args=neox_args, data_iterator=data_iterator) + if neox_args.train_impl in ["dpo", "rm"]: + tokens, labels, loss_mask, attention_mask, position_ids, ref_logp = get_batch( + neox_args=neox_args, data_iterator=data_iterator + ) + + if timers is not None: + timers("batch generator").stop() + if neox_args.memory_profiling: + torch.cuda.nvtx.range_pop() + + if neox_args.memory_profiling: + torch.cuda.nvtx.range_push(f"Forward pass") + metrics = {} + if neox_args.train_impl == "normal": + # Sequential returns moe_losses, but this is not yet supported by pipe parallel + maybe_tuple = model((tokens, position_ids, attention_mask), neox_args=neox_args) + if type(maybe_tuple) is tuple: + outputs, moe_losses = maybe_tuple + else: + outputs = maybe_tuple + moe_losses = [] + if ( + is_train + and neox_args.curriculum_learning + and neox_args.curriculum_seqlen < neox_args.seq_length + ): + loss_mask = loss_mask[:, : neox_args.curriculum_seqlen].contiguous() + labels = labels[:, : neox_args.curriculum_seqlen].contiguous() + main_loss = cross_entropy( + outputs, (labels, loss_mask), _fp16=neox_args.fp16_lm_cross_entropy + ) + if neox_args.moe_num_experts > 1: + if neox_args.moe_type == "deepspeed": + moe_loss = neox_args.moe_loss_coeff * sum(m.item() for m in moe_losses) + elif neox_args.moe_type == "megablocks": + moe_loss = mb_moe_loss_func(neox_args, loss_mask, outputs)[0] + else: + raise ValueError(f"Unsupported moe_type: {neox_args.moe_type}") + else: + moe_loss = 0.0 + loss = main_loss + moe_loss + elif neox_args.train_impl == "rm": + maybe_tuple = model((tokens, position_ids, attention_mask), neox_args=neox_args) + if type(maybe_tuple) is tuple: + outputs, _ = maybe_tuple + else: + outputs = maybe_tuple + pos, neg = torch.chunk(outputs, 2, 0) + pos_loss_mask, neg_loss_mask = torch.chunk(loss_mask, 2, 0) + # We assume that each pos, neg pair occur in the same order + # e.g. second nonzero pos is the corresponding second nonzero neg + # and that there are also an equal number of pos and neg in each sequence. 
+ pos_indx = pos_loss_mask.nonzero() + neg_indx = neg_loss_mask.nonzero() + # indx[:, 0] is the batch index, indx[:, 1] is the token index, we only care about the token index. + pos_indx = pos_indx[:, 1].unsqueeze(1) + neg_indx = neg_indx[:, 1].unsqueeze(1) + pos = torch.gather(pos.squeeze(), dim=1, index=pos_indx) + neg = torch.gather(neg.squeeze(), dim=1, index=neg_indx) + with torch.no_grad(): + metrics["pos_values"] = pos.clone().detach().mean() + metrics["neg_values"] = neg.clone().detach().mean() + metrics["margin"] = (pos - neg).clone().detach().mean() + metrics["accuracy"] = ((pos - neg) > 0).clone().detach().float().mean() + loss = (-F.logsigmoid(pos - neg).mean()) + ( + (neox_args.z_loss * (pos**2 + neg**2)).mean() + ) + elif neox_args.train_impl == "dpo": + # Based on https://github.com/eric-mitchell/direct-preference-optimization/blob/main/trainers.py#L90 + with torch.inference_mode(): + # So we can gather token logps... + token_logp_labels = labels.clone() + pos_loss_mask, neg_loss_mask = torch.chunk(loss_mask, 2, 0) + if neox_args.dpo_reference_free: + ref_pos = 0 + ref_neg = 0 + elif ref_logp is None: + ref_maybe_tuple = reference_model( + (tokens, position_ids, attention_mask), neox_args=neox_args + ) + if type(ref_maybe_tuple) is tuple: + # We should ignore MoE losses yeah? + ref_outputs, _ = ref_maybe_tuple + else: + ref_outputs = ref_maybe_tuple + ref_pos, ref_neg = get_pos_neg_logp( + ref_outputs, token_logp_labels, neox_args.dpo_fp32 + ) + else: + ref_pos, ref_neg = torch.chunk(ref_logp, 2, 0) + ref_pos = (ref_pos * pos_loss_mask).sum(-1) + ref_neg = (ref_neg * neg_loss_mask).sum(-1) + chosen_maybe_tuple = model( + (tokens, position_ids, attention_mask), neox_args=neox_args + ) + if type(chosen_maybe_tuple) is tuple: + # We should ignore MoE losses yeah? + chosen_outputs, _ = chosen_maybe_tuple + else: + chosen_outputs = chosen_maybe_tuple + chosen_pos, chosen_neg = get_pos_neg_logp( + chosen_outputs, token_logp_labels, neox_args.dpo_fp32 + ) + chosen_pos = (chosen_pos * pos_loss_mask).sum(-1) + chosen_neg = (chosen_neg * neg_loss_mask).sum(-1) + with torch.no_grad(): + # Collect metrics... + if not neox_args.dpo_reference_free: + metrics["ref_neg"] = ref_neg.clone().detach().mean() + metrics["ref_pos"] = ref_pos.clone().detach().mean() + metrics["chosen_neg"] = chosen_neg.clone().detach().mean() + metrics["chosen_pos"] = chosen_pos.clone().detach().mean() + if not neox_args.dpo_reference_free: + chosen_rewards = neox_args.dpo_beta * ( + chosen_pos.clone().detach() - ref_pos.clone().detach() + ) + rejected_rewards = neox_args.dpo_beta * ( + chosen_neg.clone().detach() - ref_neg.clone().detach() + ) + metrics["chosen_rewards"] = chosen_rewards.mean() + metrics["rejected_rewards"] = rejected_rewards.mean() + reward_acc = (chosen_rewards > rejected_rewards).float() + metrics["reward_acc"] = reward_acc.mean() + metrics["margins"] = (chosen_rewards - rejected_rewards).mean() + pi_logrations = chosen_pos - chosen_neg + ref_logrations = ref_pos - ref_neg + logits = pi_logrations - ref_logrations + loss = -F.logsigmoid(neox_args.dpo_beta * logits).mean() + elif neox_args.train_impl == "kto": + # Based on https://github.com/huggingface/trl/blob/main/trl/trainer/kto_trainer.py + # Except we don't have an extra input for KL logp, we just split the batch in half + with torch.no_grad(): + # So we can gather token logps... + token_logp_labels = labels.clone() + token_logp_labels[token_logp_labels == -100] = 0 + if ref_logp is None: + # Did not precompute logits.... 
+                ref_maybe_tuple = reference_model(
+                    (tokens, position_ids, attention_mask), neox_args=neox_args
+                )
+                if type(ref_maybe_tuple) is tuple:
+                    # Ignore any MoE losses from the reference model.
+                    ref_outputs, _ = ref_maybe_tuple
+                else:
+                    ref_outputs = ref_maybe_tuple
+                # gather across the tensor parallel group
+                ref_outputs = gather_from_model_parallel_region(ref_outputs)
+
+                ref_logp = get_logp(ref_outputs, token_logp_labels, neox_args.kto_fp32)
+            ref_logp = ref_logp * loss_mask
+            scaling = (rewards.sum(-1) > 0.001).float() * neox_args.kto_desirable_weight
+            scaling += (
+                rewards.sum(-1) < -0.001
+            ).float() * neox_args.kto_undesirable_weight
+            pos_mask = (rewards > 0.001).float()
+            neg_mask = (rewards < -0.001).float()
+        chosen_maybe_tuple = model(
+            (tokens, position_ids, attention_mask), neox_args=neox_args
+        )
+        if type(chosen_maybe_tuple) is tuple:
+            # Ignore any MoE losses from the policy model as well.
+            chosen_outputs, _ = chosen_maybe_tuple
+        else:
+            chosen_outputs = chosen_maybe_tuple
+        chosen_outputs = gather_from_model_parallel_region(chosen_outputs)
+        chosen_logp = get_logp(chosen_outputs, token_logp_labels, neox_args.kto_fp32)
+        chosen_logp = chosen_logp * loss_mask
+        with torch.no_grad():
+            # Collect metrics...
+            metrics["ref_logp"] = ref_logp.clone().detach().sum(-1).mean()
+            metrics["policy_logp"] = chosen_logp.clone().detach().sum(-1).mean()
+            metrics["pos_ref_logp"] = (
+                (ref_logp * pos_mask).clone().detach().sum(-1).mean()
+            )
+            metrics["neg_ref_logp"] = (
+                (ref_logp * neg_mask).clone().detach().sum(-1).mean()
+            )
+            metrics["pos_policy_logp"] = (
+                (chosen_logp * pos_mask).clone().detach().sum(-1).mean()
+            )
+            metrics["neg_policy_logp"] = (
+                (chosen_logp * neg_mask).clone().detach().sum(-1).mean()
+            )
+            metrics["kl"] = (
+                chosen_logp.clone().detach() - ref_logp.clone().detach()
+            ).sum() / loss_mask.sum()
+            policy_rewards = (
+                neox_args.kto_beta
+                * rewards
+                * (chosen_logp.clone().detach() - ref_logp.clone().detach())
+            )
+            reward_acc = (policy_rewards.sum(-1) > 0.0).float()
+            metrics["reward_acc"] = reward_acc.mean()
+            metrics["policy_rewards"] = policy_rewards.sum()
+        pol_logp1, pol_logp2 = torch.chunk(chosen_logp, 2, 0)
+        ref_logp1, ref_logp2 = torch.chunk(ref_logp, 2, 0)
+        reward1, reward2 = torch.chunk(rewards, 2, 0)
+        scaling1, scaling2 = torch.chunk(scaling, 2, 0)
+        kl1 = torch.clamp((pol_logp1 - ref_logp1).sum(-1), min=0).mean()
+        kl2 = torch.clamp((pol_logp2 - ref_logp2).sum(-1), min=0).mean()
+        log_ratio1 = pol_logp1 - ref_logp1
+        log_ratio2 = pol_logp2 - ref_logp2
+
+        # TODO: Add pack_until_overflow sequence support
+        loss = (
+            0.5
+            * scaling1.mean(-1)
+            * (
+                1
+                - torch.sigmoid(
+                    (
+                        neox_args.kto_beta
+                        * reward1.mean(-1)
+                        * (log_ratio1.sum(-1) - kl2.clone().detach())
+                    )
+                )
+            )
+        ) + (
+            0.5
+            * scaling2.mean(-1)
+            * (
+                1
+                - torch.sigmoid(
+                    (
+                        neox_args.kto_beta
+                        * reward2.mean(-1)
+                        * (log_ratio2.sum(-1) - kl1.clone().detach())
+                    )
+                )
+            )
+        )
+        loss = loss.mean()
+    if neox_args.memory_profiling:
+        torch.cuda.nvtx.range_pop()
+    if return_logits:
+        return loss, outputs, metrics
+    return loss, metrics
+
+
+def get_model(neox_args, use_cache=False):
+    """Build the model."""
+
+    # Build model on CPU.
+    print_rank_0("building GPT2 model ...")
+
+    # Temporarily disable mup so that the base model does not use the mup init
+    # functions before set_base_shapes is called below.
+    # If mup isn't being used anyway, this has no effect.
+    old_use_mup = neox_args.use_mup
+    neox_args.use_mup = False
+
+    if neox_args.zero_stage in [2, 3]:
+        if neox_args.pipe_parallel_size == 1:
+            print_rank_0(
+                "ZeRO stage 2/3 and the PipelineModule are incompatible, please set 'pipe_parallel_size' to 0 instead"
+            )
+            exit()
+        if neox_args.pipe_parallel_size > 1:
+            print_rank_0(
+                "ZeRO stage 2/3 and pipeline parallelism are not supported simultaneously"
+            )
+            exit()
+        if neox_args.model_parallel_size > 1:
+            print_rank_0(
+                "ZeRO stage 2/3 and model parallelism are not currently supported simultaneously"
+            )
+            exit()
+
+    with deepspeed.zero.Init(
+        config_dict_or_path=neox_args.deepspeed_config
+    ) if neox_args.zero_stage == 3 else nullcontext() as gs:
+        model = GPT2ModelPipe(
+            neox_args=neox_args,
+            num_tokentypes=0,
+            parallel_output=True if neox_args.train_impl != "rm" else False,
+            topology=mpu.get_topology(),
+            use_cache=use_cache,
+        )
+
+        ### soft prompt tuning stuff ###
+        if neox_args.soft_prompt_tuning is not None and neox_args.soft_prompt_tuning.get(
+            "enabled", False
+        ):
+            soft_prompt = SoftEmbedding(
+                neox_args,
+                wte=getattr(model, "0").word_embeddings,
+                n_tokens=neox_args.soft_prompt_tuning.get("n_tokens", 10),
+                init_string=neox_args.soft_prompt_tuning.get("init_string", ""),
+                init_range=neox_args.soft_prompt_tuning.get("init_range", 0.5),
+            )
+            model.insert_layers(
+                layers=soft_prompt, idx=1
+            )  # insert the soft prompt layer directly after the word embeddings
+
+            # freeze everything but the soft prompt
+            for name, param in model.named_parameters():
+                if "soft_embedding" not in name:
+                    param.requires_grad = False
+
+        if not neox_args.is_pipe_parallel:
+            # Export the PipeParallel model to an nn.Sequential model to avoid the
+            # overhead of DeepSpeed's pipe parallel training.
+            model = model.to_sequential()
+
+        neox_args.use_mup = old_use_mup
+
+        if neox_args.use_mup:
+            try:
+                import mup
+            except ModuleNotFoundError:
+                print("Please install mup https://github.com/microsoft/mup")
+                raise Exception
+
+            base_shapes = f"{neox_args.base_shapes_file}.{torch.distributed.get_rank()}"
+
+            if neox_args.save_base_shapes:
+                save_base_shapes(neox_args, base_shapes, use_cache)
+
+            mup.set_base_shapes(model, base_shapes)
+
+            # Call the mup replacement init functions on the model now that
+            # set_base_shapes has given each weight a .infshape attribute.
+            mup_weights_reinit(neox_args, model)
+
+    if neox_args.deepspeed:
+        # DeepSpeed handles CUDA, FP16, and DDP components.
+        return model
+    else:
+        raise ValueError("Must be using deepspeed to run neox")
+
+
+def get_optimizer(model, neox_args, dummy=False):
+    """Set up the optimizer."""
+    if neox_args.no_load_optim and neox_args.deepspeed:
+        # DeepSpeed still expects an optimizer object, so construct a dummy one.
+        dummy = True
+        neox_args.optimizer = {"params": {"lr": 0.0}}
+        neox_args.optimizer_type = "adam"
+    elif neox_args.no_load_optim:
+        return None, None
+
+    if neox_args.optimizer is None:
+        print_rank_0(
+            "ERROR: Optimizer is None. Either set the optimizer dict in your config (if training) or set no_load_optim in your config (if inference)"
+        )
+        exit()
+    # Build parameter groups (weight decay and non-decay).
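+    # Assumed (illustrative) shape of the groups built below: a list like
+    #   [{"params": [...decayed weights...], "weight_decay": neox_args.weight_decay},
+    #    {"params": [...biases/norm params...], "weight_decay": 0.0}]
+    # so that biases and norm parameters are excluded from weight decay.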
+    param_groups = get_params_for_weight_decay_optimization(model, neox_args)
+    print_rank_0(
+        f'Configuring Optimizer type: {neox_args.optimizer_type} with params: {neox_args.optimizer["params"]}'
+    )
+
+    if neox_args.create_moe_param_group:
+        from deepspeed.moe.utils import (
+            is_moe_param,
+            split_params_into_different_moe_groups_for_optimizer,
+        )
+
+        param_groups = split_params_into_different_moe_groups_for_optimizer(
+            param_groups
+        )
+
+    # Add the model parallel attribute if it is not set.
+    for param_group in param_groups:
+        for param in param_group["params"]:
+            if not hasattr(param, "model_parallel"):
+                param.model_parallel = False
+
+    # Filter out params that don't require a grad (for soft prompt tuning, etc.)
+    _param_groups = []
+    for param_group in param_groups:
+        trainable_params = [p for p in param_group["params"] if p.requires_grad]
+        if dummy:
+            trainable_params = [trainable_params[0]]  # just take the first one
+        param_group["params"] = trainable_params
+        _param_groups.append(param_group)
+        if dummy:
+            # Only need one.
+            break
+    param_groups = _param_groups
+
+    # If we're using mup, then the optimizer must be adam or sgd
+    assert not neox_args.use_mup or (
+        neox_args.optimizer_type.lower() == "adam"
+        or neox_args.optimizer_type.lower() == "sgd"
+    ), f"If use_mup == True, you must specify either the adam or sgd optimizers. You passed: {neox_args.optimizer_type.lower()}"
+
+    if neox_args.optimizer_type.lower() in ["cpu_adam", "cpu_torch_adam"]:
+        if neox_args.optimizer_type.lower() == "cpu_torch_adam":
+            cpu_adam_optimizer = torch.optim.Adam
+        else:
+            from deepspeed.ops.adam import DeepSpeedCPUAdam
+
+            cpu_adam_optimizer = DeepSpeedCPUAdam
+        optimizer = cpu_adam_optimizer(
+            param_groups,
+            weight_decay=neox_args.weight_decay,
+            **neox_args.optimizer["params"],
+        )
+    elif neox_args.optimizer_type.lower() == "onebitadam":
+        assert neox_args.deepspeed
+        optimizer = None
+        # OneBitAdam needs to be instantiated within the DeepSpeed engine to work.
+    elif neox_args.optimizer_type.lower() == "sm3":
+        from .optimizers import SM3
+
+        optimizer = SM3(param_groups, **neox_args.optimizer["params"])
+    elif neox_args.optimizer_type.lower() == "madgrad_wd":
+        from .optimizers import madgrad_wd
+
+        optimizer = madgrad_wd(
+            param_groups,
+            weight_decay=neox_args.weight_decay,
+            **neox_args.optimizer["params"],
+        )
+    elif neox_args.optimizer_type.lower() == "lion":
+        # Use DeepSpeed's FusedLion when ZeRO is enabled; the Megatron Lion
+        # implementation would throw a DeepSpeed error.
+        if neox_args.zero_optimization["stage"] != 0:
+            from deepspeed.ops.lion import FusedLion
+
+            lion_optimizer = FusedLion
+        # if not zero
+        else:
+            from .optimizers import Lion
+
+            lion_optimizer = Lion
+
+        optimizer = lion_optimizer(
+            param_groups,
+            weight_decay=neox_args.weight_decay,
+            **neox_args.optimizer["params"],
+        )
+    elif neox_args.optimizer_type.lower() == "adam":
+        # Use Adam
+        if neox_args.use_mup:
+            try:
+                from mup import MuAdam
+
+                adam_optimizer = MuAdam
+            except ModuleNotFoundError:
+                print("Please install mup https://github.com/microsoft/mup")
+                raise Exception
+        else:
+            if neox_args.use_bnb_optimizer:
+                try:
+                    import bitsandbytes as bnb
+
+                    adam_optimizer = bnb.optim.Adam8bit
+                except ModuleNotFoundError:
+                    print(
+                        "Please install bitsandbytes following https://github.com/facebookresearch/bitsandbytes."
+                    )
+                    raise Exception
+            else:
+                try:
+                    # default to apex as it's slightly faster
+                    from apex.optimizers import FusedAdam as Adam
+                except ImportError:
+                    # if apex isn't installed, use deepspeed's FusedAdam
+                    print(
+                        "WARNING: APEX not installed - defaulting to deepspeed's fused adam"
+                    )
+                    from deepspeed.ops.adam import FusedAdam as Adam
+                adam_optimizer = Adam
+        optimizer = adam_optimizer(
+            param_groups,
+            weight_decay=neox_args.weight_decay,
+            **neox_args.optimizer["params"],
+        )
+    elif neox_args.optimizer_type.lower() == "sgd":
+        try:
+            from mup import MuSGD
+        except ModuleNotFoundError:
+            print("Please install mup https://github.com/microsoft/mup")
+            raise Exception
+        optimizer = MuSGD(
+            param_groups,
+            weight_decay=neox_args.weight_decay,
+            **neox_args.optimizer["params"],
+        )
+    else:
+        raise ValueError(f"Optimizer type {neox_args.optimizer_type} not recognized")
+
+    if neox_args.deepspeed:
+        # fp16 wrapper is not required for DeepSpeed.
+        return optimizer, param_groups
+    else:
+        raise ValueError("Must be using deepspeed to run neox")
+
+
+def get_learning_rate_scheduler(optimizer, neox_args):
+    """Build the learning rate scheduler."""
+    if neox_args.no_load_optim and not neox_args.deepspeed:
+        # TODO: this should be configured as a separate arg
+        return None
+    if neox_args.deepspeed and neox_args.optimizer_type.lower() == "onebitadam":
+        print_rank_0(
+            "WARNING: onebitadam requires the lr scheduler be built by deepspeed - "
+            "Make sure one is added to your deepspeed config"
+        )
+        return None
+
+    # Add the learning rate scheduler.
+    if neox_args.lr_decay_iters is not None:
+        num_iters = neox_args.lr_decay_iters
+    elif neox_args.lr_decay_fraction is not None:
+        num_iters = math.floor(neox_args.train_iters * neox_args.lr_decay_fraction)
+    else:
+        num_iters = neox_args.train_iters
+    num_iters = max(1, num_iters)
+    init_step = 0
+    warmup_iter = neox_args.warmup * num_iters
+    lr_scheduler = AnnealingLR(
+        optimizer,
+        start_lr=neox_args.lr,
+        warmup_iter=warmup_iter,
+        total_iters=num_iters,
+        decay_style=neox_args.lr_decay_style,
+        last_iter=init_step,
+        min_lr=neox_args.min_lr,
+        use_checkpoint_lr_scheduler=neox_args.use_checkpoint_lr_scheduler,
+        override_lr_scheduler=neox_args.override_lr_scheduler,
+        use_mup=neox_args.use_mup,
+    )
+
+    return lr_scheduler
+
+
+def setup_model_and_optimizer(neox_args, use_cache=False, iteration=None):
+    """Set up the model and optimizer."""
+    # Set up the memory profiler before building the model, if requested.
+    if neox_args.memory_profiling:
+        torch.cuda.memory._record_memory_history(
+            True,
+            # keep a maximum 100,000 alloc/free events from before the snapshot
+            trace_alloc_max_entries=100000,
+            trace_alloc_record_context=True,
+        )
+
+    needs_reference_model = (
+        (neox_args.train_impl == "dpo")
+        and (neox_args.precompute_model_name is None)
+        and (not neox_args.dpo_reference_free)
+    ) or ((neox_args.train_impl == "kto") and (neox_args.precompute_model_name is None))
+    model = get_model(neox_args=neox_args, use_cache=use_cache)
+    if needs_reference_model:
+        reference_model = get_model(neox_args=neox_args, use_cache=use_cache)
+    else:
+        reference_model = None
+    optimizer, param_groups = get_optimizer(model=model, neox_args=neox_args)
+    lr_scheduler = get_learning_rate_scheduler(optimizer=optimizer, neox_args=neox_args)
+    if neox_args.deepspeed and needs_reference_model:
+        # Need an optimizer & lr_scheduler for the reference model too, so make a
+        # very small dummy one to keep deepspeed happy...
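+        # (dummy=True makes get_optimizer keep only a single trainable parameter,
+        # so this throwaway optimizer is essentially free.)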
+ ref_optimizer, ref_param_groups = get_optimizer( + model=reference_model, neox_args=neox_args, dummy=True + ) + ref_lr_scheduler = get_learning_rate_scheduler( + optimizer=ref_optimizer, neox_args=neox_args + ) + else: + ref_optimizer, ref_param_groups, ref_lr_scheduler = None, None, None + if neox_args.deepspeed: + print_rank_0("DeepSpeed is enabled.") + _model_params = param_groups if optimizer is None else None + _lr_scheduler = lr_scheduler + + model, optimizer, _, lr_scheduler = deepspeed.initialize( + model=model, + optimizer=optimizer, + args=neox_args, + lr_scheduler=_lr_scheduler, + dist_init_required=False, + model_parameters=_model_params, + # Need to remove the below so that it doesn't conflict with --deepspeed_config required by autotuning + # config_params=neox_args.deepspeed_config, + mpu=mpu if not neox_args.is_pipe_parallel else None, + ) + if needs_reference_model: + reference_model, _, _, _ = deepspeed.initialize( + model=reference_model, + optimizer=ref_optimizer, + args=neox_args, + lr_scheduler=ref_lr_scheduler, + dist_init_required=False, + model_parameters=ref_param_groups, + mpu=mpu if not neox_args.is_pipe_parallel else None, + ) + mark_norms_for_sequence_parallel_grad_sync(model, neox_args) + if neox_args.moe_num_experts > 1 and neox_args.moe_type == "megablocks": + # We need to additionally set this flag to ensure DS parallelism properly handles this foreign MoE. + model.has_moe_layers = True + model.total_params = get_total_params(model.module) + print_rank_0(f' > total params: {"{:,}".format(model.total_params)}') + + if neox_args.is_pipe_parallel: + model.set_has_attention_mask(True) + if neox_args.curriculum_learning: + curr_scheduler = CurriculumScheduler(neox_args.curriculum_learning) + if iteration is not None and iteration > 0: + curr_scheduler.update_difficulty(iteration) + else: + curr_scheduler = None + model.set_batch_fn( + partial( + get_batch_pipe, neox_args=neox_args, curr_scheduler=curr_scheduler + ) + ) + else: + model.module.set_batch_fn( + partial(get_batch_sequential, neox_args=neox_args) + ) + + else: + raise ValueError("Must be using deepspeed to run neox") + + if neox_args.load is not None: + neox_args.iteration = load_checkpoint( + neox_args=neox_args, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + iteration=iteration, + ) + if needs_reference_model: + _ = load_checkpoint( + neox_args=neox_args, + model=reference_model, + optimizer=ref_optimizer, + lr_scheduler=ref_lr_scheduler, + iteration=iteration, + ) + reference_model.eval() + print_rank_0( + f"Loading checkpoint and starting from iteration {neox_args.iteration}" + ) + else: + neox_args.iteration = 0 + + # need this for correct lr scheduling resume from ckpt + # but it will not exist if this is being called for inference + if lr_scheduler is not None: + lr_scheduler.optimizer = model.optimizer + + return model, optimizer, lr_scheduler, reference_model + + +def backward_step(neox_args, timers, optimizer, model, loss): + """Backward step.""" + + # Backward pass. + timers("backward-backward").start() + if neox_args.deepspeed: + model.backward(loss) + else: + raise ValueError("Must be using deepspeed to run neox") + timers("backward-backward").stop() + + if neox_args.deepspeed: + # DeepSpeed backward propagation already addressed all reduce communication. + # Reset the timer to avoid breaking timer logs below. 
+ timers("backward-allreduce").reset() + else: + raise ValueError("Must be using deepspeed to run neox") + + +def train_step( + neox_args, + timers, + data_iterator, + model, + optimizer, + lr_scheduler, + reference_model=None, +): + """Single training step.""" + + # Pipeline parallelism schedules forward/backward/step + if neox_args.is_pipe_parallel: + reduced_loss = train_step_pipe( + neox_args=neox_args, timers=timers, model=model, data_iterator=data_iterator + ) + reduce_metrics = reduced_loss + if ( + neox_args.memory_profiling + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + and torch.distributed.get_rank() == 0 + ): + save_snapshot(neox_args) + else: + losses = [] + metric_dicts = defaultdict(list) + for _ in range(neox_args.gradient_accumulation_steps): + # Forward model for one step. + timers("forward").start() + loss, metric_dict = forward_step( + neox_args=neox_args, + timers=timers, + data_iterator=data_iterator, + model=model, + is_train=True, + reference_model=reference_model, + ) + timers("forward").stop() + losses.append(loss) + for key in metric_dict.keys(): + metric_dicts[key].append(metric_dict[key]) + # Calculate gradients, reduce across processes, and clip. + if ( + neox_args.profile + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + ): + torch.cuda.nvtx.range_push(f"Backward pass") + timers("backward").start() + backward_step( + neox_args=neox_args, + timers=timers, + optimizer=optimizer, + model=model, + loss=loss, + ) + timers("backward").stop() + if ( + neox_args.profile + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + ): + torch.cuda.nvtx.range_pop() + # Update parameters. + if ( + neox_args.profile + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + ): + torch.cuda.nvtx.range_push(f"Optimizer step") + + timers("optimizer").start() + if neox_args.deepspeed: + model.step() + else: + raise ValueError("Must be using deepspeed to run neox") + timers("optimizer").stop() + if ( + neox_args.profile + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + ): + torch.cuda.nvtx.range_pop() + if ( + neox_args.profile + and neox_args.iteration >= neox_args.profile_step_start + and neox_args.iteration <= neox_args.profile_step_stop + and torch.distributed.get_rank() == 0 + ): + save_snapshot(neox_args) + # reduces metrics across machines for logging + reduce_metrics = { + key: reduce_losses(metric_dicts[key]).mean() for key in metric_dicts.keys() + } + reduce_metrics["lm_loss"] = reduce_losses(losses).mean() + + if neox_args.precision == "fp16" and model.optimizer.overflow: + skipped_iter = 1 + else: + skipped_iter = 0 + + collect_loss_for_unit_test(reduce_metrics["lm_loss"]) + return reduce_metrics, skipped_iter + + +def train_step_pipe(neox_args, timers, model, data_iterator): + """Single training step with DeepSpeed's pipeline parallel engine.""" + + assert neox_args.deepspeed + loss = model.train_batch(data_iter=data_iterator) + loss_dict = {"lm_loss": loss} + # Don't break Megatron's timers because we changed code paths. 
+ for t in [ + "forward", + "backward", + "allreduce", + "optimizer", + "batch generator", + "data loader", + ]: + timers(t).reset() + return loss_dict + + +def is_save_iter(neox_args, iteration): + if neox_args.extra_save_iters and iteration in neox_args.extra_save_iters: + return True + + if neox_args.checkpoint_factor: + if neox_args.checkpoint_scale == "linear": + assert float( + neox_args.checkpoint_factor + ).is_integer(), "checkpoint_factor must be a whole number when using linear checkpoint_scale" + return iteration % neox_args.checkpoint_factor == 0 + elif neox_args.checkpoint_scale == "log": + # Check if iteration is a power of checkpoint_factor + assert neox_args.checkpoint_factor > 1 + power = 1 + while power < iteration + 1: + if int(power) == iteration: + return True + power *= neox_args.checkpoint_factor + return False + + return False + + +def train( + neox_args, + timers, + model, + reference_model, + optimizer, + lr_scheduler, + train_data_iterator, + valid_data_iterator, +): + """Train the model function.""" + + # Turn on training mode which enables dropout. + model.train() + + # Tracking loss. + total_loss_dict = {} + + # Iterations. + iteration = neox_args.iteration + + timers("interval time").start() + report_memory_flag = True + + # get noise scale logger (if neox_args.log_gradient_noise_scale is True) + noise_scale_logger = get_noise_scale_logger(neox_args) + + # to monitor if we've skipped many iterations in a row and trigger an early exit + overflow_monitor = OverflowMonitor(optimizer) + + if neox_args.profile: + schedule = torch.profiler.schedule( + wait=neox_args.profile_step_start, + warmup=1, + active=neox_args.profile_step_stop - neox_args.profile_step_start, + ) + prof = torch.profiler.profile( + schedule=schedule, + on_trace_ready=torch.profiler.tensorboard_trace_handler( + neox_args.tensorboard_dir + ), + record_shapes=True, + profile_memory=True, + with_flops=True, + with_modules=True, + with_stack=True, + ) + prof.start() + while iteration < neox_args.train_iters: + if neox_args.profile: + prof.step() + if neox_args.profile and iteration == neox_args.profile_step_start: + torch.cuda.cudart().cudaProfilerStart() + loss_dict, skipped_iter = train_step( + neox_args=neox_args, + timers=timers, + data_iterator=train_data_iterator, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + reference_model=reference_model, + ) + if neox_args.profile and iteration == neox_args.profile_step_stop: + torch.cuda.cudart().cudaProfilerStop() + prof.stop() + iteration += 1 + neox_args.iteration = iteration + if neox_args.precision == "fp16": + overflow_monitor.check(skipped_iter) # check for repeated overflow + if neox_args.log_gradient_noise_scale: # log noise scale if applicable + noise_scale_logger.update() + + # get learning rate (if present) - if doing soft prompt tuning + pipe parallel, you + # may have no tunable parameters on a specific rank + if optimizer.param_groups: + lr = optimizer.param_groups[0].get("lr", 0) + else: + lr = 0 + + # Logging. 
+        report_memory_flag = training_log(
+            neox_args=neox_args,
+            timers=timers,
+            loss_dict=loss_dict,
+            total_loss_dict=total_loss_dict,
+            learning_rate=lr,
+            iteration=iteration,
+            loss_scale=optimizer.cur_scale if neox_args.precision == "fp16" else None,
+            report_memory_flag=report_memory_flag,
+            skipped_iter=skipped_iter,
+            model=model,
+            optimizer=optimizer,
+            noise_scale_logger=noise_scale_logger,
+        )
+
+        # Checkpointing
+        if neox_args.save and is_save_iter(neox_args, iteration):
+            save_checkpoint(
+                neox_args=neox_args,
+                iteration=iteration,
+                model=model,
+                optimizer=optimizer,
+                lr_scheduler=lr_scheduler,
+            )
+
+        # Evaluation
+        if (
+            neox_args.eval_interval
+            and iteration % neox_args.eval_interval == 0
+            and neox_args.do_valid
+        ):
+            prefix = "iteration {}".format(iteration)
+            evaluate_and_print_results(
+                neox_args=neox_args,
+                prefix=prefix,
+                forward_step_func=forward_step,
+                data_iterator=valid_data_iterator,
+                model=model,
+                iteration=iteration,
+                verbose=False,
+                timers=timers,
+                reference_model=reference_model,
+            )
+
+        if neox_args.exit_interval and iteration % neox_args.exit_interval == 0:
+            torch.distributed.barrier()
+            time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            rank = torch.distributed.get_rank()
+            print_rank_0(
+                "rank: {} | time: {} | exiting the program at iteration {}".format(
+                    rank, time_str, iteration
+                )
+            )
+            sys.exit()
+
+    return iteration
+
+
+def evaluate(
+    neox_args,
+    forward_step_fn,
+    data_iterator,
+    model,
+    verbose=False,
+    timers=None,
+    reference_model=None,
+):
+    """Evaluation.
+    neox_args: NeoX Arguments
+    forward_step_fn: function with args `neox_args, timers, data_iterator, model`
+        that will run a forward pass on the model
+    data_iterator: Iterator that iterates over batches of data. Should return data in the form:
+        {'text': np.array([tokens], dtype=np.int64)}
+        where the size of the array is the model's context size + 1
+        (`get_batch` transforms it into inputs / labels)
+    """
+    # Turn on evaluation mode, which disables dropout.
+    model.eval()
+    losses = []
+    metric_dicts = defaultdict(list)
+    if neox_args.char_level_ppl:
+        data_iterator = CharCounter(data_iterator, neox_args.tokenizer)
+
+    with torch.no_grad():
+        iteration = 0
+        while iteration < neox_args.eval_iters:
+            iteration += 1
+            if verbose and iteration % neox_args.log_interval == 0:
+                print_rank_0(
+                    "Evaluating iter {}/{}".format(iteration, neox_args.eval_iters)
+                )
+
+            # Although we're not accumulating gradients here, we count one iter as
+            # train_batch_size_per_gpu * gradient_accumulation_steps to be consistent
+            # with deepspeed's pipe parallel engine. Since pipe parallel already takes
+            # gradient_accumulation_steps into account, default to 1 if pipe parallel is true.
+            for _ in range(
+                1
+                if neox_args.is_pipe_parallel
+                else neox_args.gradient_accumulation_steps
+            ):
+                # Forward evaluation
+                loss, metric_dict = forward_step_fn(
+                    model=model,
+                    data_iterator=data_iterator,
+                    neox_args=neox_args,
+                    timers=timers,
+                    reference_model=reference_model,
+                )
+                losses.append(loss)
+                for key in metric_dict.keys():
+                    metric_dicts[key].append(metric_dict[key])
+            # When contiguous memory optimizations are enabled, the buffers
+            # allocated by the optimizations are deallocated during the backward pass;
+            # in the absence of a backward pass the buffers should be reset after each
+            # forward pass.
+            if neox_args.deepspeed and neox_args.deepspeed_activation_checkpointing:
+                deepspeed.checkpointing.reset()
+
+    # reduces losses across processes for logging & eval harness tasks
+    eval_results = {"lm_loss": reduce_losses(losses).mean().item()}
+    for key in metric_dicts.keys():
+        eval_results[key] = reduce_losses(metric_dicts[key]).mean().item()
+    eval_results["lm_loss_ppl"] = math.exp(eval_results["lm_loss"])
+
+    if neox_args.char_level_ppl:
+        # calculate character-level perplexity, if specified, by unwrapping the
+        # CharCounter data_iterator
+        tokens_per_char = data_iterator.tokens_per_char()
+        print_rank_0(f"Counting chars took {data_iterator.total_time} seconds")
+
+        data_iterator = data_iterator.data_iterator
+        eval_results["lm_loss_char_lvl_ppl"] = math.exp(
+            eval_results["lm_loss"] * tokens_per_char
+        )
+
+    if neox_args.eval_tasks:
+        from eval_tasks import run_eval_harness
+
+        eval_results.update(
+            run_eval_harness(
+                model, forward_step_fn, neox_args, eval_tasks=neox_args.eval_tasks
+            ).get("results")
+        )
+    # Move model back to train mode.
+    model.train()
+    return eval_results
+
+
+def collect_loss_for_unit_test(lm_loss):
+    # Logic moved to a separate function to allow tracking in unit tests with
+    # unittest.mock.patch
+    pass
+
+
+def evaluate_and_print_results(
+    neox_args,
+    prefix,
+    forward_step_func,
+    data_iterator,
+    model,
+    iteration,
+    verbose=False,
+    timers=None,
+    chart_name="validation",
+    reference_model=None,
+):
+    """Helper function to evaluate and dump results on screen."""
+    total_loss_dict = evaluate(
+        neox_args=neox_args,
+        forward_step_fn=forward_step_func,
+        data_iterator=data_iterator,
+        model=model,
+        verbose=verbose,
+        timers=timers,
+        reference_model=reference_model,
+    )
+    string = f" {chart_name} results at {prefix} | "
+    for k, v in total_loss_dict.items():
+        if isinstance(v, dict):
+            if neox_args.eval_tasks and "results" in v:
+                v = v["results"]
+                print(v)
+            for k2, v2 in v.items():
+                k3 = "_".join([k, k2])
+                string += f"{k3} value: {v2:.6E} | "
+                tb_wandb_log(
+                    f"{chart_name}/{k3}",
+                    v2,
+                    iteration,
+                    use_wandb=neox_args.use_wandb,
+                    tensorboard_writer=neox_args.tensorboard_writer,
+                    comet_experiment=neox_args.comet_experiment,
+                )
+        else:
+            string += f"{k} value: {v:.6E} | "
+            tb_wandb_log(
+                f"{chart_name}/{k}",
+                v,
+                iteration,
+                use_wandb=neox_args.use_wandb,
+                tensorboard_writer=neox_args.tensorboard_writer,
+                comet_experiment=neox_args.comet_experiment,
+            )
+
+    length = len(string) + 1
+    print_rank_0("-" * length)
+    print_rank_0(string)
+    print_rank_0("-" * length)
+
+
+def save_snapshot(neox_args):
+    assert (
+        neox_args.memory_profiling_path is not None
+    ), "Must pass memory_profiling_path config arg to use profiling"
+    snapshot = torch.cuda.memory._snapshot()
+    snapshot_path = neox_args.memory_profiling_path
+    if not os.path.exists(snapshot_path):
+        os.makedirs(snapshot_path)
+    with open(os.path.join(snapshot_path, "mem_snapshot.pickle"), "wb") as f:
+        dump(snapshot, f)
diff --git a/megatron/utils.py b/megatron/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..507c44179a4855972c898c11da4b5752e039f56b
--- /dev/null
+++ b/megatron/utils.py
@@ -0,0 +1,559 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""General utilities."""
+import os
+import sys
+import re
+import time
+import socket
+import importlib.util
+from typing import Dict, List
+
+import requests
+
+try:
+    import wandb
+except ModuleNotFoundError:
+    pass
+
+import torch
+
+from deepspeed.launcher.runner import fetch_hostfile, parse_inclusion_exclusion
+from deepspeed.runtime.bf16_optimizer import BF16_Optimizer
+
+from megatron import print_rank_0
+from megatron import mpu
+
+from collections import deque
+
+
+def reduce_losses(losses):
+    """Reduce a tensor of losses across all GPUs."""
+    reduced_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
+    torch.distributed.all_reduce(reduced_losses)
+    reduced_losses = reduced_losses / torch.distributed.get_world_size()
+    return reduced_losses
+
+
+def report_memory(name):
+    """Simple GPU memory report."""
+    mega_bytes = 1024.0 * 1024.0
+    string = name + " memory (MB)"
+    string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes)
+    string += " | max allocated: {}".format(
+        torch.cuda.max_memory_allocated() / mega_bytes
+    )
+    string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes)
+    string += " | max reserved: {}".format(
+        torch.cuda.max_memory_reserved() / mega_bytes
+    )
+    print_rank_0(string)
+
+
+def get_attn_mask(seq_length, device, sliding_window_width):
+    """
+    Get triangular attention mask for a given sequence length / device.
+    """
+    # lower triangular attention mask
+    mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)).view(
+        1, 1, seq_length, seq_length
+    )
+    # get rid of lower diagonals than the sliding window width, if a value was provided
+    if sliding_window_width is not None:
+        mask = torch.triu(mask, diagonal=-sliding_window_width)
+
+    # convert to binary
+    return mask < 0.5
+
+
+def get_ltor_masks_and_position_ids(
+    data,
+    eod_token,
+    eod_mask_loss=False,
+    sliding_window_width=None,
+):
+    """Build masks and position ids for a left-to-right model."""
+
+    # Extract batch size and sequence length.
+    batch_size, seq_length = data.size()
+
+    # Attention mask (lower triangular).
+    attention_mask = get_attn_mask(
+        seq_length=seq_length,
+        device=data.device,
+        sliding_window_width=sliding_window_width,
+    )
+
+    # Loss mask.
+    loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
+    if eod_mask_loss:
+        loss_mask[data == eod_token] = 0.0
+
+    # Position ids.
+    position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
+    position_ids = position_ids.unsqueeze(0).expand_as(data)
+
+    return attention_mask, loss_mask, position_ids
+
+
+def local_rank():
+    """Local rank of process"""
+    local_rank = os.environ.get("LOCAL_RANK")
+
+    if local_rank is None:
+        local_rank = os.environ.get("SLURM_LOCALID")
+
+    if local_rank is None:
+        print(
+            "utils.local_rank() environment variable LOCAL_RANK not set, defaulting to 0",
+            flush=True,
+        )
+        local_rank = 0
+    return int(local_rank)
+
+
+def is_bnb_available():
+    """True if bitsandbytes optimizers are available"""
+    return importlib.util.find_spec("bitsandbytes") is not None
+
+
+def is_local_main():
+    """True if this is the local main process"""
+    return local_rank() == 0
+
+
+def is_mp_rank_0():
+    """True if mp rank == 0"""
+    return mpu.get_model_parallel_rank() == 0
+
+
+def get_wandb_api_key(neox_args):
+    """Get Weights and Biases API key from ENV or .netrc file.
Otherwise return None""" + if "WANDB_LOCAL" in os.environ: + return "LOCAL" + if "WANDB_API_KEY" in os.environ: + return os.environ["WANDB_API_KEY"] + + wandb_token = requests.utils.get_netrc_auth(neox_args.wandb_host) + + if wandb_token is not None: + return wandb_token[1] + + +def init_wandb(neox_args): + # Wandb. (one worker per machine) + if neox_args.use_wandb == False: + return + + if not neox_args.wandb_init_all_ranks: + use_wandb = is_local_main() and ( + get_wandb_api_key(neox_args=neox_args) is not None + ) + neox_args.update_value("use_wandb", use_wandb) + if neox_args.use_wandb: + group_name = neox_args.wandb_group + name = f"{socket.gethostname()}-{local_rank()}" if group_name else None + try: + wandb.init( + project=neox_args.wandb_project, + group=group_name, + name=name, + save_code=False, + force=False, + entity=neox_args.wandb_team, + ) + except wandb.UsageError as e: + neox_args.update_value("use_wandb", False) + print(e) + print( + "Skipping wandb. Execute `wandb login` on local or main node machine to enable.", + flush=True, + ) + wandb.config.update(neox_args.all_config) + + +def obtain_resource_pool( + hostfile_path, include_arg, exclude_arg +) -> Dict[str, List[int]]: + """ + Get dict of `resource_pool[hostname] = [list of GPU ranks]` using hostfile, include and exclude args. + Modified from: `deepspeed.launcher.runner.main` + """ + resource_pool = fetch_hostfile(hostfile_path) + if not resource_pool: + resource_pool = {} + device_count = torch.cuda.device_count() + if device_count == 0: + raise RuntimeError("Unable to proceed, no GPU resources available") + resource_pool["localhost"] = device_count + + active_resources = parse_inclusion_exclusion( + resource_pool, include_arg, exclude_arg + ) + return active_resources + + +def natural_sort(l): + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [convert(c) for c in re.split("([0-9]+)", key)] + return sorted(l, key=alphanum_key) + + +def ddb(rank=0): + """ + Distributed Debugger that will insert a py debugger on rank `rank` and + pause all other distributed processes until debugging is complete. + :param rank: + """ + if torch.distributed.get_rank() == rank: + from pdb import Pdb + + pdb = Pdb(skip=["torch.distributed.*"]) + pdb.set_trace(sys._getframe().f_back) + torch.distributed.barrier() + + +class Timer: + """Timer.""" + + def __init__(self, name): + self.name_ = name + self.elapsed_ = 0.0 + self.started_ = False + self.start_time = time.time() + + def start(self): + """Start the timer.""" + assert not self.started_, "timer has already been started" + torch.cuda.synchronize() + self.start_time = time.time() + self.started_ = True + + def stop(self): + """Stop the timer.""" + assert self.started_, "timer is not started" + torch.cuda.synchronize() + self.elapsed_ += time.time() - self.start_time + self.started_ = False + + def reset(self): + """Reset timer.""" + self.elapsed_ = 0.0 + self.started_ = False + + def elapsed(self, reset=True): + """Calculate the elapsed time.""" + started_ = self.started_ + # If the timing in progress, end it first. + if self.started_: + self.stop() + # Get the elapsed time. + elapsed_ = self.elapsed_ + # Reset the elapsed time + if reset: + self.reset() + # If timing was in progress, set it back. 
+ if started_: + self.start() + return elapsed_ + + +class Timers: + """Group of timers.""" + + def __init__(self, use_wandb, tensorboard_writer, comet_experiment): + self.timers = {} + self.use_wandb = use_wandb + self.tensorboard_writer = tensorboard_writer + self.comet_experiment = comet_experiment + + def __call__(self, name): + if name not in self.timers: + self.timers[name] = Timer(name) + return self.timers[name] + + def write(self, names, iteration, normalizer=1.0, reset=False): + """Write timers to a tensorboard writer""" + # currently when using add_scalars, + # torch.utils.add_scalars makes each timer its own run, which + # pollutes the runs list, so we just add each as a scalar + assert normalizer > 0.0 + for name in names: + value = self.timers[name].elapsed(reset=reset) / normalizer + + if self.tensorboard_writer: + self.tensorboard_writer.add_scalar(f"timers/{name}", value, iteration) + + if self.use_wandb: + wandb.log({f"timers/{name}": value}, step=iteration) + + if self.comet_experiment: + self.comet_experiment.__internal_api__log_metric__( + f"timers/{name}", + value, + framework="gpt-neox", + step=iteration, + ) + + def log(self, names, normalizer=1.0, reset=True): + """Log a group of timers.""" + assert normalizer > 0.0 + string = "time (ms)" + for name in names: + elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer + string += " | {}: {:.2f}".format(name, elapsed_time) + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == 0: + print(string, flush=True) + else: + print(string, flush=True) + + +def expand_attention_types(attention_config, num_layers): + """ + Expands an `attention_config` list in the following format: + + [ + [['attention_type_1', ..., `attention_type_n`], 12] + ] + + to a flattened list of length `num_layers`. + + :param params_list: + :return: + """ + # if only strings are found in the config, we assume it's already expanded + if all([isinstance(i, str) for i in attention_config]): + return attention_config + newlist = [] + for item in attention_config: + # instead of specifying a number - we can specify 'all' to extend this pattern across all layers + if item[1] == "all": + assert num_layers % len(item[0]) == 0, ( + f"Number of layers ({num_layers}) is not divisible by the length " + f"of pattern: {item[0]}" + ) + return item[0] * (num_layers // len(item[0])) + for _ in range(item[1]): + newlist.extend(item[0]) + return newlist + + +class OverflowMonitor: + + """ + Checks if the past n iterations have been skipped due to overflow, and exits + training if that happens. + """ + + def __init__(self, optimizer, n=50): + self.optimizer = optimizer + self.n = n + self.history = deque(maxlen=n) + self.bf16 = isinstance(optimizer, BF16_Optimizer) + + def check(self, skipped): + if self.bf16: + return + self.history.append(skipped) + if ( + self.optimizer.overflow + and len(self.history) == self.n + and all(self.history) + ): + raise Exception( + f"Skipped {self.n} iterations in a row due to Overflow - Exiting training." + ) + + +def get_noise_scale_logger(neox_args): + if neox_args.log_gradient_noise_scale: + if neox_args.zero_stage >= 1: + raise NotImplementedError( + "Gradient Noise Scale logging does not work with zero stage 2+, as the " + "gradients are distributed across ranks." 
+ ) + noise_scale_logger = GradientNoiseScale( + model=model, + batch_size_small=neox_args.train_batch_size, + n_batches=neox_args.gradient_noise_scale_n_batches, + cpu_offload=neox_args.gradient_noise_scale_cpu_offload, + neox_args=neox_args, + mpu=mpu, + ) + else: + noise_scale_logger = None + return noise_scale_logger + + +def get_total_params(model): + # Print number of parameters. + if mpu.get_data_parallel_rank() == 0: + params = sum([p.nelement() for p in model.parameters()]) + print( + " > number of parameters on model parallel rank {}: {}".format( + mpu.get_model_parallel_rank(), params + ), + flush=True, + ) + else: + params = 0 + + total_n_parameters = torch.tensor([params]).cuda(torch.cuda.current_device()) + torch.distributed.all_reduce(total_n_parameters) + total_n_parameters = total_n_parameters.item() + return total_n_parameters + + +def setup_for_inference_or_eval(use_cache=True, overwrite_values=None, input_args=None): + """ + Initializes the model for evaluation or inference (doesn't load optimizer states, etc.) from command line args. + + use_cache: bool + Whether to use key value caching in inference. + overwrite_values: dict + Optional Values to overwrite in the model config. + """ + + from megatron.neox_arguments import NeoXArgs + from megatron.initialize import initialize_megatron + from megatron.training import setup_model_and_optimizer + + _overwrite_values = { + "checkpoint_activations": False, + "partition_activations": False, + "no_load_optim": True, + "optimizer": None, # prevent loading optimizer (no_load_optim alone won't work) + "zero_optimization": None, # disable zero optimization (won't be used in inference, and loading zero optimizer can cause errors) + } + if overwrite_values: + _overwrite_values.update(overwrite_values) + neox_args = NeoXArgs.consume_neox_args( + overwrite_values=_overwrite_values, input_args=input_args + ) + neox_args.configure_distributed_args() + neox_args.build_tokenizer() + + if neox_args.load is None: + raise ValueError("`load` parameter must be supplied to load a model`") + + # initialize wandb + init_wandb(neox_args=neox_args) + + # initialize megatron + initialize_megatron(neox_args) + + # set up model and load checkpoint. + model, _, _, _ = setup_model_and_optimizer( + neox_args=neox_args, + use_cache=use_cache, + iteration=neox_args.iteration, + ) # we use setup_model_and_optimizer instead of get_model in order to initialize deepspeed + print_rank_0("Finished loading model") + + model.module.inference_mode(use_cache=use_cache) + return model, neox_args + + +class CharCounter: + """ + Wraps the data_iterator to count the number of characters in a batch + """ + + def __init__(self, data_iterator, tokenizer): + self.tokenizer = tokenizer + self.data_iterator = data_iterator + self.char_count = 0 + self.batch_count = 0 + self.token_count = 0 + self.total_time = 0 + + def tokens_per_char(self): + return self.token_count / self.char_count + + def __iter__(self): + return self + + def __next__(self): + start = time.time() + batch = self.data_iterator.__next__() + for b in batch["text"]: + self.token_count += len(b) + self.char_count += len(self.tokenizer.detokenize(b.tolist())) + self.batch_count += 1 + end = time.time() + self.total_time += end - start + return batch + + +def _kernel_make_viewless_tensor(inp, requires_grad): + """Make a viewless tensor. + + View tensors have the undesirable side-affect of retaining a reference + to the originally-viewed tensor, even after manually setting the '.data' + field. 
This method creates a new tensor that links to the old tensor's
+    data, without linking the viewed tensor, referenced via the '._base'
+    field.
+    """
+    out = torch.empty(
+        (1,),
+        dtype=inp.dtype,
+        device=inp.device,
+        requires_grad=requires_grad,
+    )
+    out.data = inp.data
+    return out
+
+
+class MakeViewlessTensor(torch.autograd.Function):
+    """
+    Autograd function to make a viewless tensor.
+
+    This function should be used in cases where the computation graph needs
+    to be propagated, but we only want a viewless tensor (e.g.,
+    ParallelTransformer's hidden_states). Call this function by passing
+    'keep_graph = True' to 'make_viewless_tensor()'.
+    """
+
+    @staticmethod
+    def forward(ctx, inp, requires_grad):
+        return _kernel_make_viewless_tensor(inp, requires_grad)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return grad_output, None
+
+
+def make_viewless_tensor(inp, requires_grad, keep_graph):
+    """
+    Entry-point for creating viewless tensors.
+
+    This method should be used, rather than calling 'MakeViewlessTensor'
+    or '_kernel_make_viewless_tensor' directly. This method acts as a
+    switch for determining if an autograd function or a regular method
+    should be used to create the tensor.
+    """
+
+    # return tensor as-is, if not a 'view'
+    if inp._base is None:
+        return inp
+
+    # create viewless tensor
+    if keep_graph:
+        return MakeViewlessTensor.apply(inp, requires_grad)
+    else:
+        return _kernel_make_viewless_tensor(inp, requires_grad)
diff --git a/post-training/README.md b/post-training/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..930ad0e313e5a4e3ed647498137bb1efbc5e679f
--- /dev/null
+++ b/post-training/README.md
@@ -0,0 +1,57 @@
+# Post-Training
+
+Examples for running post-training with UltraFeedback data for SFT/DPO/RM/KTO training.
+
+First, convert the Hugging Face checkpoint to NeoX format:
+
+```bash
+python tools/ckpts/convert_hf_llama_to_neox.py --tp 4 --model meta-llama/Meta-Llama-3-8B-Instruct --model_path checkpoints/neox_converted/llama3-8b-instruct
+```
+
+## Data generation
+Then generate the JSONL files:
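+(This should write the `*_filtered.jsonl` files under `data/` that the preprocessing commands below consume.)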
+ +```bash +python post-training/llama_data.py +``` +## DPO data +```bash +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --only-last +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_test_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --only-last +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --only-last +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --only-last +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_test_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --only-last +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_dpo_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --only-last +``` + +## RM data +```bash +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_rm_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --for-rm +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_test_filtered.jsonl --output-prefix data/pairwise/llama3_rm_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --for-rm +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_rm_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys rejected --for-rm +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_rm_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --for-rm +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_test_filtered.jsonl --output-prefix data/pairwise/llama3_rm_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --for-rm +python tools/datasets/preprocess_data_with_chat_template.py --input data/pairwise/llama3_dpo_train_filtered.jsonl --output-prefix data/pairwise/llama3_rm_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys chosen --for-rm +``` + +## SFT data +```bash +python tools/datasets/preprocess_data_with_chat_template.py --input data/sft/llama3_sft_train_filtered.jsonl --output-prefix data/sft/llama3_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages 
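+# Note: flag semantics here are inferred from their names (check the script's
+# --help to confirm): --jsonl-keys selects which field of each JSON line is
+# tokenized, --only-last and --for-rm adjust loss masking/formatting for the
+# DPO and RM recipes above, and --reward-key (used for KTO below) attaches
+# per-example rewards.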
+python tools/datasets/preprocess_data_with_chat_template.py --input data/sft/llama3_sft_test_filtered.jsonl --output-prefix data/sft/llama3_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages +python tools/datasets/preprocess_data_with_chat_template.py --input data/sft/llama3_sft_train_filtered.jsonl --output-prefix data/sft/llama3_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages +``` + +## KTO data +```bash +python tools/datasets/preprocess_data_with_chat_template.py --input data/kto/llama3_sft_train_filtered.jsonl --output-prefix data/kto/llama3_train --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages --reward-key reward +python tools/datasets/preprocess_data_with_chat_template.py --input data/kto/llama3_sft_test_filtered.jsonl --output-prefix data/kto/llama3_test --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages --reward-key reward +python tools/datasets/preprocess_data_with_chat_template.py --input data/kto/llama3_sft_train_filtered.jsonl --output-prefix data/kto/llama3_val --tokenizer-path checkpoints/neox_converted/llama3-8b-instruct/tokenizer --jsonl-keys messages --reward-key reward +``` + + +## Converting back to hf +```bash +# RM +python tools/ckpts/convert_neox_to_hf.py --input_dir eleuther-neox/checkpoints/rm/llama3/llama3-8b-instruct/global_step100 --output_dir checkpoints/rm/llama3_hf --config_file checkpoints/rm/llama3/llama3-8b-instruct/global_step100/configs/llama3-8b-rm.yml --precision bf16 --vocab-is-hf-tokenizer --architecture llama --pad-token-id 128002 + +# SFT/DPO +python tools/ckpts/convert_neox_to_hf.py --input_dir eleuther-neox/checkpoints//llama3/llama3-8b-instruct/global_step100 --output_dir checkpoints//llama3_hf --config_file checkpoints//llama3/llama3-8b-instruct/global_step100/configs/llama3-8b-rm.yml --precision bf16 --vocab-is-hf-tokenizer +``` diff --git a/post-training/configs/benchmarking/llama-13b-dpo.yml b/post-training/configs/benchmarking/llama-13b-dpo.yml new file mode 100644 index 0000000000000000000000000000000000000000..1b97f51b4a44eea66fa1e93e5e659c28c1fe241a --- /dev/null +++ b/post-training/configs/benchmarking/llama-13b-dpo.yml @@ -0,0 +1,127 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 2, + "make_vocab_size_divisible_by": 64, + + # model settings + "num_layers": 40, + "hidden_size": 5120, + "num_attention_heads": 40, + "num_kv_heads": 40, + # following along with zephyr's max length... 
+ "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 500000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + "rmsnorm_fusion": true, + + "attention_config": [[["flash"], 40]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 13824, + "mlp_multiple_of": 13824, + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 5.0e-7, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1000000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1000000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_impl": "dpo", + "dataset_impl": "pairwise", + "dpo_reference_free": false, + "dpo_fp32": false, + "dpo_beta": 0.01, + "allow_chopped": false, + "pos_train_data_paths": [ "data/pairwise/dpo_train_chosen_document" ], + "pos_train_label_data_paths": [ "data/pairwise/dpo_train_chosen_label_document" ], + "neg_train_data_paths": [ "data/pairwise/dpo_train_rejected_document" ], + "neg_train_label_data_paths": [ "data/pairwise/dpo_train_rejected_label_document" ], + "pos_valid_data_paths": [ "data/pairwise/dpo_val_chosen_document" ], + "pos_valid_label_data_paths": [ "data/pairwise/dpo_val_chosen_label_document" ], + "neg_valid_data_paths": [ "data/pairwise/dpo_val_rejected_document" ], + "neg_valid_label_data_paths": [ "data/pairwise/dpo_val_rejected_label_document" ], + "pos_test_data_paths": [ "data/pairwise/dpo_val_chosen_document" ], + "pos_test_label_data_paths": [ "data/pairwise/dpo_val_chosen_label_document" ], + "neg_test_data_paths": [ "data/pairwise/dpo_val_rejected_document" ], + "neg_test_label_data_paths": [ "data/pairwise/dpo_val_rejected_label_document" ], + + + "train_micro_batch_size_per_gpu": 2, + "gradient_accumulation_steps": 16, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 4, + + "checkpoint_activations": false, + "checkpoint_num_layers": 1, + "partition_activations": false, + "synchronize_each_layer": false, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": false, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "bf16" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/pairwise/llama-13b-dpo", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + # use the same mistral tokenizer just for performance testing + "vocab-file": "checkpoints/neox_converted/zephyr-sft/tokenizer/tokenizer.json", + "use_wandb": true, + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", + "wandb_group": "llama-13b", + "wandb_project": "llama-13b-perf-test", +} diff --git a/post-training/configs/benchmarking/mistral-dpo.yml 
b/post-training/configs/benchmarking/mistral-dpo.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e2f1a5aceb9504632f46ad6bbb3c3ab81597b8f --- /dev/null +++ b/post-training/configs/benchmarking/mistral-dpo.yml @@ -0,0 +1,126 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_kv_heads": 8, + # following along with zephyr's max length... + "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 10000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + "rmsnorm_fusion": true, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 14336, + "mlp_multiple_of": 14336, + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 5.0e-7, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_impl": "dpo", + "dataset_impl": "pairwise", + "dpo_fp32": false, + "dpo_beta": 0.01, + "allow_chopped": false, + "pos_train_data_paths": [ "data/pairwise/dpo_train_chosen_document" ], + "pos_train_label_data_paths": [ "data/pairwise/dpo_train_chosen_label_document" ], + "neg_train_data_paths": [ "data/pairwise/dpo_train_rejected_document" ], + "neg_train_label_data_paths": [ "data/pairwise/dpo_train_rejected_label_document" ], + "pos_valid_data_paths": [ "data/pairwise/dpo_val_chosen_document" ], + "pos_valid_label_data_paths": [ "data/pairwise/dpo_val_chosen_label_document" ], + "neg_valid_data_paths": [ "data/pairwise/dpo_val_rejected_document" ], + "neg_valid_label_data_paths": [ "data/pairwise/dpo_val_rejected_label_document" ], + "pos_test_data_paths": [ "data/pairwise/dpo_val_chosen_document" ], + "pos_test_label_data_paths": [ "data/pairwise/dpo_val_chosen_label_document" ], + "neg_test_data_paths": [ "data/pairwise/dpo_val_rejected_document" ], + "neg_test_label_data_paths": [ "data/pairwise/dpo_val_rejected_label_document" ], + + + "train_micro_batch_size_per_gpu": 8, + "gradient_accumulation_steps": 8, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 1, + + "checkpoint_activations": false, + "checkpoint_num_layers": 32, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": false, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "bf16" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 143000, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/pairwise/zephyr-beta-recreation", + #"load": "", # once run is started, to restart from 
intermediate ckpt use "load" = "save" + "load": "checkpoints/neox_converted/zephyr-sft", + "vocab-file": "checkpoints/neox_converted/zephyr-sft/tokenizer/tokenizer.json", + "use_wandb": true, + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", + "wandb_group": "zephyr-beta-dpo", + "wandb_project": "zephyr-beta-dpo", +} diff --git a/post-training/configs/llama3-8b-dpo.yml b/post-training/configs/llama3-8b-dpo.yml new file mode 100644 index 0000000000000000000000000000000000000000..8a75caef066bfb7381e10179164afce636a57cc5 --- /dev/null +++ b/post-training/configs/llama3-8b-dpo.yml @@ -0,0 +1,125 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_kv_heads": 8, + # llama3 supports more than this but this is just for testing. + "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 500000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 14336, + "mlp_multiple_of": 14336, + + + + "optimizer": { + "type": "Adam", + "params": { + "lr": 5.0e-7, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_impl": "dpo", + "dataset_impl": "pairwise", + "dpo_fp32": true, + "dpo_beta": 0.01, + "allow_chopped": false, + "pos_train_data_paths": [ "data/pairwise/llama3_dpo_train_chosen_document" ], + "pos_train_label_data_paths": [ "data/pairwise/llama3_dpo_train_chosen_label_document" ], + "neg_train_data_paths": [ "data/pairwise/llama3_dpo_train_rejected_document" ], + "neg_train_label_data_paths": [ "data/pairwise/llama3_dpo_train_rejected_label_document" ], + "pos_valid_data_paths": [ "data/pairwise/llama3_dpo_val_chosen_document" ], + "pos_valid_label_data_paths": [ "data/pairwise/llama3_dpo_val_chosen_label_document" ], + "neg_valid_data_paths": [ "data/pairwise/llama3_dpo_val_rejected_document" ], + "neg_valid_label_data_paths": [ "data/pairwise/llama3_dpo_val_rejected_label_document" ], + "pos_test_data_paths": [ "data/pairwise/llama3_dpo_val_chosen_document" ], + "pos_test_label_data_paths": [ "data/pairwise/llama3_dpo_val_chosen_label_document" ], + "neg_test_data_paths": [ "data/pairwise/llama3_dpo_val_rejected_document" ], + "neg_test_label_data_paths": [ "data/pairwise/llama3_dpo_val_rejected_label_document" ], + + "train_micro_batch_size_per_gpu": 32, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, 
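+  # Parameters stay in bf16, but the DPO loss calculation ("dpo_fp32"), the
+  # gradient all-reduce ("fp32_allreduce"), and gradient accumulation
+  # ("grad_accum_dtype" below) are kept in fp32 for numerical stability; the
+  # benchmarking configs above use bf16 throughout for speed instead.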
+ "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 100, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/dpo/llama3/llama3-8b-instruct", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + "load": "checkpoints/neox_converted/llama3-8b-instruct", + "vocab-file": "checkpoints/neox_converted/llama3-8b-instruct/tokenizer/tokenizer.json", + "use_wandb": true, + "wandb_group": "llama3-8b-instruct", + "wandb_project": "ultrafeedback-dpo", + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", +} diff --git a/post-training/configs/llama3-8b-kto.yml b/post-training/configs/llama3-8b-kto.yml new file mode 100644 index 0000000000000000000000000000000000000000..e819d37cbb7239d61de45c2f45a76ec331c9be7a --- /dev/null +++ b/post-training/configs/llama3-8b-kto.yml @@ -0,0 +1,120 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_kv_heads": 8, + # llama3 supports more than this but this is just for testing. + "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 500000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 14336, + "mlp_multiple_of": 14336, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.000001, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + + "train_impl": "kto", + "kto_fp32": true, + "kto_beta": 0.1, + "allow_chopped": false, + "train_label_data_paths": [ "data/kto/llama3_train_messages_label_document" ], + "test_label_data_paths": [ "data/kto/llama3_test_messages_label_document" ], + "valid_label_data_paths": [ "data/kto/llama3_train_messages_label_document" ], + "train_data_paths": [ "data/kto/llama3_train_messages_document" ], + "test_data_paths": [ "data/kto/llama3_test_messages_document" ], + "valid_data_paths": [ "data/kto/llama3_train_messages_document" ], + "train_reward_data_paths": [ "data/kto/llama3_train_messages_reward_document" ], + "test_reward_data_paths": [ "data/kto/llama3_test_messages_reward_document" ], + "valid_reward_data_paths": [ "data/kto/llama3_train_messages_reward_document" ], + + "train_micro_batch_size_per_gpu": 32, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + 
"gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 100, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/kto/llama3/llama3-8b-instruct", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + "load": "checkpoints/neox_converted/llama3-8b-instruct", + "vocab-file": "checkpoints/neox_converted/llama3-8b-instruct/tokenizer/tokenizer.json", + "use_wandb": true, + "wandb_group": "llama3-8b-instruct", + "wandb_project": "ultrafeedback-kto", + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", +} diff --git a/post-training/configs/llama3-8b-rm.yml b/post-training/configs/llama3-8b-rm.yml new file mode 100644 index 0000000000000000000000000000000000000000..43117bf95e0f618f3bdd6b279c0f4c0bed0ae719 --- /dev/null +++ b/post-training/configs/llama3-8b-rm.yml @@ -0,0 +1,121 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_kv_heads": 8, + # llama3 supports more than this but this is just for testing. + "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 500000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 14336, + "mlp_multiple_of": 14336, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 5.0e-7, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.0, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_impl": "rm", + "dataset_impl": "pairwise", + "allow_chopped": false, + "pos_train_data_paths": [ "data/pairwise/llama3_rm_train_chosen_document" ], + "pos_train_label_data_paths": [ "data/pairwise/llama3_rm_train_chosen_label_document" ], + "neg_train_data_paths": [ "data/pairwise/llama3_rm_train_rejected_document" ], + "neg_train_label_data_paths": [ "data/pairwise/llama3_rm_train_rejected_label_document" ], + "pos_valid_data_paths": [ "data/pairwise/llama3_rm_val_chosen_document" ], + "pos_valid_label_data_paths": [ "data/pairwise/llama3_rm_val_chosen_label_document" ], + "neg_valid_data_paths": [ "data/pairwise/llama3_rm_val_rejected_document" ], + "neg_valid_label_data_paths": [ "data/pairwise/llama3_rm_val_rejected_label_document" ], + "pos_test_data_paths": [ "data/pairwise/llama3_rm_val_chosen_document" ], + "pos_test_label_data_paths": [ "data/pairwise/llama3_rm_val_chosen_label_document" ], + 
"neg_test_data_paths": [ "data/pairwise/llama3_rm_val_rejected_document" ], + "neg_test_label_data_paths": [ "data/pairwise/llama3_rm_val_rejected_label_document" ], + + "train_micro_batch_size_per_gpu": 32, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 100, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/rm/llama3/llama3-8b-instruct", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + "load": "checkpoints/neox_converted/llama3-8b-instruct", + "vocab-file": "checkpoints/neox_converted/llama3-8b-instruct/tokenizer/tokenizer.json", + "use_wandb": true, + "wandb_group": "llama3-8b-instruct", + "wandb_project": "ultrafeedback-rm", + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", +} diff --git a/post-training/configs/llama3-8b-sft.yml b/post-training/configs/llama3-8b-sft.yml new file mode 100644 index 0000000000000000000000000000000000000000..bfcea1142caebc9e0b97b38e30b352b9c66d9de8 --- /dev/null +++ b/post-training/configs/llama3-8b-sft.yml @@ -0,0 +1,112 @@ +{ + "pipe_parallel_size": 0, + "model_parallel_size": 4, + "make_vocab_size_divisible_by": 1, + + # model settings + "num_layers": 32, + "hidden_size": 4096, + "num_attention_heads": 32, + "num_kv_heads": 8, + # llama3 supports more than this but this is just for testing. 
+ "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "rotary_pct": 1, + "rotary_emb_base": 500000, + "rope_fusion": true, + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + "norm": "rmsnorm", + "rms_norm_epsilon": 1.0e-5, + + "attention_config": [[["flash"], 32]], + + "scaled_upper_triang_masked_softmax_fusion": true, + "bias_gelu_fusion": false, + "use_bias_in_norms": false, + "use_bias_in_attn_linear": false, + "use_bias_in_mlp": false, + "use_flashattn_swiglu": true, + "activation": "swiglu", + "intermediate_size": 14336, + "mlp_multiple_of": 14336, + + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.00001, + "betas": [0.9, 0.95], + "eps": 1.0e-8 + } + }, + "min_lr": 0.000001, + + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 1260000000, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": 1260000000, + "contiguous_gradients": true, + "cpu_offload": false + }, + + "train_label_data_paths": [ "data/sft/llama3_train_messages_label_document" ], + "test_label_data_paths": [ "data/sft/llama3_test_messages_label_document" ], + "valid_label_data_paths": [ "data/sft/llama3_train_messages_label_document" ], + "train_data_paths": [ "data/sft/llama3_train_messages_document" ], + "test_data_paths": [ "data/sft/llama3_test_messages_document" ], + "valid_data_paths": [ "data/sft/llama3_train_messages_document" ], + + "train_micro_batch_size_per_gpu": 32, + "gradient_accumulation_steps": 2, + "data_impl": "mmap", + "pack_impl": "unpacked", + "num_workers": 1, + + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "precision": "bfloat16", + "fp32_allreduce": true, + "bf16": { + "enabled": true + }, + "data_types": { + "grad_accum_dtype": "fp32" + }, + + "train_iters": 477, + "lr_decay_iters": 477, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.1, + "checkpoint_factor": 1000, + "eval_interval": 100, + "eval_iters": 10, + + "log_interval": 1, + "steps_per_print": 1, + "wall_clock_breakdown": true, + + + "save": "checkpoints/sft/llama3/llama3-8b-instruct", + #"load": "", # once run is started, to restart from intermediate ckpt use "load" = "save" + "load": "checkpoints/neox_converted/llama3-8b-instruct", + "vocab-file": "checkpoints/neox_converted/llama3-8b-instruct/tokenizer/tokenizer.json", + "use_wandb": true, + "wandb_group": "llama3-8b-instruct", + "wandb_project": "ultrafeedback-sft", + "finetune": true, # set to false once resuming from intermediate finetuning step + "tokenizer_type": "HFTokenizer", +} diff --git a/post-training/dpo_data.py b/post-training/dpo_data.py new file mode 100644 index 0000000000000000000000000000000000000000..d24eb43e50f441f0ff9c4d8033ed8cbe24884e49 --- /dev/null +++ b/post-training/dpo_data.py @@ -0,0 +1,103 @@ +""" +https://github.com/huggingface/alignment-handbook/blob/main/scripts/run_dpo.py +adapted to just grab the dataset +""" +import os +from alignment import ( + DataArguments, + DPOConfig, + H4ArgumentParser, + ModelArguments, + apply_chat_template, + decontaminate_humaneval, + get_checkpoint, + get_datasets, + get_kbit_device_map, + get_peft_config, + get_quantization_config, + get_tokenizer, + is_adapter_model, +) +from datasets import load_dataset, DatasetDict +from transformers import AutoTokenizer 
+
+import jsonlines
+
+###############
+# Load datasets
+###############
+raw_datasets = load_dataset("HuggingFaceH4/ultrafeedback_binarized")
+raw_datasets = DatasetDict(
+    {
+        "train": raw_datasets["train_prefs"],
+        "test": raw_datasets["test_prefs"],
+    }
+)
+column_names = list(raw_datasets["train"].features)
+
+#####################################
+# Load tokenizer and process datasets
+#####################################
+truncation_side = (
+    "left"  # Truncate from left to ensure we don't lose labels in final turn
+)
+tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+
+#####################
+# Apply chat template
+#####################
+raw_datasets = raw_datasets.map(
+    apply_chat_template,
+    fn_kwargs={
+        "tokenizer": tokenizer,
+        "task": "dpo",
+        "auto_insert_empty_system_msg": True,
+    },
+    desc="Formatting comparisons with prompt template",
+)
+
+##########################
+# Decontaminate benchmarks
+##########################
+num_raw_train_samples = len(raw_datasets["train"])
+raw_datasets = raw_datasets.filter(
+    decontaminate_humaneval,
+    fn_kwargs={"text_column": "text_chosen"},
+    batched=True,
+    batch_size=10_000,
+    num_proc=1,
+    desc="Decontaminating HumanEval samples",
+)
+num_filtered_train_samples = num_raw_train_samples - len(raw_datasets["train"])
+print(
+    f"Decontaminated {num_filtered_train_samples} ({num_filtered_train_samples / num_raw_train_samples * 100:.2f}%) samples from the training set."
+)
+###############
+# Length filter
+###############
+# Since the alignment handbook recipes call for a max token limit of 1024...
+num_filtered_train_samples = len(raw_datasets["train"])
+
+
+def length_filter(example):
+    return (len(tokenizer.apply_chat_template(example["chosen"])) < 1024) and (
+        len(tokenizer.apply_chat_template(example["rejected"])) < 1024
+    )
+
+
+# Apply the length filter so the dumped jsonl respects the 1024-token limit
+raw_datasets = raw_datasets.filter(
+    length_filter, desc="Filtering samples longer than 1024 tokens"
+)
+num_length_filtered_train_samples = num_filtered_train_samples - len(
+    raw_datasets["train"]
+)
+print(
+    f"Length Filtered {num_length_filtered_train_samples} ({num_length_filtered_train_samples / num_filtered_train_samples * 100:.2f}%) samples from the training set."
+)
+# get the directory of this script
+dir_path = os.path.dirname(os.path.realpath(__file__))
+for split in ["train", "test"]:
+    with open(os.path.join(dir_path, f"dpo_{split}_filtered.jsonl"), "w") as f:
+        writer = jsonlines.Writer(f)
+        for item in raw_datasets[split]:
+            # add empty system messages
+            item["chosen"] = [{"role": "system", "content": ""}] + item["chosen"]
+            item["rejected"] = [{"role": "system", "content": ""}] + item["rejected"]
+            writer.write(item)
diff --git a/post-training/llama_data.py b/post-training/llama_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..eab6ac9f1687fb64f1807e821cfc383656a4d216
--- /dev/null
+++ b/post-training/llama_data.py
@@ -0,0 +1,49 @@
+import os
+
+from datasets import load_dataset, DatasetDict
+
+import jsonlines
+
+###############
+# Load datasets
+###############
+raw_datasets = load_dataset("HuggingFaceH4/ultrafeedback_binarized")
+# convert to just train and test, not necessary but it looks better
+raw_datasets = DatasetDict(
+    {
+        "train": raw_datasets["train_prefs"],
+        "test": raw_datasets["test_prefs"],
+    }
+)
+os.makedirs(os.path.join("data", "pairwise"), exist_ok=True)
+for split in ["train", "test"]:
+    with open(
+        os.path.join("data", "pairwise", f"llama3_dpo_{split}_filtered.jsonl"), "w"
+    ) as f:
+        writer = jsonlines.Writer(f)
+        for item in raw_datasets[split]:
+            # chosen/rejected pairs are written through unchanged
+            writer.write(item)
+os.makedirs(os.path.join("data", "sft"), exist_ok=True)
+for split in ["train", "test"]:
+    with open(
+        os.path.join("data", "sft", f"llama3_sft_{split}_filtered.jsonl"), "w"
+    ) as f:
+        writer = jsonlines.Writer(f)
+        for item in raw_datasets[split]:
+            # SFT trains on the chosen conversation only
+            item["messages"] = item["chosen"]
+            writer.write(item)
+os.makedirs(os.path.join("data", "kto"), exist_ok=True)
+for split in ["train", "test"]:
+    with open(
+        os.path.join("data", "kto", f"llama3_kto_{split}_filtered.jsonl"), "w"
+    ) as f:
+        writer = jsonlines.Writer(f)
+        for item in raw_datasets[split]:
+            # each pair yields one desirable (+1) and one undesirable (-1) example
+            item["messages"] = item["chosen"]
+            item["reward"] = 1
+            writer.write(item)
+            item["messages"] = item["rejected"]
+            item["reward"] = -1
+            writer.write(item)
diff --git a/post-training/recreating_zephyr_dpo.md b/post-training/recreating_zephyr_dpo.md
new file mode 100644
index 0000000000000000000000000000000000000000..d97eb37914c47e9a864bb9828a056177a1d90627
--- /dev/null
+++ b/post-training/recreating_zephyr_dpo.md
@@ -0,0 +1,39 @@
+# Initial setup
+
+```bash
+python tools/ckpts/convert_hf_llama_to_neox.py --tp 2 --model HuggingFaceH4/mistral-7b-sft-beta --model_path checkpoints/neox_converted/zephyr-sft_tp2
+```
+
+
+# To generate data
+First, make a new environment. We want to keep the same data between runs, so the easiest way is to create a new conda
+environment and follow the steps below.
+```
+conda create -n handbook python=3.10 && conda activate handbook
+git clone https://github.com/huggingface/alignment-handbook.git
+cd ./alignment-handbook/
+python -m pip install .
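+# jsonlines is used by the data-dumping scripts in post-training/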
+python -m pip install jsonlines
+```
+
+## DPO data
+```bash
+# from the gpt-neox repo
+conda activate handbook
+python post-training/dpo_data.py
+conda deactivate
+# switch back to your gpt-neox environment
+mkdir -p data/pairwise
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_train_filtered.jsonl --output-prefix data/pairwise/dpo_train --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys rejected --only-last
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_test_filtered.jsonl --output-prefix data/pairwise/dpo_test --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys rejected --only-last
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_train_filtered.jsonl --output-prefix data/pairwise/dpo_val --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys rejected --only-last
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_train_filtered.jsonl --output-prefix data/pairwise/dpo_train --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys chosen --only-last
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_test_filtered.jsonl --output-prefix data/pairwise/dpo_test --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys chosen --only-last
+python tools/datasets/preprocess_data_with_chat_template.py --input post-training/dpo_train_filtered.jsonl --output-prefix data/pairwise/dpo_val --tokenizer-path checkpoints/neox_converted/zephyr-sft/tokenizer --jsonl-keys chosen --only-last
+```
+
+## Running
+```bash
+python deepy.py train.py post-training/configs/benchmarking/mistral-dpo.yml
+```
diff --git a/prepare_data.py b/prepare_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..62363e27b156e6fdaa37a288467bf246afd802f2
--- /dev/null
+++ b/prepare_data.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from tools.datasets.corpora import prepare_dataset, DATA_DOWNLOADERS +import argparse + +TOKENIZER_CHOICES = [ + "HFGPT2Tokenizer", + "HFTokenizer", + "GPT2BPETokenizer", + "CharLevelTokenizer", + "TiktokenTokenizer", + "SPMTokenizer", +] +DATASET_CHOICES = [i for i in DATA_DOWNLOADERS.keys() if i != "pass"] + + +def get_args(): + parser = argparse.ArgumentParser(description="Download & preprocess neox datasets") + parser.add_argument( + "dataset", + nargs="?", + default="enwik8", + help="name of dataset to download.", + choices=DATASET_CHOICES, + ) + parser.add_argument( + "-t", + "--tokenizer", + default="GPT2BPETokenizer", + choices=TOKENIZER_CHOICES, + help=f'Type of tokenizer to use - choose from {", ".join(TOKENIZER_CHOICES)}', + ) + parser.add_argument( + "-d", + "--data-dir", + default=None, + help=f"Directory to which to download datasets / tokenizer " + f"files - defaults to ./data", + ) + parser.add_argument( + "-v", "--vocab-file", default=None, help=f"Tokenizer vocab file (if required)" + ) + parser.add_argument( + "-m", "--merge-file", default=None, help=f"Tokenizer merge file (if required)" + ) + parser.add_argument( + "-f", + "--force-redownload", + dest="force_redownload", + default=False, + action="store_true", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = get_args() + prepare_dataset( + dataset_name=args.dataset, + tokenizer_type=args.tokenizer, + data_dir=args.data_dir, + vocab_file=args.vocab_file, + merge_file=args.merge_file, + force_redownload=args.force_redownload, + ) diff --git a/requirements/requirements-apex-pip.txt b/requirements/requirements-apex-pip.txt new file mode 100644 index 0000000000000000000000000000000000000000..981f54a306a92245c8e49b2a0596676208d42eca --- /dev/null +++ b/requirements/requirements-apex-pip.txt @@ -0,0 +1 @@ +pip==23.3.2 diff --git a/requirements/requirements-comet.txt b/requirements/requirements-comet.txt new file mode 100644 index 0000000000000000000000000000000000000000..904301eaaf4eeed4ef796d6a6eb433dc99c6a1f4 --- /dev/null +++ b/requirements/requirements-comet.txt @@ -0,0 +1 @@ +comet_ml>=3.45.0 diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..60ff3224ff2fef1e4b8a6a7f8348f50bc5521690 --- /dev/null +++ b/requirements/requirements-dev.txt @@ -0,0 +1,8 @@ +autopep8>=1.5.6 +clang-format>=13.0.1 +pre-commit>=2.17.0 +pytest>=6.2.3 +pytest-cov>=2.11.1 +pytest-forked>=1.3.0 +pytest-html==4.1.1 +pytest-xdist diff --git a/requirements/requirements-flashattention.txt b/requirements/requirements-flashattention.txt new file mode 100644 index 0000000000000000000000000000000000000000..a004e807578d35f1cae8df3ef0d3ab48b26cdee8 --- /dev/null +++ b/requirements/requirements-flashattention.txt @@ -0,0 +1 @@ +flash-attn==2.5.6 diff --git a/requirements/requirements-mamba.txt b/requirements/requirements-mamba.txt new file mode 100644 index 0000000000000000000000000000000000000000..09dfa1fb9366ef35c491ee4608f172c4a701b0d3 --- /dev/null +++ b/requirements/requirements-mamba.txt @@ -0,0 +1,3 @@ +causal_conv1d>=1.1.0 +einops +mamba_ssm>=1.2.0.post1 # required for untied embedding + unembedding layers diff --git a/requirements/requirements-onebitadam.txt b/requirements/requirements-onebitadam.txt new file mode 100644 index 0000000000000000000000000000000000000000..349e3b39a92d3d39bbe4f3ae6ecab01f93f06e73 --- /dev/null +++ b/requirements/requirements-onebitadam.txt @@ -0,0 +1 @@ +cupy-cuda111>=8.6.0 diff --git 
a/requirements/requirements-s3.txt b/requirements/requirements-s3.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fbf031fe478588ec34bb7b6e2a0d63b0ae71c9b9
--- /dev/null
+++ b/requirements/requirements-s3.txt
@@ -0,0 +1,2 @@
+boto3
+hf-transfer>=0.1.3
diff --git a/requirements/requirements-sparseattention.txt b/requirements/requirements-sparseattention.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3b382f83f2aef216abb32c9e4048830c998b43e2
--- /dev/null
+++ b/requirements/requirements-sparseattention.txt
@@ -0,0 +1 @@
+triton==2.1.0
diff --git a/requirements/requirements-tensorboard.txt b/requirements/requirements-tensorboard.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6d5967364055a0604235fd5d0195083f4b45664f
--- /dev/null
+++ b/requirements/requirements-tensorboard.txt
@@ -0,0 +1 @@
+tensorboard==2.13.0
diff --git a/requirements/requirements-transformerengine.txt b/requirements/requirements-transformerengine.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2050d7566eea540f954e4ddf2628bb8f8cc46a65
--- /dev/null
+++ b/requirements/requirements-transformerengine.txt
@@ -0,0 +1 @@
+git+https://github.com/NVIDIA/TransformerEngine.git@stable
diff --git a/requirements/requirements-wandb.txt b/requirements/requirements-wandb.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1df18b0518347176f207ddd5dff98f592feee20c
--- /dev/null
+++ b/requirements/requirements-wandb.txt
@@ -0,0 +1 @@
+wandb>=0.10.28
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5a84674bb0458509178d6685c86c5a853473ae0
--- /dev/null
+++ b/requirements/requirements.txt
@@ -0,0 +1,15 @@
+deepspeed@git+https://github.com/EleutherAI/DeeperSpeed.git@02e2ebf7dee6aaab3d89094ed470a4609763c742#egg=deepspeed
+ftfy>=6.0.1
+huggingface_hub>=0.11.0
+jinja2==3.1.4
+lm_dataformat@git+https://github.com/EleutherAI/lm_dataformat.git@4eec05349977071bf67fc072290b95e31c8dd836
+lm_eval>=0.4.0,<=0.4.1
+mpi4py>=3.0.3
+numpy<2.0
+pybind11>=2.6.2
+regex
+sentencepiece
+six
+tiktoken>=0.1.2
+tokenizers>=0.12.1
+transformers==4.38.0
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..32618d7574fb39fe0d7e8e9c00f483325c8b43c3
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,129 @@
+# Dependencies
+
+Tests use pytest with the coverage and forked plugins. Install with:
+
+```bash
+pip install -r requirements/requirements.txt
+pip install -r requirements/requirements-dev.txt
+```
+
+Download the required test data:
+```bash
+python prepare_data.py
+```
+
+# Run
+
+Tests can be run using pytest.
+
+* The argument --forked needs to be provided
+* A coverage report can be created using the optional arguments --cov-report and --cov (see pytest documentation)
+* A subset of tests can be selected by pointing to the module within tests
+
+```bash
+# run all tests, output coverage report of megatron module in terminal
+pytest --forked --cov-report term --cov=megatron tests
+
+# run tests in tests/model, output coverage report of megatron module as html
+pytest --forked --cov-report html --cov=megatron tests/model
+
+# run tests in tests/model/test_model_generation.py, don't output coverage report
+pytest --forked tests/model/test_model_generation.py
+```
+
+Some tests can run on CPU only. These are marked with the decorator @pytest.mark.cpu.
+The test cases for CPU can be run with:
+```bash
+pytest tests -m cpu
+```
+
+If an HTML coverage report has been created, a simple HTTP server can be run to serve the static files.
+
+```bash
+python -m http.server --directory htmlcov 8000
+```
+
+
+## Tips and Tricks
+If you see this kind of error:
+```
+RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method
+```
+It usually means that some torch.cuda function was used before the test created its worker processes. However, just importing `from torch.utils import cpp_extension` can also trigger this.
+
+
+## CPU Test Integration
+
+Tests can be run against physical CPUs through GitHub Actions. To have tests run on a physical CPU runner, here is generally how the CI should be written:
+
+### runs-on
+
+#### NOTE: These BKMs (best known methods) were written to work with CI infrastructure that is no longer in place. To use the GitHub runners (ubuntu-22.04 / ubuntu-latest), skip the 'runs-on' section.
+
+The CI needs to be written to target the CPU GitHub Actions runner. The jobs that need to run on CPU should use the hardware runner's labels:
+```yaml
+jobs:
+  cpu-test-job:
+    runs-on: [ 'self-hosted', 'aws', 'test'] # these labels tell GitHub to execute on the runner with the 'aws' and 'test' labels
+```
+
+### Software dependencies
+
+Hardware tests that need Python and Docker should install them as part of the test execution to make sure the tests run as expected:
+```yaml
+steps:
+  # sample syntax to set up Python with pip
+  - uses: actions/setup-python@v4
+    with:
+      python-version: "3.8"
+      cache: "pip"
+
+  # sample setup of Docker (there's no official Docker setup action)
+  - name: Docker setup
+    run: | # taken from Docker's installation page: https://docs.docker.com/engine/install/ubuntu/
+      # Add Docker's official GPG key:
+      sudo apt-get update
+      sudo apt-get install ca-certificates curl
+      sudo install -m 0755 -d /etc/apt/keyrings
+      sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+      sudo chmod a+r /etc/apt/keyrings/docker.asc
+      # Add the repository to Apt sources:
+      echo \
+      "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+      $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+      sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+      sudo apt-get update
+      sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
+```
+
+Any other software dependencies should be assumed to be missing and installed as part of the CI.
+
+### Using Docker image
+
+Using the Docker image and running tests in a container is recommended to resolve environment issues. There is a modified docker-compose.yml in the tests/cpu_tests directory that should be used for CPU tests:
+
+```bash
+cp tests/cpu_tests/docker-compose.yml .
+# export any env variables here that should be used:
+export NEOX_DATA_PATH='./data/enwik8'
+docker compose run -d --build --name $CONTAINER gpt-neox tail -f /dev/null
+# then can set up and run tests in the container using docker exec
+docker exec $CONTAINER pip install -r /workspace/requirements-dev.txt
+# etc.
+# please clean up the container as part of the CI:
+docker rm $CONTAINER
+```
+
+At the time of writing there is no built-in method to provide an offline-built Docker image to `jobs.<job_id>.container`.
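Putting the pieces above together, a single CI step for the container-based CPU tests might look like the following. This is a minimal sketch, assuming the compose file mounts the repository at `/workspace` (as implied by the `docker exec` paths above) and that `$CONTAINER` holds the container name:

```bash
# build and start the container, then run only the CPU-marked tests inside it
cp tests/cpu_tests/docker-compose.yml .
export NEOX_DATA_PATH='./data/enwik8'
docker compose run -d --build --name $CONTAINER gpt-neox tail -f /dev/null
docker exec $CONTAINER pip install -r /workspace/requirements-dev.txt
docker exec $CONTAINER python -m pytest /workspace/tests -m cpu
# clean up the container when done
docker rm -f $CONTAINER
```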
+
+### Using existing CPU test CI
+
+There is an existing CPU test workflow that can be included in an existing CI:
+
+```yaml
+steps:
+  - name: Run CPU Tests
+    # reuses the composite action defined in tests/cpu_tests/action.yml; this
+    # requires a prior actions/checkout step so the local action is available:
+    # https://github.com/EleutherAI/gpt-neox/blob/main/tests/cpu_tests/action.yml
+    uses: ./tests/cpu_tests
+    with:
+      target_test_ref: ${{ github.ref }} # replace with the ref/SHA that the tests should be run on
```
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/common.py b/tests/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..c63ced0f7b1d0f6610986edefafc8f5a6c47a91a
--- /dev/null
+++ b/tests/common.py
@@ -0,0 +1,630 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import os
+import re
+import subprocess
+import time
+import shutil
+import itertools
+from pathlib import Path
+from abc import ABC, abstractmethod
+from deepspeed.accelerator import get_accelerator
+
+import pytest
+from _pytest.outcomes import Skipped
+from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker
+import random
+import train
+
+import torch
+
+import torch.distributed as dist
+from torch.multiprocessing import Process
+import torch.multiprocessing as mp
+from yaml import load
+
+try:
+    from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+    from yaml import Loader, Dumper
+from copy import deepcopy
+import deepspeed
+
+
+def is_rocm_pytorch():
+    # True when this PyTorch build targets ROCm (torch.version.hip is set);
+    # used by set_accelerator_visible() below
+    return getattr(torch.version, "hip", None) is not None
+
+
+TEST_CHECKPOINT_DIR = "test_checkpoint"
+TEST_LOG_DIR = "test_logs"
+TEST_TENSORBOARD_DIR = "test_tensorboard"
+
+# Worker timeout *after* the first worker has completed.
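+# Remaining workers get DEEPSPEED_UNIT_WORKER_TIMEOUT seconds to finish once the
+# first worker completes; DEEPSPEED_TEST_TIMEOUT caps the whole distributed test.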
+DEEPSPEED_UNIT_WORKER_TIMEOUT = 120 +DEEPSPEED_TEST_TIMEOUT = 600 + + +def get_xdist_worker_id(): + xdist_worker = os.environ.get("PYTEST_XDIST_WORKER", None) + if xdist_worker is not None: + xdist_worker_id = xdist_worker.replace("gw", "") + return int(xdist_worker_id) + return None + + +def get_master_port(): + master_port = os.environ.get("DS_TEST_PORT", "29503") + xdist_worker_id = get_xdist_worker_id() + if xdist_worker_id is not None: + master_port = str(int(master_port) + xdist_worker_id) + return master_port + + +_num_gpus = None + + +def set_accelerator_visible(): + cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) + xdist_worker_id = get_xdist_worker_id() + if xdist_worker_id is None: + xdist_worker_id = 0 + if cuda_visible is None: + # CUDA_VISIBLE_DEVICES is not set, discover it using accelerator specific command instead + if get_accelerator().device_name() == "cuda": + if is_rocm_pytorch(): + rocm_smi = subprocess.check_output(["rocm-smi", "--showid"]) + gpu_ids = filter( + lambda s: "GPU" in s, rocm_smi.decode("utf-8").strip().split("\n") + ) + num_accelerators = len(list(gpu_ids)) + else: + nvidia_smi = subprocess.check_output(["nvidia-smi", "--list-gpus"]) + num_accelerators = len(nvidia_smi.decode("utf-8").strip().split("\n")) + elif get_accelerator().device_name() == "xpu": + clinfo = subprocess.check_output(["clinfo"]) + lines = clinfo.decode("utf-8").strip().split("\n") + num_accelerators = 0 + for line in lines: + match = re.search("Device Type.*GPU", line) + if match: + num_accelerators += 1 + elif get_accelerator().device_name() == "npu": + npu_smi = subprocess.check_output(["npu-smi", "info", "-l"]) + num_accelerators = int( + npu_smi.decode("utf-8").strip().split("\n")[0].split(":")[1].strip() + ) + else: + assert get_accelerator().device_name() == "cpu" + cpu_sockets = int( + subprocess.check_output( + 'cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', + shell=True, + ) + ) + num_accelerators = cpu_sockets + + cuda_visible = ",".join(map(str, range(num_accelerators))) + + # rotate list based on xdist worker id, example below + # wid=0 -> ['0', '1', '2', '3'] + # wid=1 -> ['1', '2', '3', '0'] + # wid=2 -> ['2', '3', '0', '1'] + # wid=3 -> ['3', '0', '1', '2'] + dev_id_list = cuda_visible.split(",") + dev_id_list = dev_id_list[xdist_worker_id:] + dev_id_list[:xdist_worker_id] + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(dev_id_list) + + +def count_gpus(): + global _num_gpus + if _num_gpus is None: + import subprocess + + nvidia_smi = subprocess.check_output(["nvidia-smi", "--list-gpus"]) + _num_gpus = len(nvidia_smi.decode("utf-8").strip().split("\n")) + return _num_gpus + + +def set_cuda_visibile(): + cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) + xdist_worker_id = get_xdist_worker_id() + if xdist_worker_id is None: + xdist_worker_id = 0 + if cuda_visible is None: + # CUDA_VISIBLE_DEVICES is not set, discover it from nvidia-smi instead + import subprocess + + nvidia_smi = subprocess.check_output(["nvidia-smi", "--list-gpus"]) + num_gpus = len(nvidia_smi.decode("utf-8").strip().split("\n")) + cuda_visible = ",".join(map(str, range(num_gpus))) + + # rotate list based on xdist worker id, example below + # wid=0 -> ['0', '1', '2', '3'] + # wid=1 -> ['1', '2', '3', '0'] + # wid=2 -> ['2', '3', '0', '1'] + # wid=3 -> ['3', '0', '1', '2'] + dev_id_list = cuda_visible.split(",") + dev_id_list = dev_id_list[xdist_worker_id:] + dev_id_list[:xdist_worker_id] + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(dev_id_list) + + +def 
get_root_directory(): + return Path(__file__).parents[1] + + +def get_config_directory(): + return get_root_directory() / "configs" + + +def get_configs_with_path(configs): + return [str(get_config_directory() / cfg) for cfg in configs] + + +def clear_test_dirs(): + log_dir = os.path.join(get_root_directory(), TEST_LOG_DIR) + if os.path.isdir(log_dir): + shutil.rmtree(log_dir) + + checkpoint_dir = os.path.join(get_root_directory(), TEST_CHECKPOINT_DIR) + if os.path.isdir(checkpoint_dir): + shutil.rmtree(checkpoint_dir) + + tensorboard_dir = os.path.join(get_root_directory(), TEST_TENSORBOARD_DIR) + if os.path.isdir(tensorboard_dir): + shutil.rmtree(tensorboard_dir) + + +class DistributedExec(ABC): + """ + Base class for distributed execution of functions/methods. Contains common + methods needed for DistributedTest and DistributedFixture. + """ + + world_size = 2 + backend = get_accelerator().communication_backend_name() + init_distributed = True + set_dist_env = True + requires_cuda_env = True + reuse_dist_env = False + _pool_cache = {} + exec_timeout = DEEPSPEED_TEST_TIMEOUT + + @abstractmethod + def run(self): + ... + + def __call__(self, request=None): + self._fixture_kwargs = self._get_fixture_kwargs(request, self.run) + world_size = self.world_size + if self.requires_cuda_env and not get_accelerator().is_available(): + pytest.skip("only supported in accelerator environments.") + + if isinstance(world_size, int): + world_size = [world_size] + for procs in world_size: + self._launch_procs(procs) + + def _get_fixture_kwargs(self, request, func): + if not request: + return {} + # Grab fixture / parametrize kwargs from pytest request object + fixture_kwargs = {} + params = inspect.getfullargspec(func).args + params.remove("self") + for p in params: + try: + fixture_kwargs[p] = request.getfixturevalue(p) + except FixtureLookupError: + pass # test methods can have kwargs that are not fixtures + return fixture_kwargs + + def _launch_procs(self, num_procs): + # Verify we have enough accelerator devices to run this test + if ( + get_accelerator().is_available() + and get_accelerator().device_count() < num_procs + ): + pytest.skip( + f"Skipping test because not enough GPUs are available: {num_procs} required, {get_accelerator().device_count()} available" + ) + + mp.set_start_method("spawn", force=True) + + # Create process pool or use cached one + master_port = None + if self.reuse_dist_env: + if num_procs not in self._pool_cache: + self._pool_cache[num_procs] = mp.Pool(processes=num_procs) + master_port = get_master_port() + pool = self._pool_cache[num_procs] + else: + pool = mp.Pool(processes=num_procs) + master_port = get_master_port() + + # Run the test + args = [(local_rank, num_procs, master_port) for local_rank in range(num_procs)] + skip_msgs_async = pool.starmap_async(self._dist_run, args) + + try: + skip_msgs = skip_msgs_async.get(self.exec_timeout) + except mp.TimeoutError: + # Shortcut to exit pytest in the case of a hanged test. 
This + # usually means an environment error and the rest of tests will + # hang (causing super long unit test runtimes) + pytest.exit("Test hanged, exiting", returncode=0) + + # Tear down distributed environment and close process pools + self._close_pool(pool, num_procs) + + # If we skipped a test, propagate that to this process + if any(skip_msgs): + assert len(set(skip_msgs)) == 1, "Multiple different skip messages received" + pytest.skip(skip_msgs[0]) + + def _dist_run(self, local_rank, num_procs, master_port): + skip_msg = "" + if not dist.is_initialized(): + """Initialize deepspeed.comm and execute the user function.""" + if self.set_dist_env: + os.environ["MASTER_ADDR"] = "127.0.0.1" + os.environ["MASTER_PORT"] = str(master_port) + os.environ["LOCAL_RANK"] = str(local_rank) + # NOTE: unit tests don't support multi-node so local_rank == global rank + os.environ["RANK"] = str(local_rank) + # In case of multiprocess launching LOCAL_SIZE should be same as WORLD_SIZE + # DeepSpeed single node launcher would also set LOCAL_SIZE accordingly + os.environ["LOCAL_SIZE"] = str(num_procs) + os.environ["WORLD_SIZE"] = str(num_procs) + + # turn off NCCL logging if set + os.environ.pop("NCCL_DEBUG", None) + + if get_accelerator().is_available(): + set_accelerator_visible() + + if get_accelerator().is_available(): + get_accelerator().set_device(local_rank) + + if self.init_distributed: + deepspeed.init_distributed(dist_backend=self.backend) + dist.barrier() + + try: + self.run(**self._fixture_kwargs) + except BaseException as e: + if isinstance(e, Skipped): + skip_msg = e.msg + else: + raise e + + return skip_msg + + def _dist_destroy(self): + if (dist is not None) and dist.is_initialized(): + dist.barrier() + dist.destroy_process_group() + + def _close_pool(self, pool, num_procs, force=False): + if force or not self.reuse_dist_env: + msg = pool.starmap(self._dist_destroy, [() for _ in range(num_procs)]) + pool.close() + pool.join() + + +class DistributedFixture(DistributedExec): + """ + Implementation that extends @pytest.fixture to allow for distributed execution. + This is primarily meant to be used when a test requires executing two pieces of + code with different world sizes. + + There are 2 parameters that can be modified: + - world_size: int = 2 -- the number of processes to launch + - backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use + + Features: + - able to call pytest.skip() inside fixture + - can be reused by multiple tests + - can accept other fixtures as input + + Limitations: + - cannot use @pytest.mark.parametrize + - world_size cannot be modified after definition and only one world_size value is accepted + - any fixtures used must also be used in the test that uses this fixture (see example below) + - return values cannot be returned. Passing values to a DistributedTest + object can be achieved using class_tmpdir and writing to file (see example below) + + Usage: + - must implement a run(self, ...) 
method + - fixture can be used by making the class name input to a test function + + Example: + @pytest.fixture(params=[10,20]) + def regular_pytest_fixture(request): + return request.param + + class distributed_fixture_example(DistributedFixture): + world_size = 4 + + def run(self, regular_pytest_fixture, class_tmpdir): + assert int(os.environ["WORLD_SIZE"]) == self.world_size + local_rank = os.environ["LOCAL_RANK"] + print(f"Rank {local_rank} with value {regular_pytest_fixture}") + with open(os.path.join(class_tmpdir, f"{local_rank}.txt"), "w") as f: + f.write(f"{local_rank},{regular_pytest_fixture}") + + class TestExample(DistributedTest): + world_size = 1 + + def test(self, distributed_fixture_example, regular_pytest_fixture, class_tmpdir): + assert int(os.environ["WORLD_SIZE"]) == self.world_size + for rank in range(4): + with open(os.path.join(class_tmpdir, f"{rank}.txt"), "r") as f: + assert f.read() == f"{rank},{regular_pytest_fixture}" + """ + + is_dist_fixture = True + + # These values are just placeholders so that pytest recognizes this as a fixture + _pytestfixturefunction = FixtureFunctionMarker(scope="function", params=None) + __name__ = "" + + def __init__(self): + assert isinstance( + self.world_size, int + ), "Only one world size is allowed for distributed fixtures" + self.__name__ = type(self).__name__ + _pytestfixturefunction = FixtureFunctionMarker( + scope="function", params=None, name=self.__name__ + ) + + +class DistributedTest(DistributedExec): + """ + Implementation for running pytest with distributed execution. + + There are 2 parameters that can be modified: + - world_size: Union[int,List[int]] = 2 -- the number of processes to launch + - backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use + + Features: + - able to call pytest.skip() inside tests + - works with pytest fixtures, parametrize, mark, etc. + - can contain multiple tests (each of which can be parametrized separately) + - class methods can be fixtures (usable by tests in this class only) + - world_size can be changed for individual tests using @pytest.mark.world_size(world_size) + - class_tmpdir is a fixture that can be used to get a tmpdir shared among + all tests (including DistributedFixture) + + Usage: + - class name must start with "Test" + - must implement one or more test*(self, ...) 
methods + + Example: + @pytest.fixture(params=[10,20]) + def val1(request): + return request.param + + @pytest.mark.fast + @pytest.mark.parametrize("val2", [30,40]) + class TestExample(DistributedTest): + world_size = 2 + + @pytest.fixture(params=[50,60]) + def val3(self, request): + return request.param + + def test_1(self, val1, val2, str1="hello world"): + assert int(os.environ["WORLD_SIZE"]) == self.world_size + assert all(val1, val2, str1) + + @pytest.mark.world_size(1) + @pytest.mark.parametrize("val4", [70,80]) + def test_2(self, val1, val2, val3, val4): + assert int(os.environ["WORLD_SIZE"]) == 1 + assert all(val1, val2, val3, val4) + """ + + def __init__(self): + self.is_dist_test = True + + # Temporary directory that is shared among test methods in a class + @pytest.fixture(autouse=True, scope="class") + def class_tmpdir(self, tmpdir_factory): + fn = tmpdir_factory.mktemp(self.__class__.__name__) + return fn + + def run(self, **fixture_kwargs): + self._current_test(**fixture_kwargs) + + def __call__(self, request): + self._current_test = self._get_current_test_func(request) + self._fixture_kwargs = self._get_fixture_kwargs(request, self._current_test) + + if self.requires_cuda_env and not get_accelerator().is_available(): + pytest.skip("only supported in accelerator environments.") + + # Catch world_size override pytest mark + for mark in getattr(request.function, "pytestmark", []): + if mark.name == "world_size": + world_size = mark.args[0] + break + else: + world_size = self.world_size + + if isinstance(world_size, int): + world_size = [world_size] + for procs in world_size: + self._launch_procs(procs) + time.sleep(0.5) + + def _get_current_test_func(self, request): + # DistributedTest subclasses may have multiple test methods + func_name = request.function.__name__ + return getattr(self, func_name) + + +def get_test_path(filename): + curr_path = Path(__file__).parent + return str(curr_path.joinpath(filename)) + + +def model_setup(yaml_list=None, param_dict=None, clear_data=True): + from megatron.neox_arguments import NeoXArgs + from megatron.mpu import destroy_model_parallel + from megatron import initialize_megatron + from megatron.training import setup_model_and_optimizer + + destroy_model_parallel() # mpu model parallel contains remaining global vars + if clear_data and ( + not torch.distributed.is_initialized() + or torch.distributed.get_world_size() == 1 + or torch.distributed.get_rank() == 0 + ): + clear_test_dirs() + + overwrite_values = { + "user_script": str(get_root_directory() / "train.py"), + "save": TEST_CHECKPOINT_DIR, + "load": TEST_CHECKPOINT_DIR, + "log_dir": TEST_LOG_DIR, + "tensorboard_dir": TEST_TENSORBOARD_DIR, + } + + # should not both be none + assert yaml_list is not None or param_dict is not None + + # initially load config from files as would be the case in deepy.py + if yaml_list is not None: + args_loaded = NeoXArgs.from_ymls(yaml_list, overwrite_values=overwrite_values) + else: + p_dict = param_dict.copy() + p_dict.update(overwrite_values) + args_loaded = NeoXArgs.from_dict(p_dict) + + args_loaded.build_tokenizer() + + initialize_megatron(neox_args=args_loaded) + model, optimizer, lr_scheduler = setup_model_and_optimizer( + neox_args=args_loaded, use_cache=True + ) + return model, optimizer, lr_scheduler, args_loaded + + +def simulate_deepy_env(monkeypatch, input_args): + from megatron.neox_arguments import NeoXArgs + + monkeypatch.setenv("WORLD_SIZE", "1") + monkeypatch.setenv("RANK", "0") + neox_args = NeoXArgs.consume_deepy_args(input_args) + 
deepspeed_main_args = neox_args.get_deepspeed_main_args() + return deepspeed_main_args + + +def save_random_model(input_args, model_dir, train_iters=0): + # Save randomly initialised model + train_args = { + "do_train": False, + "train_iters": train_iters, + "save": model_dir, + "extra_save_iters": [train_iters], + } + train.main(input_args=input_args, overwrite_values=train_args) + + +def bounded_product(sequence, n=None, seed=None): + """ + Returns a shuffled, bounded cartesian product of the input sequence. + Designed to cover as wide a range of permutations as possible with a limited number of iterations. + Will manifest the whole list in memory, so not suitable for super large sequences. + + :param sequence: iterable + :param n: length of returned list + :param seed: random seed for reproducibility + :return: list + """ + p = list(itertools.product(*sequence)) + if seed is not None: + random.seed(seed) + random.shuffle(p) + return p if n is None else p[:n] + + +def model_setup_simple(deepspeed_main_args, overwrite_values, iteration=None): + from megatron.neox_arguments import NeoXArgs + from megatron import initialize_megatron + from megatron.training import setup_model_and_optimizer + + neox_args = NeoXArgs.consume_neox_args( + input_args=deepspeed_main_args, overwrite_values=overwrite_values + ) + neox_args.configure_distributed_args() + neox_args.build_tokenizer() + initialize_megatron(neox_args=neox_args) + model, optimizer, lr_scheduler = setup_model_and_optimizer( + neox_args=neox_args, use_cache=False + ) + return model, optimizer, lr_scheduler, neox_args + + +def parametrize( + params_to_test: dict, max_tests: int = 50, seed: int = None, with_names=True +): + """ + Generates a random sample of max_tests length of all possible combinations of values in + `params_to_test`. + + In `params_to_test` you can either specify one value, and all possible settings of that value, + or two values separated by a comma, and all possible combinations of those two values in tandem. + i.e "hidden_size,num_heads": [[768,12], [1024,32], [2048, 64]] + so the first item in each list is a value of `hidden_size` and the second a value of `num_heads` + this is useful for reducing the size of possible tests for values we know are unlikely to interact beforehand, + since the cartesian product can grow very large. 
+ + :param params_to_test: dict of neox params + :param max_tests: maximum number of tests to run + :param seed: random seed + :return: a list of neox param dicts to pass to a parametrized unit test + """ + keys, values = zip(*params_to_test.items()) + ret = [] + if with_names: + experiments = [] + for p in bounded_product(values, n=max_tests, seed=seed): + experiment = dict(zip(keys, p)) + to_pop = [] + to_add = {} + for k, v in experiment.items(): + if "," in k: + keys_split = [i.strip() for i in k.split(",")] + values_separated = experiment[k] + to_pop.append(k) + assert len(values_separated) == len(keys_split) + new_dict = dict(zip(keys_split, values_separated)) + to_add.update(new_dict) + experiment.update(to_add) + for k in to_pop: + experiment.pop(k) + base = deepcopy(BASE_CONFIG) + base.update(experiment) + ret.append(base) + if with_names: + experiments.append(experiment) + if with_names: + return ret, [dict_repr(d) for d in experiments] + return ret + + +def dict_repr(d): + return " ".join([f"{str(k)} : {str(v)}" for k, v in d.items()]) + + +binary = [True, False] + +with open("tests/config/test_setup.yml", "r") as f: + BASE_CONFIG = load(f, Loader=Loader) + print(f"Base Config:\n{BASE_CONFIG}") diff --git a/tests/config/test_setup.yml b/tests/config/test_setup.yml new file mode 100644 index 0000000000000000000000000000000000000000..882bf7e63d57ab99b60b3ae4ac8f5c1a4a0e9219 --- /dev/null +++ b/tests/config/test_setup.yml @@ -0,0 +1,83 @@ +# 19M parameter model, & local setup with some additional simplifications +{ + # Settings to make the test setup as lightweight as possible + "data_path": "data/enwik8/enwik8_text_document", + "vocab_file": "data/gpt2-vocab.json", + "merge_file": "data/gpt2-merges.txt", + "lr_decay_iters": 20, + "train_iters": 20, + "hostfile": "None", + "include": "localhost:1", + "use_wandb": False, + + # Settings copied from 19M parameter config (some modifications above, meaning we can't use configs/19M.yml directly) + "pipe_parallel_size": 1, + "model_parallel_size": 1, + + # model settings + "num_layers": 2, + "hidden_size": 8, + "num_attention_heads": 4, + "seq_length": 1024, + "max_position_embeddings": 1024, + "pos_emb": "rotary", + "no_weight_tying": true, + "gpt_j_residual": false, + "output_layer_parallelism": "column", + + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # Optimizer + "optimizer": { + "type": "sm3", + "params": {}, + }, + + # precision + "precision": "fp16", + + # init methods + "init_method": "small_init", + "output_layer_init_method": "wang_init", + + "train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 1, + "data_impl": "mmap", + "num_workers": 1, + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.1, + "hidden_dropout": 0, + "attention_dropout": 0, + + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 1000, + "eval_interval": 100000, + "eval_iters": 10, + + "log_interval": 10, + "steps_per_print": 10, + "wall_clock_breakdown": true, + + # additional deepspeed args not specified above + "deepspeed_extra_args": { + "comms_logger": { + "enabled": true, + "verbose": true, + "prof_all": true, + "debug": false + }, + } +} diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 
0000000000000000000000000000000000000000..917dd8543bbdb8a23d273c48f21dc737b7d66a7a --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# tests directory-specific settings - this file is run automatically by pytest before any tests are run + +import sys +import pytest +import os +from os.path import abspath, dirname, join +import torch +import warnings + +# Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small) +os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" + +# Allow having multiple repository checkouts without needing to remember to rerun +# 'pip install -e .[dev]' when switching between checkouts to run tests. +git_repo_path = abspath(join(dirname(dirname(__file__)), "src")) +sys.path.insert(1, git_repo_path) + + +def pytest_configure(config): + # config.option.color = "yes" + # config.option.durations = 0 + # config.option.durations_min = 1 + config.option.verbose = True + + +def pytest_addoption(parser): + # e.g. invoke as `pytest tests --torch_ver "2.1" --cuda_ver "12.1"` to assert the test environment + parser.addoption("--torch_ver", default=None, type=str) + parser.addoption("--cuda_ver", default=None, type=str) + + +def validate_version(expected, found): + # Compare only as many version components as the expectation specifies, + # so e.g. an expected "2.1" matches a found "2.1.0" + version_depth = expected.count(".") + 1 + found = ".".join(found.split(".")[:version_depth]) + return found == expected + + +@pytest.fixture(scope="session", autouse=True) +def check_environment(pytestconfig): + expected_torch_version = pytestconfig.getoption("torch_ver") + expected_cuda_version = pytestconfig.getoption("cuda_ver") + if expected_torch_version is None: + warnings.warn( + "Running tests without verifying the torch version; provide an expected torch version with --torch_ver" + ) + elif not validate_version(expected_torch_version, torch.__version__): + pytest.exit( + f"expected torch version {expected_torch_version} did not match found torch version {torch.__version__}", + returncode=2, + ) + if expected_cuda_version is None: + warnings.warn( + "Running tests without verifying the cuda version; provide an expected cuda version with --cuda_ver" + ) + elif not validate_version(expected_cuda_version, torch.version.cuda): + pytest.exit( + f"expected cuda version {expected_cuda_version} did not match found cuda version {torch.version.cuda}", + returncode=2, + ) + + +# Override of pytest "runtest" for the DistributedTest class +# This hook is run before the default pytest_runtest_call +@pytest.hookimpl(tryfirst=True) +def pytest_runtest_call(item): + # We want to use our own launching function for distributed tests + if getattr(item.cls, "is_dist_test", False): + dist_test_class = item.cls() + dist_test_class(item._request) + item.runtest = lambda: True  # Dummy function so the test is not run twice + + +# We allow DistributedTest to reuse distributed environments. When the last +# test for a class is run, we want to make sure those distributed environments +# are destroyed.
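+# (Illustrative sketch only -- `DistributedTest` itself lives in the shared test utilities, +# not in this conftest -- a test class opts into these hooks via the class attributes +# they check, roughly: +# +#     class TestFoo(DistributedTest):  # DistributedTest sets `is_dist_test = True` +#         world_size = 2               # hypothetical values +#         reuse_dist_env = True        # keep the distributed environment alive across tests +# +#         def test_bar(self): +#             ... +# )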
+def pytest_runtest_teardown(item, nextitem): + if getattr(item.cls, "reuse_dist_env", False) and not nextitem: + dist_test_class = item.cls() + for num_procs, pool in dist_test_class._pool_cache.items(): + dist_test_class._close_pool(pool, num_procs, force=True) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + if getattr(fixturedef.func, "is_dist_fixture", False): + dist_fixture_class = fixturedef.func() + dist_fixture_class(request) diff --git a/tests/cpu_tests/action.yml b/tests/cpu_tests/action.yml new file mode 100644 index 0000000000000000000000000000000000000000..f8180605ff9da41831c7c038ab3837024fac5b89 --- /dev/null +++ b/tests/cpu_tests/action.yml @@ -0,0 +1,96 @@ +name: Composite CPU Test Run +inputs: + target_test_ref: + description: 'Target ref to checkout and run CPU tests on' + required: true + type: string +runs: + using: composite + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.target_test_ref }} + - name: Install Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + cache: "pip" + cache-dependency-path: "**/requirements*.txt" + - name: Upgrade Pip + shell: bash + run: python -m pip install --upgrade pip + - name: Set up Docker repository + shell: bash + run: | + # Add Docker's official GPG key: + sudo apt-get update -y + sudo apt-get install ca-certificates curl -y + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + - name: Docker installation + shell: bash + run: | + sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + sudo docker run hello-world + - name: Prepare data + shell: bash + run: | + python prepare_data.py -d ./data + - name: Remove previous container + shell: bash + run: | + if docker ps -a | grep -q "$CONTAINER"; then + echo "Container already exists, deleting it..." + docker rm -f $CONTAINER + fi + env: + CONTAINER: gpt-neox-${{ inputs.target_test_ref }} + - name: Create container + shell: bash + run: | + mv docker-compose.yml .docker-compose.yml + cp tests/cpu_tests/docker-compose.yml . + export NEOX_DATA_PATH='./data/enwik8' + export NEOX_CHECKPOINT_PATH='/mnt/sda/checkpoints' #todo: where do I get this? 
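+ # NEOX_DATA_PATH and NEOX_CHECKPOINT_PATH are consumed by the volume mounts in + # tests/cpu_tests/docker-compose.yml, which was copied to the repo root above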
+ docker compose run -d --build --name $CONTAINER gpt-neox tail -f /dev/null + env: + CONTAINER: gpt-neox-${{ inputs.target_test_ref }} + - name: Install test requirements + shell: bash + run: | + docker exec $CONTAINER pip install -r /workspace/requirements-dev.txt + env: + CONTAINER: gpt-neox-${{ inputs.target_test_ref }} + - name: Execute CPU tests 1 + shell: bash + run: | + docker exec $CONTAINER sh -c "cd gpt-neox && pytest tests -m cpu" + env: + CONTAINER: gpt-neox-${{ inputs.target_test_ref }} + - name: Execute CPU tests 2 + if: always() + shell: bash + run: | + docker exec $CONTAINER sh -c "cd gpt-neox && PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python pytest tests -m cpu" + env: + CONTAINER: gpt-neox-${{ inputs.target_test_ref }} +# - name: Generate report +# shell: bash +# if: always() +# run: | +# docker exec $CONTAINER python -m http.server --directory htmlcov 8000 # this may not work with AWS - should perhaps pass back as an artifact for Github to use instead. +# env: +# CONTAINER: gpt-neox-${{ inputs.target_test_ref }} + - name: Remove CPU docker-compose + shell: bash + if: always() + run: | + rm docker-compose.yml + mv .docker-compose.yml docker-compose.yml diff --git a/tests/cpu_tests/docker-compose.yml b/tests/cpu_tests/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..8b29243467bf9bb01b1ea43158770bd4b11960c4 --- /dev/null +++ b/tests/cpu_tests/docker-compose.yml @@ -0,0 +1,21 @@ +version: '3' # slightly different to make sure CPU tests run without nvidia device +services: + gpt-neox: + command: nvidia-smi dmon + image: gpt-neox + build: + context: . + dockerfile: Dockerfile + shm_size: 1g + ulimits: + memlock: + soft: -1 + hard: -1 + logging: + options: + max-size: "100m" + max-file: "3" + volumes: + - ${NEOX_DATA_PATH}:/home/mchorse/data + - ${NEOX_CHECKPOINT_PATH}:/home/mchorse/chk + - .:/home/mchorse/gpt-neox diff --git a/tests/data/enwik8_first100.txt b/tests/data/enwik8_first100.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf0afdd9814cb4434534f48b1a8be77019dba27a --- /dev/null +++ b/tests/data/enwik8_first100.txt @@ -0,0 +1,100 @@ + + + Wikipedia + http://en.wikipedia.org/wiki/Main_Page + MediaWiki 1.6alpha + first-letter + + Media + Special + + Talk + User + User talk + Wikipedia + Wikipedia talk + Image + Image talk + MediaWiki + MediaWiki talk + Template + Template talk + Help + Help talk + Category + Category talk + Portal + Portal talk + + + + AaA + 1 + + 32899315 + 2005-12-27T18:46:47Z + + Jsmethers + 614213 + + #REDIRECT [[AAA]] + + + + AlgeriA + 5 + + 18063769 + 2005-07-03T11:13:13Z + + Docu + 8029 + + + adding cur_id=5: {{R from CamelCase}} + #REDIRECT [[Algeria]]{{R from CamelCase}} + + + + AmericanSamoa + 6 + + 18063795 + 2005-07-03T11:14:17Z + + Docu + 8029 + + + adding to cur_id=6 {{R from CamelCase}} + #REDIRECT [[American Samoa]]{{R from CamelCase}} + + + + AppliedEthics + 8 + + 15898943 + 2002-02-25T15:43:11Z + + Conversion script + + + Automated conversion + #REDIRECT [[Applied ethics]] + + + + + AccessibleComputing + 10 + + 15898945 + 2003-04-25T22:18:38Z + + Ams80 + 7543 + + + Fixing redirect + #REDIRECT [[Accessible_computing]] diff --git a/tests/data/hf_cache/tokenizer/gpt2.json b/tests/data/hf_cache/tokenizer/gpt2.json new file mode 100644 index 0000000000000000000000000000000000000000..6dd16ac313fb23409c086fd712581528ddfc602b --- /dev/null +++ b/tests/data/hf_cache/tokenizer/gpt2.json @@ -0,0 +1,100305 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + 
"added_tokens": [ + { + "id": 50256, + "content": "<|endoftext|>", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": true, + "special": true + } + ], + "normalizer": null, + "pre_tokenizer": { + "type": "ByteLevel", + "add_prefix_space": false, + "trim_offsets": true, + "use_regex": true + }, + "post_processor": { + "type": "ByteLevel", + "add_prefix_space": true, + "trim_offsets": false, + "use_regex": true + }, + "decoder": { + "type": "ByteLevel", + "add_prefix_space": true, + "trim_offsets": true, + "use_regex": true + }, + "model": { + "type": "BPE", + "dropout": null, + "unk_token": null, + "continuing_subword_prefix": "", + "end_of_word_suffix": "", + "fuse_unk": false, + "byte_fallback": false, + "vocab": { + "!": 0, + "\"": 1, + "#": 2, + "$": 3, + "%": 4, + "&": 5, + "'": 6, + "(": 7, + ")": 8, + "*": 9, + "+": 10, + ",": 11, + "-": 12, + ".": 13, + "/": 14, + "0": 15, + "1": 16, + "2": 17, + "3": 18, + "4": 19, + "5": 20, + "6": 21, + "7": 22, + "8": 23, + "9": 24, + ":": 25, + ";": 26, + "<": 27, + "=": 28, + ">": 29, + "?": 30, + "@": 31, + "A": 32, + "B": 33, + "C": 34, + "D": 35, + "E": 36, + "F": 37, + "G": 38, + "H": 39, + "I": 40, + "J": 41, + "K": 42, + "L": 43, + "M": 44, + "N": 45, + "O": 46, + "P": 47, + "Q": 48, + "R": 49, + "S": 50, + "T": 51, + "U": 52, + "V": 53, + "W": 54, + "X": 55, + "Y": 56, + "Z": 57, + "[": 58, + "\\": 59, + "]": 60, + "^": 61, + "_": 62, + "`": 63, + "a": 64, + "b": 65, + "c": 66, + "d": 67, + "e": 68, + "f": 69, + "g": 70, + "h": 71, + "i": 72, + "j": 73, + "k": 74, + "l": 75, + "m": 76, + "n": 77, + "o": 78, + "p": 79, + "q": 80, + "r": 81, + "s": 82, + "t": 83, + "u": 84, + "v": 85, + "w": 86, + "x": 87, + "y": 88, + "z": 89, + "{": 90, + "|": 91, + "}": 92, + "~": 93, + "¡": 94, + "¢": 95, + "£": 96, + "¤": 97, + "¥": 98, + "¦": 99, + "§": 100, + "¨": 101, + "©": 102, + "ª": 103, + "«": 104, + "¬": 105, + "®": 106, + "¯": 107, + "°": 108, + "±": 109, + "²": 110, + "³": 111, + "´": 112, + "µ": 113, + "¶": 114, + "·": 115, + "¸": 116, + "¹": 117, + "º": 118, + "»": 119, + "¼": 120, + "½": 121, + "¾": 122, + "¿": 123, + "À": 124, + "Á": 125, + "Â": 126, + "Ã": 127, + "Ä": 128, + "Å": 129, + "Æ": 130, + "Ç": 131, + "È": 132, + "É": 133, + "Ê": 134, + "Ë": 135, + "Ì": 136, + "Í": 137, + "Î": 138, + "Ï": 139, + "Ð": 140, + "Ñ": 141, + "Ò": 142, + "Ó": 143, + "Ô": 144, + "Õ": 145, + "Ö": 146, + "×": 147, + "Ø": 148, + "Ù": 149, + "Ú": 150, + "Û": 151, + "Ü": 152, + "Ý": 153, + "Þ": 154, + "ß": 155, + "à": 156, + "á": 157, + "â": 158, + "ã": 159, + "ä": 160, + "å": 161, + "æ": 162, + "ç": 163, + "è": 164, + "é": 165, + "ê": 166, + "ë": 167, + "ì": 168, + "í": 169, + "î": 170, + "ï": 171, + "ð": 172, + "ñ": 173, + "ò": 174, + "ó": 175, + "ô": 176, + "õ": 177, + "ö": 178, + "÷": 179, + "ø": 180, + "ù": 181, + "ú": 182, + "û": 183, + "ü": 184, + "ý": 185, + "þ": 186, + "ÿ": 187, + "Ā": 188, + "ā": 189, + "Ă": 190, + "ă": 191, + "Ą": 192, + "ą": 193, + "Ć": 194, + "ć": 195, + "Ĉ": 196, + "ĉ": 197, + "Ċ": 198, + "ċ": 199, + "Č": 200, + "č": 201, + "Ď": 202, + "ď": 203, + "Đ": 204, + "đ": 205, + "Ē": 206, + "ē": 207, + "Ĕ": 208, + "ĕ": 209, + "Ė": 210, + "ė": 211, + "Ę": 212, + "ę": 213, + "Ě": 214, + "ě": 215, + "Ĝ": 216, + "ĝ": 217, + "Ğ": 218, + "ğ": 219, + "Ġ": 220, + "ġ": 221, + "Ģ": 222, + "ģ": 223, + "Ĥ": 224, + "ĥ": 225, + "Ħ": 226, + "ħ": 227, + "Ĩ": 228, + "ĩ": 229, + "Ī": 230, + "ī": 231, + "Ĭ": 232, + "ĭ": 233, + "Į": 234, + "į": 235, + "İ": 236, + "ı": 237, + "IJ": 238, + "ij": 239, + "Ĵ": 240, + "ĵ": 241, + 
"Ķ": 242, + "ķ": 243, + "ĸ": 244, + "Ĺ": 245, + "ĺ": 246, + "Ļ": 247, + "ļ": 248, + "Ľ": 249, + "ľ": 250, + "Ŀ": 251, + "ŀ": 252, + "Ł": 253, + "ł": 254, + "Ń": 255, + "Ġt": 256, + "Ġa": 257, + "he": 258, + "in": 259, + "re": 260, + "on": 261, + "Ġthe": 262, + "er": 263, + "Ġs": 264, + "at": 265, + "Ġw": 266, + "Ġo": 267, + "en": 268, + "Ġc": 269, + "it": 270, + "is": 271, + "an": 272, + "or": 273, + "es": 274, + "Ġb": 275, + "ed": 276, + "Ġf": 277, + "ing": 278, + "Ġp": 279, + "ou": 280, + "Ġan": 281, + "al": 282, + "ar": 283, + "Ġto": 284, + "Ġm": 285, + "Ġof": 286, + "Ġin": 287, + "Ġd": 288, + "Ġh": 289, + "Ġand": 290, + "ic": 291, + "as": 292, + "le": 293, + "Ġth": 294, + "ion": 295, + "om": 296, + "ll": 297, + "ent": 298, + "Ġn": 299, + "Ġl": 300, + "st": 301, + "Ġre": 302, + "ve": 303, + "Ġe": 304, + "ro": 305, + "ly": 306, + "Ġbe": 307, + "Ġg": 308, + "ĠT": 309, + "ct": 310, + "ĠS": 311, + "id": 312, + "ot": 313, + "ĠI": 314, + "ut": 315, + "et": 316, + "ĠA": 317, + "Ġis": 318, + "Ġon": 319, + "im": 320, + "am": 321, + "ow": 322, + "ay": 323, + "ad": 324, + "se": 325, + "Ġthat": 326, + "ĠC": 327, + "ig": 328, + "Ġfor": 329, + "ac": 330, + "Ġy": 331, + "ver": 332, + "ur": 333, + "Ġu": 334, + "ld": 335, + "Ġst": 336, + "ĠM": 337, + "'s": 338, + "Ġhe": 339, + "Ġit": 340, + "ation": 341, + "ith": 342, + "ir": 343, + "ce": 344, + "Ġyou": 345, + "il": 346, + "ĠB": 347, + "Ġwh": 348, + "ol": 349, + "ĠP": 350, + "Ġwith": 351, + "Ġ1": 352, + "ter": 353, + "ch": 354, + "Ġas": 355, + "Ġwe": 356, + "Ġ(": 357, + "nd": 358, + "ill": 359, + "ĠD": 360, + "if": 361, + "Ġ2": 362, + "ag": 363, + "ers": 364, + "ke": 365, + "Ġ\"": 366, + "ĠH": 367, + "em": 368, + "Ġcon": 369, + "ĠW": 370, + "ĠR": 371, + "her": 372, + "Ġwas": 373, + "Ġr": 374, + "od": 375, + "ĠF": 376, + "ul": 377, + "ate": 378, + "Ġat": 379, + "ri": 380, + "pp": 381, + "ore": 382, + "ĠThe": 383, + "Ġse": 384, + "us": 385, + "Ġpro": 386, + "Ġha": 387, + "um": 388, + "Ġare": 389, + "Ġde": 390, + "ain": 391, + "and": 392, + "Ġor": 393, + "igh": 394, + "est": 395, + "ist": 396, + "ab": 397, + "rom": 398, + "ĠN": 399, + "th": 400, + "Ġcom": 401, + "ĠG": 402, + "un": 403, + "op": 404, + "00": 405, + "ĠL": 406, + "Ġnot": 407, + "ess": 408, + "Ġex": 409, + "Ġv": 410, + "res": 411, + "ĠE": 412, + "ew": 413, + "ity": 414, + "ant": 415, + "Ġby": 416, + "el": 417, + "os": 418, + "ort": 419, + "oc": 420, + "qu": 421, + "Ġfrom": 422, + "Ġhave": 423, + "Ġsu": 424, + "ive": 425, + "ould": 426, + "Ġsh": 427, + "Ġthis": 428, + "nt": 429, + "ra": 430, + "pe": 431, + "ight": 432, + "art": 433, + "ment": 434, + "Ġal": 435, + "ust": 436, + "end": 437, + "--": 438, + "all": 439, + "ĠO": 440, + "ack": 441, + "Ġch": 442, + "Ġle": 443, + "ies": 444, + "red": 445, + "ard": 446, + "âĢ": 447, + "out": 448, + "ĠJ": 449, + "Ġab": 450, + "ear": 451, + "iv": 452, + "ally": 453, + "our": 454, + "ost": 455, + "gh": 456, + "pt": 457, + "Ġpl": 458, + "ast": 459, + "Ġcan": 460, + "ak": 461, + "ome": 462, + "ud": 463, + "The": 464, + "Ġhis": 465, + "Ġdo": 466, + "Ġgo": 467, + "Ġhas": 468, + "ge": 469, + "'t": 470, + "ĠU": 471, + "rou": 472, + "Ġsa": 473, + "Ġj": 474, + "Ġbut": 475, + "Ġwor": 476, + "Ġall": 477, + "ect": 478, + "Ġk": 479, + "ame": 480, + "Ġwill": 481, + "ok": 482, + "Ġwhe": 483, + "Ġthey": 484, + "ide": 485, + "01": 486, + "ff": 487, + "ich": 488, + "pl": 489, + "ther": 490, + "Ġtr": 491, + "..": 492, + "Ġint": 493, + "ie": 494, + "ure": 495, + "age": 496, + "Ġne": 497, + "ial": 498, + "ap": 499, + "ine": 500, + "ice": 501, + "Ġme": 502, + "Ġout": 503, + 
"ans": 504, + "one": 505, + "ong": 506, + "ions": 507, + "Ġwho": 508, + "ĠK": 509, + "Ġup": 510, + "Ġtheir": 511, + "Ġad": 512, + "Ġ3": 513, + "Ġus": 514, + "ated": 515, + "ous": 516, + "Ġmore": 517, + "ue": 518, + "og": 519, + "ĠSt": 520, + "ind": 521, + "ike": 522, + "Ġso": 523, + "ime": 524, + "per": 525, + ".\"": 526, + "ber": 527, + "iz": 528, + "act": 529, + "Ġone": 530, + "Ġsaid": 531, + "Ġ-": 532, + "are": 533, + "Ġyour": 534, + "cc": 535, + "ĠTh": 536, + "Ġcl": 537, + "ep": 538, + "ake": 539, + "able": 540, + "ip": 541, + "Ġcont": 542, + "Ġwhich": 543, + "ia": 544, + "Ġim": 545, + "Ġabout": 546, + "Ġwere": 547, + "very": 548, + "ub": 549, + "Ġhad": 550, + "Ġen": 551, + "Ġcomp": 552, + ",\"": 553, + "ĠIn": 554, + "Ġun": 555, + "Ġag": 556, + "ire": 557, + "ace": 558, + "au": 559, + "ary": 560, + "Ġwould": 561, + "ass": 562, + "ry": 563, + "ĠâĢ": 564, + "cl": 565, + "ook": 566, + "ere": 567, + "so": 568, + "ĠV": 569, + "ign": 570, + "ib": 571, + "Ġoff": 572, + "Ġte": 573, + "ven": 574, + "ĠY": 575, + "ile": 576, + "ose": 577, + "ite": 578, + "orm": 579, + "Ġ201": 580, + "Ġres": 581, + "Ġman": 582, + "Ġper": 583, + "Ġother": 584, + "ord": 585, + "ult": 586, + "Ġbeen": 587, + "Ġlike": 588, + "ase": 589, + "ance": 590, + "ks": 591, + "ays": 592, + "own": 593, + "ence": 594, + "Ġdis": 595, + "ction": 596, + "Ġany": 597, + "Ġapp": 598, + "Ġsp": 599, + "int": 600, + "ress": 601, + "ations": 602, + "ail": 603, + "Ġ4": 604, + "ical": 605, + "Ġthem": 606, + "Ġher": 607, + "ount": 608, + "ĠCh": 609, + "Ġar": 610, + "Ġif": 611, + "Ġthere": 612, + "Ġpe": 613, + "Ġyear": 614, + "av": 615, + "Ġmy": 616, + "Ġsome": 617, + "Ġwhen": 618, + "ough": 619, + "ach": 620, + "Ġthan": 621, + "ru": 622, + "ond": 623, + "ick": 624, + "Ġover": 625, + "vel": 626, + "Ġqu": 627, + "ĊĊ": 628, + "Ġsc": 629, + "reat": 630, + "ree": 631, + "ĠIt": 632, + "ound": 633, + "port": 634, + "Ġalso": 635, + "Ġpart": 636, + "fter": 637, + "Ġkn": 638, + "Ġbec": 639, + "Ġtime": 640, + "ens": 641, + "Ġ5": 642, + "ople": 643, + "Ġwhat": 644, + "Ġno": 645, + "du": 646, + "mer": 647, + "ang": 648, + "Ġnew": 649, + "----": 650, + "Ġget": 651, + "ory": 652, + "ition": 653, + "ings": 654, + "Ġjust": 655, + "Ġinto": 656, + "Ġ0": 657, + "ents": 658, + "ove": 659, + "te": 660, + "Ġpeople": 661, + "Ġpre": 662, + "Ġits": 663, + "Ġrec": 664, + "Ġtw": 665, + "ian": 666, + "irst": 667, + "ark": 668, + "ors": 669, + "Ġwork": 670, + "ade": 671, + "ob": 672, + "Ġshe": 673, + "Ġour": 674, + "wn": 675, + "ink": 676, + "lic": 677, + "Ġ19": 678, + "ĠHe": 679, + "ish": 680, + "nder": 681, + "ause": 682, + "Ġhim": 683, + "ons": 684, + "Ġ[": 685, + "Ġro": 686, + "form": 687, + "ild": 688, + "ates": 689, + "vers": 690, + "Ġonly": 691, + "oll": 692, + "Ġspe": 693, + "ck": 694, + "ell": 695, + "amp": 696, + "Ġacc": 697, + "Ġbl": 698, + "ious": 699, + "urn": 700, + "ft": 701, + "ood": 702, + "Ġhow": 703, + "hed": 704, + "Ġ'": 705, + "Ġafter": 706, + "aw": 707, + "Ġatt": 708, + "ov": 709, + "ne": 710, + "Ġplay": 711, + "erv": 712, + "ict": 713, + "Ġcould": 714, + "itt": 715, + "Ġam": 716, + "Ġfirst": 717, + "Ġ6": 718, + "Ġact": 719, + "Ġ$": 720, + "ec": 721, + "hing": 722, + "ual": 723, + "ull": 724, + "Ġcomm": 725, + "oy": 726, + "old": 727, + "ces": 728, + "ater": 729, + "Ġfe": 730, + "Ġbet": 731, + "we": 732, + "iff": 733, + "Ġtwo": 734, + "ock": 735, + "Ġback": 736, + ").": 737, + "ident": 738, + "Ġunder": 739, + "rough": 740, + "sel": 741, + "xt": 742, + "Ġmay": 743, + "round": 744, + "Ġpo": 745, + "ph": 746, + "iss": 747, + "Ġdes": 748, + "Ġmost": 749, 
+ "Ġdid": 750, + "Ġadd": 751, + "ject": 752, + "Ġinc": 753, + "fore": 754, + "Ġpol": 755, + "ont": 756, + "Ġagain": 757, + "clud": 758, + "tern": 759, + "Ġknow": 760, + "Ġneed": 761, + "Ġcons": 762, + "Ġco": 763, + "Ġ.": 764, + "Ġwant": 765, + "Ġsee": 766, + "Ġ7": 767, + "ning": 768, + "iew": 769, + "ĠThis": 770, + "ced": 771, + "Ġeven": 772, + "Ġind": 773, + "ty": 774, + "ĠWe": 775, + "ath": 776, + "Ġthese": 777, + "Ġpr": 778, + "Ġuse": 779, + "Ġbecause": 780, + "Ġfl": 781, + "ng": 782, + "Ġnow": 783, + "ĠâĢĵ": 784, + "com": 785, + "ise": 786, + "Ġmake": 787, + "Ġthen": 788, + "ower": 789, + "Ġevery": 790, + "ĠUn": 791, + "Ġsec": 792, + "oss": 793, + "uch": 794, + "Ġem": 795, + "Ġ=": 796, + "ĠRe": 797, + "ied": 798, + "rit": 799, + "Ġinv": 800, + "lect": 801, + "Ġsupp": 802, + "ating": 803, + "Ġlook": 804, + "man": 805, + "pect": 806, + "Ġ8": 807, + "row": 808, + "Ġbu": 809, + "Ġwhere": 810, + "ific": 811, + "Ġyears": 812, + "ily": 813, + "Ġdiff": 814, + "Ġshould": 815, + "Ġrem": 816, + "Th": 817, + "In": 818, + "Ġev": 819, + "day": 820, + "'re": 821, + "rib": 822, + "Ġrel": 823, + "ss": 824, + "Ġdef": 825, + "Ġright": 826, + "Ġsy": 827, + "),": 828, + "les": 829, + "000": 830, + "hen": 831, + "Ġthrough": 832, + "ĠTr": 833, + "__": 834, + "Ġway": 835, + "Ġdon": 836, + "Ġ,": 837, + "Ġ10": 838, + "ased": 839, + "Ġass": 840, + "ublic": 841, + "Ġreg": 842, + "ĠAnd": 843, + "ix": 844, + "Ġvery": 845, + "Ġinclud": 846, + "other": 847, + "Ġimp": 848, + "oth": 849, + "Ġsub": 850, + "ĠâĢĶ": 851, + "Ġbeing": 852, + "arg": 853, + "ĠWh": 854, + "==": 855, + "ible": 856, + "Ġdoes": 857, + "ange": 858, + "ram": 859, + "Ġ9": 860, + "ert": 861, + "ps": 862, + "ited": 863, + "ational": 864, + "Ġbr": 865, + "Ġdown": 866, + "Ġmany": 867, + "aking": 868, + "Ġcall": 869, + "uring": 870, + "ities": 871, + "Ġph": 872, + "ics": 873, + "als": 874, + "Ġdec": 875, + "ative": 876, + "ener": 877, + "Ġbefore": 878, + "ility": 879, + "Ġwell": 880, + "Ġmuch": 881, + "erson": 882, + "Ġthose": 883, + "Ġsuch": 884, + "Ġke": 885, + "Ġend": 886, + "ĠBut": 887, + "ason": 888, + "ting": 889, + "Ġlong": 890, + "ef": 891, + "Ġthink": 892, + "ys": 893, + "Ġbel": 894, + "Ġsm": 895, + "its": 896, + "ax": 897, + "Ġown": 898, + "Ġprov": 899, + "Ġset": 900, + "ife": 901, + "ments": 902, + "ble": 903, + "ward": 904, + "Ġshow": 905, + "Ġpres": 906, + "ms": 907, + "omet": 908, + "Ġob": 909, + "Ġsay": 910, + "ĠSh": 911, + "ts": 912, + "ful": 913, + "Ġeff": 914, + "Ġgu": 915, + "Ġinst": 916, + "und": 917, + "ren": 918, + "cess": 919, + "Ġent": 920, + "ĠYou": 921, + "Ġgood": 922, + "Ġstart": 923, + "ince": 924, + "Ġmade": 925, + "tt": 926, + "stem": 927, + "olog": 928, + "up": 929, + "Ġ|": 930, + "ump": 931, + "Ġhel": 932, + "vern": 933, + "ular": 934, + "ually": 935, + "Ġac": 936, + "Ġmon": 937, + "Ġlast": 938, + "Ġ200": 939, + "10": 940, + "Ġstud": 941, + "ures": 942, + "ĠAr": 943, + "self": 944, + "ars": 945, + "meric": 946, + "ues": 947, + "cy": 948, + "Ġmin": 949, + "ollow": 950, + "Ġcol": 951, + "io": 952, + "Ġmod": 953, + "Ġcount": 954, + "ĠCom": 955, + "hes": 956, + "Ġfin": 957, + "air": 958, + "ier": 959, + "âĢĶ": 960, + "read": 961, + "ank": 962, + "atch": 963, + "ever": 964, + "Ġstr": 965, + "Ġpoint": 966, + "ork": 967, + "ĠNew": 968, + "Ġsur": 969, + "ool": 970, + "alk": 971, + "ement": 972, + "Ġused": 973, + "ract": 974, + "ween": 975, + "Ġsame": 976, + "oun": 977, + "ĠAl": 978, + "ci": 979, + "Ġdiffere": 980, + "Ġwhile": 981, + "--------": 982, + "Ġgame": 983, + "cept": 984, + "Ġsim": 985, + "...": 986, + "Ġinter": 987, + 
"ek": 988, + "Ġreport": 989, + "Ġprodu": 990, + "Ġstill": 991, + "led": 992, + "ah": 993, + "Ġhere": 994, + "Ġworld": 995, + "Ġthough": 996, + "Ġnum": 997, + "arch": 998, + "imes": 999, + "ale": 1000, + "ĠSe": 1001, + "ĠIf": 1002, + "//": 1003, + "ĠLe": 1004, + "Ġret": 1005, + "Ġref": 1006, + "Ġtrans": 1007, + "ner": 1008, + "ution": 1009, + "ters": 1010, + "Ġtake": 1011, + "ĠCl": 1012, + "Ġconf": 1013, + "way": 1014, + "ave": 1015, + "Ġgoing": 1016, + "Ġsl": 1017, + "ug": 1018, + "ĠAmeric": 1019, + "Ġspec": 1020, + "Ġhand": 1021, + "Ġbetween": 1022, + "ists": 1023, + "ĠDe": 1024, + "oot": 1025, + "It": 1026, + "Ġear": 1027, + "Ġagainst": 1028, + "Ġhigh": 1029, + "gan": 1030, + "az": 1031, + "ather": 1032, + "Ġexp": 1033, + "Ġop": 1034, + "Ġins": 1035, + "Ġgr": 1036, + "Ġhelp": 1037, + "Ġrequ": 1038, + "ets": 1039, + "ins": 1040, + "ĠPro": 1041, + "ism": 1042, + "Ġfound": 1043, + "land": 1044, + "ata": 1045, + "uss": 1046, + "ames": 1047, + "Ġperson": 1048, + "Ġgreat": 1049, + "pr": 1050, + "Ġsign": 1051, + "ĠAn": 1052, + "'ve": 1053, + "Ġsomet": 1054, + "Ġser": 1055, + "hip": 1056, + "Ġrun": 1057, + "Ġ:": 1058, + "Ġter": 1059, + "irect": 1060, + "Ġfollow": 1061, + "Ġdet": 1062, + "ices": 1063, + "Ġfind": 1064, + "12": 1065, + "Ġmem": 1066, + "Ġcr": 1067, + "ered": 1068, + "ex": 1069, + "Ġext": 1070, + "uth": 1071, + "ense": 1072, + "co": 1073, + "Ġteam": 1074, + "ving": 1075, + "ouse": 1076, + "ash": 1077, + "att": 1078, + "ved": 1079, + "Ġsystem": 1080, + "ĠAs": 1081, + "der": 1082, + "ives": 1083, + "min": 1084, + "Ġlead": 1085, + "ĠBl": 1086, + "cent": 1087, + "Ġaround": 1088, + "Ġgovern": 1089, + "Ġcur": 1090, + "velop": 1091, + "any": 1092, + "Ġcour": 1093, + "alth": 1094, + "ages": 1095, + "ize": 1096, + "Ġcar": 1097, + "ode": 1098, + "Ġlaw": 1099, + "Ġread": 1100, + "'m": 1101, + "con": 1102, + "Ġreal": 1103, + "Ġsupport": 1104, + "Ġ12": 1105, + "....": 1106, + "Ġreally": 1107, + "ness": 1108, + "Ġfact": 1109, + "Ġday": 1110, + "Ġboth": 1111, + "ying": 1112, + "Ġserv": 1113, + "ĠFor": 1114, + "Ġthree": 1115, + "Ġwom": 1116, + "Ġmed": 1117, + "ody": 1118, + "ĠThey": 1119, + "50": 1120, + "Ġexper": 1121, + "ton": 1122, + "Ġeach": 1123, + "akes": 1124, + "Ġche": 1125, + "Ġcre": 1126, + "ines": 1127, + "Ġrep": 1128, + "19": 1129, + "gg": 1130, + "illion": 1131, + "Ġgrou": 1132, + "ute": 1133, + "ik": 1134, + "We": 1135, + "get": 1136, + "ER": 1137, + "Ġmet": 1138, + "Ġsays": 1139, + "ox": 1140, + "Ġduring": 1141, + "ern": 1142, + "ized": 1143, + "ared": 1144, + "Ġfam": 1145, + "ically": 1146, + "Ġhapp": 1147, + "ĠIs": 1148, + "Ġchar": 1149, + "med": 1150, + "vent": 1151, + "Ġgener": 1152, + "ient": 1153, + "ple": 1154, + "iet": 1155, + "rent": 1156, + "11": 1157, + "ves": 1158, + "ption": 1159, + "Ġ20": 1160, + "formation": 1161, + "Ġcor": 1162, + "Ġoffic": 1163, + "ield": 1164, + "Ġtoo": 1165, + "ision": 1166, + "Ġinf": 1167, + "ĠZ": 1168, + "the": 1169, + "oad": 1170, + "Ġpublic": 1171, + "Ġprog": 1172, + "ric": 1173, + "**": 1174, + "Ġwar": 1175, + "Ġpower": 1176, + "view": 1177, + "Ġfew": 1178, + "Ġloc": 1179, + "Ġdifferent": 1180, + "Ġstate": 1181, + "Ġhead": 1182, + "'ll": 1183, + "Ġposs": 1184, + "Ġstat": 1185, + "ret": 1186, + "ants": 1187, + "Ġval": 1188, + "Ġiss": 1189, + "Ġcle": 1190, + "ivers": 1191, + "anc": 1192, + "Ġexpl": 1193, + "Ġanother": 1194, + "ĠQ": 1195, + "Ġav": 1196, + "thing": 1197, + "nce": 1198, + "Wh": 1199, + "Ġchild": 1200, + "Ġsince": 1201, + "ired": 1202, + "less": 1203, + "Ġlife": 1204, + "Ġdevelop": 1205, + "ittle": 1206, + "Ġdep": 1207, + "Ġpass": 
1208, + "ãĥ": 1209, + "Ġturn": 1210, + "orn": 1211, + "This": 1212, + "bers": 1213, + "ross": 1214, + "ĠAd": 1215, + "Ġfr": 1216, + "Ġresp": 1217, + "Ġsecond": 1218, + "oh": 1219, + "Ġ/": 1220, + "Ġdisc": 1221, + "Ġ&": 1222, + "Ġsomething": 1223, + "Ġcomple": 1224, + "Ġed": 1225, + "Ġfil": 1226, + "Ġmonth": 1227, + "aj": 1228, + "uc": 1229, + "Ġgovernment": 1230, + "Ġwithout": 1231, + "Ġleg": 1232, + "Ġdist": 1233, + "Ġput": 1234, + "Ġquest": 1235, + "ann": 1236, + "Ġprot": 1237, + "20": 1238, + "Ġnever": 1239, + "ience": 1240, + "Ġlevel": 1241, + "Ġart": 1242, + "Ġthings": 1243, + "Ġmight": 1244, + "Ġeffect": 1245, + "Ġcontro": 1246, + "Ġcent": 1247, + "Ġ18": 1248, + "Ġallow": 1249, + "Ġbelie": 1250, + "chool": 1251, + "ott": 1252, + "Ġincre": 1253, + "Ġfeel": 1254, + "Ġresult": 1255, + "Ġlot": 1256, + "Ġfun": 1257, + "ote": 1258, + "Ġty": 1259, + "erest": 1260, + "Ġcontin": 1261, + "Ġusing": 1262, + "Ġbig": 1263, + "201": 1264, + "Ġask": 1265, + "Ġbest": 1266, + "Ġ)": 1267, + "IN": 1268, + "Ġopp": 1269, + "30": 1270, + "Ġnumber": 1271, + "iness": 1272, + "St": 1273, + "lease": 1274, + "Ġca": 1275, + "Ġmust": 1276, + "Ġdirect": 1277, + "Ġgl": 1278, + "Ġ<": 1279, + "Ġopen": 1280, + "Ġpost": 1281, + "Ġcome": 1282, + "Ġseem": 1283, + "ording": 1284, + "Ġweek": 1285, + "ately": 1286, + "ital": 1287, + "Ġel": 1288, + "riend": 1289, + "Ġfar": 1290, + "Ġtra": 1291, + "inal": 1292, + "Ġpri": 1293, + "ĠUS": 1294, + "Ġplace": 1295, + "Ġform": 1296, + "Ġtold": 1297, + "\":": 1298, + "ains": 1299, + "ature": 1300, + "ĠTrump": 1301, + "Ġstand": 1302, + "Ġ#": 1303, + "ider": 1304, + "ĠFr": 1305, + "Ġnext": 1306, + "Ġsoc": 1307, + "Ġpur": 1308, + "Ġlet": 1309, + "Ġlittle": 1310, + "Ġhum": 1311, + "Ġi": 1312, + "ron": 1313, + "15": 1314, + "Ġ15": 1315, + "Ġcommun": 1316, + "Ġmark": 1317, + "ĠThere": 1318, + "Ġwr": 1319, + "ĠThat": 1320, + "Ġinformation": 1321, + "ways": 1322, + "Ġbus": 1323, + "app": 1324, + "Ġinvest": 1325, + "me": 1326, + "Ġhard": 1327, + "ained": 1328, + "ead": 1329, + "Ġimport": 1330, + "Ġappro": 1331, + "Ġtest": 1332, + "Ġtri": 1333, + "Ġrest": 1334, + "osed": 1335, + "Ġfull": 1336, + "Ġcare": 1337, + "ĠSp": 1338, + "Ġcase": 1339, + "ON": 1340, + "Ġsk": 1341, + "Ġless": 1342, + "Ġ+": 1343, + "Ġpartic": 1344, + "ĠPl": 1345, + "ably": 1346, + "uck": 1347, + "ished": 1348, + "chn": 1349, + "be": 1350, + "Ġlist": 1351, + "ator": 1352, + "Ġtop": 1353, + "Ġadv": 1354, + "ĠBe": 1355, + "ruct": 1356, + "Ġdem": 1357, + "ration": 1358, + "ling": 1359, + "gy": 1360, + "reen": 1361, + "ger": 1362, + "Ġhome": 1363, + "Ġleft": 1364, + "Ġbetter": 1365, + "Ġdata": 1366, + "Ġ11": 1367, + "Ġattack": 1368, + "Ġproble": 1369, + "line": 1370, + "ards": 1371, + "Ġbeh": 1372, + "ral": 1373, + "ĠHow": 1374, + "ĠShe": 1375, + "arge": 1376, + "Ġ--": 1377, + "://": 1378, + "Ġbro": 1379, + "ĠPh": 1380, + "ats": 1381, + "Ġbuild": 1382, + "ww": 1383, + "ided": 1384, + "aim": 1385, + "ases": 1386, + "ency": 1387, + "Ġmain": 1388, + "ined": 1389, + "Ġincluding": 1390, + "Ġ{": 1391, + "Ġgot": 1392, + "Ġinterest": 1393, + "Ġkeep": 1394, + "ĠX": 1395, + "Ġeas": 1396, + "aining": 1397, + "Ġclass": 1398, + "âĢ¦": 1399, + "ĠNo": 1400, + "Ġvar": 1401, + "Ġsmall": 1402, + "ample": 1403, + "AT": 1404, + "Ġide": 1405, + "ĠSo": 1406, + "Ġrece": 1407, + "Ġpolit": 1408, + "Ġmov": 1409, + "Ġplan": 1410, + "Ġpercent": 1411, + "iving": 1412, + "Ġcamp": 1413, + "Ġpay": 1414, + "14": 1415, + "sc": 1416, + "ised": 1417, + "Ġunt": 1418, + "oney": 1419, + "ploy": 1420, + "====": 1421, + "Ġdidn": 1422, + "ĠInd": 1423, + "els": 1424, + 
"ertain": 1425, + "Ġpos": 1426, + "____": 1427, + "iver": 1428, + "Ġprocess": 1429, + "Ġprogram": 1430, + "ified": 1431, + "ĠRep": 1432, + "16": 1433, + "uro": 1434, + "ology": 1435, + "atter": 1436, + "ina": 1437, + "Ġname": 1438, + "ĠAll": 1439, + "Ġfour": 1440, + "Ġreturn": 1441, + "vious": 1442, + "bs": 1443, + "Ġcalled": 1444, + "Ġmove": 1445, + "ĠSc": 1446, + "ird": 1447, + "Ġgroup": 1448, + "Ġbre": 1449, + "Ġmen": 1450, + "Ġcap": 1451, + "ten": 1452, + "ee": 1453, + "Ġdri": 1454, + "leg": 1455, + "here": 1456, + "uthor": 1457, + "Ġpat": 1458, + "Ġcurrent": 1459, + "ides": 1460, + "Ġpop": 1461, + "to": 1462, + "ention": 1463, + "Ġalways": 1464, + "Ġmil": 1465, + "Ġwomen": 1466, + "Ġ16": 1467, + "Ġold": 1468, + "iven": 1469, + "raph": 1470, + "ĠOr": 1471, + "ror": 1472, + "ently": 1473, + "Ġnear": 1474, + "ĠEx": 1475, + "ream": 1476, + "sh": 1477, + "Ġ14": 1478, + "Ġfree": 1479, + "ission": 1480, + "stand": 1481, + "ĠCon": 1482, + "ality": 1483, + "used": 1484, + "13": 1485, + "Ġdesign": 1486, + "Ġchange": 1487, + "Ġchang": 1488, + "Ġbo": 1489, + "Ġvis": 1490, + "ember": 1491, + "Ġbook": 1492, + "ready": 1493, + "Ġkill": 1494, + "25": 1495, + "pped": 1496, + "Ġaway": 1497, + "Ġable": 1498, + "Ġcountry": 1499, + "Ġconst": 1500, + "arn": 1501, + "Ġorder": 1502, + "AR": 1503, + "ior": 1504, + "ium": 1505, + "orth": 1506, + "18": 1507, + "ailable": 1508, + "Ġsw": 1509, + "Ġmillion": 1510, + "Ġ13": 1511, + "atic": 1512, + "ted": 1513, + "ĠGo": 1514, + "Ġoper": 1515, + "eng": 1516, + "Ġthing": 1517, + "ajor": 1518, + "conom": 1519, + "ĠComm": 1520, + "Ġwhy": 1521, + "ured": 1522, + "ural": 1523, + "Ġschool": 1524, + "by": 1525, + "ĠMar": 1526, + "Ġaff": 1527, + "Ġdays": 1528, + "Ġann": 1529, + "ush": 1530, + "ane": 1531, + "If": 1532, + "eg": 1533, + "Ġprof": 1534, + "Ġhealth": 1535, + "outh": 1536, + "But": 1537, + "ional": 1538, + ".,": 1539, + "Ġsol": 1540, + "Ġalready": 1541, + "Ġ30": 1542, + "Ġcharact": 1543, + "He": 1544, + "Ġfriend": 1545, + "ES": 1546, + "ians": 1547, + "icle": 1548, + "'d": 1549, + "ĠOn": 1550, + "Ġleast": 1551, + "Ġprom": 1552, + "Ġdr": 1553, + "Ġhist": 1554, + "ither": 1555, + "Ġest": 1556, + "iqu": 1557, + "17": 1558, + "son": 1559, + "Ġtell": 1560, + "Ġtalk": 1561, + "ohn": 1562, + "oint": 1563, + "lection": 1564, + "AN": 1565, + "Ġuntil": 1566, + "augh": 1567, + "Ġlater": 1568, + "Ġve": 1569, + "Ġview": 1570, + "ending": 1571, + "ived": 1572, + "Ġword": 1573, + "ware": 1574, + "Ġcost": 1575, + "Ġenough": 1576, + "Ġgive": 1577, + "ĠUnited": 1578, + "Ġtechn": 1579, + "arent": 1580, + "OR": 1581, + "Ġpar": 1582, + "ĠDr": 1583, + "Ġ2016": 1584, + "rist": 1585, + "ering": 1586, + "ĠÂ": 1587, + "Ġlarge": 1588, + "side": 1589, + "acy": 1590, + "ccess": 1591, + "Ġwin": 1592, + "Ġimportant": 1593, + "Ġ199": 1594, + "Ġdoesn": 1595, + "Ġ17": 1596, + "Ġbusiness": 1597, + "Ġclear": 1598, + "Ġrese": 1599, + "\",": 1600, + "ury": 1601, + "Ġequ": 1602, + "aster": 1603, + "alf": 1604, + "ĠAmerican": 1605, + "nect": 1606, + "Ġexpect": 1607, + "iversity": 1608, + "Ġocc": 1609, + "ĠFl": 1610, + "Ġkind": 1611, + "Ġmean": 1612, + "Ġpast": 1613, + "Ġdev": 1614, + "Ġbas": 1615, + "let": 1616, + "raft": 1617, + "Ġorgan": 1618, + "Ġdel": 1619, + "Ġperform": 1620, + "Ġstory": 1621, + "Ġseason": 1622, + "ĠCol": 1623, + "Ġclaim": 1624, + "Ġcame": 1625, + "Ġwithin": 1626, + "Ġline": 1627, + "Ġproject": 1628, + "ĠAt": 1629, + "Ġcontrol": 1630, + "ended": 1631, + "ĠSy": 1632, + "Ġair": 1633, + "ization": 1634, + "Ġ*": 1635, + "ley": 1636, + "Ġmoney": 1637, + "idd": 1638, + "You": 1639, + 
"for": 1640, + "Ġfamily": 1641, + "Ġmaking": 1642, + "Ġbit": 1643, + "Ġpolice": 1644, + "Ġhappen": 1645, + "Ġvers": 1646, + "ony": 1647, + "uff": 1648, + "ĠWhen": 1649, + "Ġsit": 1650, + "ideo": 1651, + "lf": 1652, + "ison": 1653, + "Ġsure": 1654, + "gin": 1655, + "Ġappear": 1656, + "Ġlight": 1657, + "Ġes": 1658, + "of": 1659, + "Ġwater": 1660, + "Ġtimes": 1661, + "not": 1662, + "Ġgrow": 1663, + "Ġcompany": 1664, + "ĠTe": 1665, + "ows": 1666, + "Ġmar": 1667, + "ource": 1668, + "iol": 1669, + "arm": 1670, + "br": 1671, + "Ġexample": 1672, + "Ġconc": 1673, + "Ġfore": 1674, + "ĠTo": 1675, + "pro": 1676, + "EN": 1677, + "ries": 1678, + "Ġ25": 1679, + "ĠCan": 1680, + "ney": 1681, + "Ġactually": 1682, + "Ġever": 1683, + "urity": 1684, + "aken": 1685, + "aps": 1686, + "Ġtax": 1687, + "Ġmajor": 1688, + "ama": 1689, + "Ġoften": 1690, + "eral": 1691, + "Ġhuman": 1692, + "Ġjob": 1693, + "ister": 1694, + "Ġavailable": 1695, + "ocr": 1696, + "enn": 1697, + "aid": 1698, + "ivid": 1699, + "Ġrecord": 1700, + "?\"": 1701, + "Ġsing": 1702, + "ĠAm": 1703, + "idence": 1704, + "Ġnews": 1705, + "ster": 1706, + "Ġeconom": 1707, + "Ġfollowing": 1708, + "ĠBr": 1709, + "ising": 1710, + "Ġhour": 1711, + "most": 1712, + "ument": 1713, + "Ġsex": 1714, + "Ġdesc": 1715, + "Ġbecome": 1716, + "ĠEd": 1717, + "Ġtook": 1718, + "Ġhaving": 1719, + "Ġproduct": 1720, + "ault": 1721, + "As": 1722, + "aring": 1723, + "Ġmeans": 1724, + "Ġhop": 1725, + "une": 1726, + "Ġcho": 1727, + "Ġcertain": 1728, + "Ġnon": 1729, + "Ġdeal": 1730, + "24": 1731, + "lement": 1732, + "oci": 1733, + "ene": 1734, + "Ġside": 1735, + "ĠPr": 1736, + "ĠMay": 1737, + "Ġreason": 1738, + "ued": 1739, + "ched": 1740, + "ulation": 1741, + "Ġelect": 1742, + "Ġofficial": 1743, + "Ġpossible": 1744, + "Ġhold": 1745, + "ands": 1746, + "ots": 1747, + "Ġcity": 1748, + "ories": 1749, + "Ġsever": 1750, + "Ġchildren": 1751, + "Ġonce": 1752, + "Ġactiv": 1753, + "ler": 1754, + "Ġnight": 1755, + "itions": 1756, + "ĠJohn": 1757, + "ape": 1758, + "play": 1759, + "Ġdone": 1760, + "Ġlim": 1761, + "Ġworking": 1762, + "ĠPres": 1763, + "orld": 1764, + "eb": 1765, + "ĠCo": 1766, + "Ġbody": 1767, + "ails": 1768, + "utes": 1769, + "ĠMr": 1770, + "Ġwhether": 1771, + "Ġauthor": 1772, + "rop": 1773, + "Ġproper": 1774, + "Ġseen": 1775, + ");": 1776, + "Ġfac": 1777, + "ĠSu": 1778, + "Ġcond": 1779, + "iting": 1780, + "Ġcourse": 1781, + "Ġ}": 1782, + "----------------": 1783, + "aign": 1784, + "Ġevent": 1785, + "Ġeng": 1786, + "Ġpot": 1787, + "Ġintern": 1788, + "iam": 1789, + "Ġshort": 1790, + "empt": 1791, + "ãĤ": 1792, + "ĠGod": 1793, + "ilar": 1794, + "80": 1795, + "Ġorig": 1796, + "IS": 1797, + "ourn": 1798, + "ability": 1799, + "itive": 1800, + "Ġdam": 1801, + "Ġ100": 1802, + "Ġpress": 1803, + "Ġdoing": 1804, + "Ġprotect": 1805, + "ring": 1806, + "Ġthought": 1807, + "Ġquestion": 1808, + "rew": 1809, + "ĠWar": 1810, + "Ġseveral": 1811, + "ĠState": 1812, + "Ġgiven": 1813, + "Ġfund": 1814, + "ĠTw": 1815, + "Ġwent": 1816, + "ances": 1817, + "work": 1818, + "por": 1819, + "my": 1820, + "40": 1821, + "Ġarg": 1822, + "artment": 1823, + "ustom": 1824, + "Ġpolic": 1825, + "Ġmeet": 1826, + "Ġcreat": 1827, + "22": 1828, + "ĠStates": 1829, + "Ġgames": 1830, + "raw": 1831, + "uture": 1832, + "Ġunderstand": 1833, + "urs": 1834, + "ĠOb": 1835, + "lish": 1836, + "sy": 1837, + "Ġmakes": 1838, + "Ġwon": 1839, + "agon": 1840, + "Ġhtt": 1841, + "Ġlove": 1842, + "ential": 1843, + "Ġcomplete": 1844, + "par": 1845, + "ĠIm": 1846, + "AL": 1847, + "Ġaccount": 1848, + "Âł": 1849, + "ored": 1850, + "vert": 1851, 
+ "Ġident": 1852, + "Ġ2015": 1853, + "Ġothers": 1854, + "ĠMin": 1855, + "iber": 1856, + "verage": 1857, + "There": 1858, + "itional": 1859, + "dd": 1860, + "Ġprob": 1861, + "Ġyoung": 1862, + "Ġalong": 1863, + "Ġaccording": 1864, + "Ġyet": 1865, + "Ġmembers": 1866, + "ĠWhat": 1867, + "oid": 1868, + "ĠMan": 1869, + "And": 1870, + "Ġamong": 1871, + "ai": 1872, + "Ġemploy": 1873, + "ĠRes": 1874, + "Ġ>": 1875, + "Ġinvol": 1876, + "Ġlow": 1877, + "af": 1878, + "ĠCar": 1879, + "Ġhig": 1880, + "ĠOne": 1881, + "ĠSec": 1882, + "ination": 1883, + "Ġlikely": 1884, + "Ġant": 1885, + "aged": 1886, + "ĠRuss": 1887, + "Ġben": 1888, + "Ġrele": 1889, + "For": 1890, + "back": 1891, + "ĠNot": 1892, + "Ġpresident": 1893, + "ball": 1894, + "Ġaccess": 1895, + "ividual": 1896, + "ĠDem": 1897, + "ĠEuro": 1898, + "60": 1899, + "Ġknown": 1900, + "irl": 1901, + "ĠGr": 1902, + "Ġearly": 1903, + "use": 1904, + "iety": 1905, + "âĢĵ": 1906, + "Ġfight": 1907, + "Ġsent": 1908, + "Ġtoday": 1909, + "Ġmarket": 1910, + "\".": 1911, + "Ġbased": 1912, + "Ġstrong": 1913, + "urther": 1914, + "Ġdeb": 1915, + "mber": 1916, + "Ġproblem": 1917, + "Ġdeath": 1918, + "Ġsocial": 1919, + "imate": 1920, + "AS": 1921, + "ortun": 1922, + "Ġcampaign": 1923, + "ery": 1924, + "Ch": 1925, + "Ġey": 1926, + "ially": 1927, + "Ġmus": 1928, + "wh": 1929, + "pos": 1930, + "Ġer": 1931, + "Ġsaf": 1932, + "Ġmonths": 1933, + "iron": 1934, + "Ġviol": 1935, + "Ġfive": 1936, + "Ġstre": 1937, + "Ġplayers": 1938, + "inc": 1939, + "ald": 1940, + "year": 1941, + "aun": 1942, + "Ġsuccess": 1943, + "Ġpresent": 1944, + "erence": 1945, + "Ġ2014": 1946, + "Ġsugg": 1947, + "Ġparticular": 1948, + "Ġtry": 1949, + "Ġsuggest": 1950, + "ĠChrist": 1951, + "ones": 1952, + "Ġpriv": 1953, + "23": 1954, + "Ġcrit": 1955, + "Ġland": 1956, + "Ġlocal": 1957, + "ify": 1958, + "29": 1959, + "Ġaut": 1960, + "ED": 1961, + "ĠGu": 1962, + "Ġmult": 1963, + "Ġpolitical": 1964, + "Ġasked": 1965, + "Ġformer": 1966, + "itter": 1967, + "ript": 1968, + "Ġclose": 1969, + "Ġpract": 1970, + "ĠYork": 1971, + "Ġgetting": 1972, + "Ġacross": 1973, + "Ġcomb": 1974, + "Ġbelieve": 1975, + "Ġz": 1976, + "Ġtoget": 1977, + "Ġtogether": 1978, + "ĠCent": 1979, + "irc": 1980, + "Ġindividual": 1981, + "ĠMc": 1982, + "27": 1983, + "isk": 1984, + "ĠEng": 1985, + "Ġface": 1986, + "Ġ24": 1987, + "Ġvalue": 1988, + "Ġarea": 1989, + "ev": 1990, + "Ġwrit": 1991, + "ĠPresident": 1992, + "Ġvot": 1993, + "Ġkey": 1994, + "Ġmom": 1995, + "put": 1996, + "Ġanything": 1997, + "Ġexperience": 1998, + "attle": 1999, + "Ġmind": 2000, + "aff": 2001, + "omm": 2002, + "Ġfuture": 2003, + "ged": 2004, + "Ġcut": 2005, + "Ġtot": 2006, + "itch": 2007, + "Ġvideo": 2008, + "Ġinvestig": 2009, + "Ġnet": 2010, + "ĠMy": 2011, + "rict": 2012, + "ien": 2013, + ".)": 2014, + "Ġimpro": 2015, + "though": 2016, + "wards": 2017, + "Ġconnect": 2018, + "ĠMed": 2019, + "selves": 2020, + "ensive": 2021, + "mb": 2022, + "ober": 2023, + "ators": 2024, + "An": 2025, + "Ġ50": 2026, + "Ġredu": 2027, + "resent": 2028, + "Ġabove": 2029, + "Ġfre": 2030, + "ĠEurope": 2031, + "sw": 2032, + "Ġamount": 2033, + "ĠApp": 2034, + "Ġeither": 2035, + "Ġmilit": 2036, + "Ġanal": 2037, + "Ġfail": 2038, + "ĠEn": 2039, + "ales": 2040, + "Ġspecial": 2041, + "Ġblack": 2042, + "IT": 2043, + "cher": 2044, + "Ġlooking": 2045, + "Ġfire": 2046, + "yn": 2047, + "Ġalmost": 2048, + "oon": 2049, + "Ġstudy": 2050, + "Ġmiss": 2051, + "ches": 2052, + "rown": 2053, + "Ġtre": 2054, + "Ġcommunity": 2055, + "Ġmedia": 2056, + "Ġfood": 2057, + "Ġcomes": 2058, + "ĠUniversity": 2059, + "Ġsingle": 
2060, + "What": 2061, + "uly": 2062, + "Ġhalf": 2063, + "ague": 2064, + "hod": 2065, + "ĠRepublic": 2066, + "Ġstarted": 2067, + "Ġquick": 2068, + "oto": 2069, + "book": 2070, + "Ġissue": 2071, + "itor": 2072, + "Ġelse": 2073, + "Ġconsider": 2074, + "26": 2075, + "rodu": 2076, + "Ġtaken": 2077, + "28": 2078, + "99": 2079, + "ĠWith": 2080, + "Ġtrue": 2081, + "Ġwa": 2082, + "Ġtrad": 2083, + "Ġago": 2084, + "Ġmess": 2085, + "ief": 2086, + "Ġadded": 2087, + "oke": 2088, + "Ġbad": 2089, + "Ġfav": 2090, + "33": 2091, + "Ġsimilar": 2092, + "ask": 2093, + "ĠDon": 2094, + "Ġcharacter": 2095, + "orts": 2096, + "ĠHouse": 2097, + "Ġreported": 2098, + "Ġtype": 2099, + "val": 2100, + "iod": 2101, + "ĠHowever": 2102, + "Ġtarg": 2103, + "Ġentire": 2104, + "pping": 2105, + "Ġhistory": 2106, + "Ġlive": 2107, + "ffic": 2108, + "........": 2109, + "ederal": 2110, + "Ġtrying": 2111, + "Ġdiscuss": 2112, + "ĠHar": 2113, + "aces": 2114, + "lished": 2115, + "Ġself": 2116, + "osp": 2117, + "rest": 2118, + "Ġroom": 2119, + "elt": 2120, + "Ġfall": 2121, + "olution": 2122, + "Ġet": 2123, + "Ġx": 2124, + "Ġisn": 2125, + "Ġidea": 2126, + "bo": 2127, + "Ġsound": 2128, + "ĠDep": 2129, + "Ġsomeone": 2130, + "cially": 2131, + "ully": 2132, + "Ġfoc": 2133, + "Ġobject": 2134, + "ift": 2135, + "aper": 2136, + "Ġplayer": 2137, + "Ġrather": 2138, + "Ġservice": 2139, + "ashing": 2140, + "ĠDo": 2141, + "ĠPart": 2142, + "rug": 2143, + "mon": 2144, + "ply": 2145, + "Ġmor": 2146, + "Ġnothing": 2147, + "Ġprovide": 2148, + "IC": 2149, + "ung": 2150, + "Ġparty": 2151, + "Ġexist": 2152, + "Ġmag": 2153, + "70": 2154, + "Ġrul": 2155, + "Ġhouse": 2156, + "Ġbehind": 2157, + "Ġhowever": 2158, + "ĠWorld": 2159, + "Ġsum": 2160, + "Ġapplic": 2161, + "Ġ;": 2162, + "Ġfunction": 2163, + "gr": 2164, + "ĠPol": 2165, + "Ġfront": 2166, + "200": 2167, + "Ġseries": 2168, + "Ġtem": 2169, + "Ġtyp": 2170, + "ills": 2171, + "Ġopt": 2172, + "Ġpoints": 2173, + "Ġbelow": 2174, + "itted": 2175, + "Ġspecific": 2176, + "Ġ2017": 2177, + "umb": 2178, + "Ġra": 2179, + "Ġprevious": 2180, + "Ġpret": 2181, + "reme": 2182, + "Ġcustom": 2183, + "Ġcourt": 2184, + "ĠMe": 2185, + "Ġrepl": 2186, + "Ġwhole": 2187, + "go": 2188, + "cer": 2189, + "Ġtreat": 2190, + "ĠAct": 2191, + "Ġprobably": 2192, + "Ġlearn": 2193, + "ender": 2194, + "ĠAss": 2195, + "Ġversion": 2196, + "now": 2197, + "Ġcheck": 2198, + "ĠCal": 2199, + "RE": 2200, + "minist": 2201, + "On": 2202, + "ources": 2203, + "Ġbenef": 2204, + "Ġdoc": 2205, + "Ġdeter": 2206, + "Ġenc": 2207, + "Ġsuper": 2208, + "Ġaddress": 2209, + "Ġvict": 2210, + "Ġ2013": 2211, + "Ġmeas": 2212, + "tr": 2213, + "Ġfield": 2214, + "When": 2215, + "Ġsignific": 2216, + "uge": 2217, + "Ġfeat": 2218, + "Ġcommon": 2219, + "load": 2220, + "Ġbegin": 2221, + "Ġbring": 2222, + "Ġaction": 2223, + "erman": 2224, + "Ġdescrib": 2225, + "Ġindust": 2226, + "Ġwanted": 2227, + "ried": 2228, + "ming": 2229, + "Ġattempt": 2230, + "45": 2231, + "fer": 2232, + "Ġdue": 2233, + "ression": 2234, + "##": 2235, + "Ġshall": 2236, + "Ġsix": 2237, + "oo": 2238, + "Ġstep": 2239, + "Ġpub": 2240, + "Ġhimself": 2241, + "Ġ23": 2242, + "Ġcop": 2243, + "Ġdest": 2244, + "Ġstop": 2245, + "AC": 2246, + "ibility": 2247, + "Ġlab": 2248, + "icult": 2249, + "Ġhours": 2250, + "Ġcreate": 2251, + "Ġfurther": 2252, + "ĠAmerica": 2253, + "ĠCity": 2254, + "Ġdou": 2255, + "head": 2256, + "ST": 2257, + "ĠNorth": 2258, + "cing": 2259, + "Ġnational": 2260, + "ule": 2261, + "ĠInst": 2262, + "Ġtaking": 2263, + "ĠQu": 2264, + "irt": 2265, + "Ġred": 2266, + "Ġresearch": 2267, + "viron": 2268, + 
"ĠGe": 2269, + "Ġbreak": 2270, + "ana": 2271, + "Ġspace": 2272, + "aterial": 2273, + "Ġrecent": 2274, + "ĠAb": 2275, + "Ġgeneral": 2276, + "Ġhit": 2277, + "Ġperiod": 2278, + "Ġeverything": 2279, + "ively": 2280, + "Ġphys": 2281, + "Ġsaying": 2282, + "anks": 2283, + "Ġcou": 2284, + "Ġcult": 2285, + "aced": 2286, + "eal": 2287, + "uation": 2288, + "Ġcoun": 2289, + "lu": 2290, + "Ġinclude": 2291, + "Ġposition": 2292, + "ĠAfter": 2293, + "ĠCanad": 2294, + "ĠEm": 2295, + "Ġimm": 2296, + "ĠRed": 2297, + "Ġpick": 2298, + "Ġcompl": 2299, + "Ġmatter": 2300, + "reg": 2301, + "ext": 2302, + "angu": 2303, + "isc": 2304, + "ole": 2305, + "aut": 2306, + "Ġcompet": 2307, + "eed": 2308, + "fect": 2309, + "Ġ21": 2310, + "ĠSen": 2311, + "ĠThese": 2312, + "asing": 2313, + "Ġcannot": 2314, + "Ġinit": 2315, + "Ġrelations": 2316, + "ached": 2317, + "Ġbar": 2318, + "Ġ40": 2319, + "ĠTH": 2320, + "Ġ2012": 2321, + "Ġvol": 2322, + "Ġground": 2323, + "Ġsecurity": 2324, + "Ġupd": 2325, + "ilt": 2326, + "35": 2327, + "Ġconcern": 2328, + "ĠJust": 2329, + "Ġwhite": 2330, + "Ġseems": 2331, + "ĠHer": 2332, + "pecially": 2333, + "ients": 2334, + "Ġannoun": 2335, + "Ġfig": 2336, + "ights": 2337, + "Ġstri": 2338, + "like": 2339, + "ids": 2340, + "Ġsus": 2341, + "Ġwatch": 2342, + "Ġâ": 2343, + "Ġwind": 2344, + "ĠCont": 2345, + "Ġitself": 2346, + "Ġmass": 2347, + "Al": 2348, + "yle": 2349, + "ique": 2350, + "ĠNational": 2351, + "Ġabs": 2352, + "Ġpack": 2353, + "Ġoutside": 2354, + "Ġanim": 2355, + "Ġpain": 2356, + "eter": 2357, + "Ġmanag": 2358, + "duct": 2359, + "ogn": 2360, + "Ġ]": 2361, + "ĠSept": 2362, + "sec": 2363, + "off": 2364, + "ĠJan": 2365, + "Ġfoot": 2366, + "ades": 2367, + "Ġthird": 2368, + "Ġmot": 2369, + "Ġevidence": 2370, + "inton": 2371, + "Ġthreat": 2372, + "apt": 2373, + "ples": 2374, + "cle": 2375, + "Ġlo": 2376, + "Ġdecl": 2377, + "Ġitem": 2378, + "medi": 2379, + "Ġrepresent": 2380, + "omb": 2381, + "amer": 2382, + "Ġsignificant": 2383, + "ograph": 2384, + "su": 2385, + "Ġcal": 2386, + "ires": 2387, + "0000": 2388, + "ID": 2389, + "AM": 2390, + "Ġsimply": 2391, + "Ġlonger": 2392, + "Ġfile": 2393, + "OT": 2394, + "che": 2395, + "So": 2396, + "ateg": 2397, + "org": 2398, + "ĠHis": 2399, + "Ġener": 2400, + "Ġdom": 2401, + "Ġupon": 2402, + "ili": 2403, + "\":\"": 2404, + "Ġthemselves": 2405, + "Ġcoming": 2406, + "Ġquite": 2407, + "Ġdifficult": 2408, + "ĠBar": 2409, + "ilities": 2410, + "rel": 2411, + "ends": 2412, + "cial": 2413, + "64": 2414, + "Ġwoman": 2415, + "rap": 2416, + "yr": 2417, + "Ġnecess": 2418, + "ips": 2419, + "Ġtext": 2420, + "Ġrequire": 2421, + "Ġmilitary": 2422, + "Ġreview": 2423, + "Ġrespons": 2424, + "75": 2425, + "Ġsubject": 2426, + "Ġinstead": 2427, + "Ġissues": 2428, + "Ġgen": 2429, + "\",\"": 2430, + "Ġminutes": 2431, + "Ġweap": 2432, + "ray": 2433, + "amed": 2434, + "time": 2435, + "bl": 2436, + "How": 2437, + "Ġcode": 2438, + "ĠSm": 2439, + "Ġhigher": 2440, + "ĠSte": 2441, + "ris": 2442, + "Ġpage": 2443, + "Ġstudents": 2444, + "ĠIntern": 2445, + "Ġmethod": 2446, + "ĠAug": 2447, + "ĠPer": 2448, + "ĠAg": 2449, + "Ġpolicy": 2450, + "ĠSw": 2451, + "Ġexec": 2452, + "Ġaccept": 2453, + "ume": 2454, + "ribut": 2455, + "Ġwords": 2456, + "Ġfinal": 2457, + "Ġchanges": 2458, + "ĠDemocr": 2459, + "Ġfriends": 2460, + "Ġrespect": 2461, + "Ġep": 2462, + "Ġcompan": 2463, + "ivil": 2464, + "Ġdamage": 2465, + "****": 2466, + "ogle": 2467, + "vironment": 2468, + "Ġneg": 2469, + "ental": 2470, + "Ġap": 2471, + "Ġtotal": 2472, + "ival": 2473, + "!\"": 2474, + "lim": 2475, + "Ġneeds": 2476, + "Ġagre": 2477, + 
"Ġdevelopment": 2478, + "Ġage": 2479, + "iple": 2480, + "21": 2481, + "Ġresults": 2482, + "ĠAf": 2483, + "Sh": 2484, + "Ġgun": 2485, + "ĠObama": 2486, + "roll": 2487, + "Ġ@": 2488, + "Ġrights": 2489, + "ĠBrit": 2490, + "Ġrunning": 2491, + "Ġwasn": 2492, + "Ġport": 2493, + "Ġrate": 2494, + "Ġpretty": 2495, + "Ġtarget": 2496, + "Ġsaw": 2497, + "Ġcirc": 2498, + "Ġworks": 2499, + "icro": 2500, + "alt": 2501, + "over": 2502, + "www": 2503, + "That": 2504, + "lier": 2505, + "Ġeveryone": 2506, + "ude": 2507, + "Ġpie": 2508, + "iddle": 2509, + "rael": 2510, + "Ġrad": 2511, + "Ġblock": 2512, + "Ġwalk": 2513, + "To": 2514, + "ãģ": 2515, + "nes": 2516, + "ĠAust": 2517, + "aul": 2518, + "rote": 2519, + "ĠSouth": 2520, + "ession": 2521, + "oph": 2522, + "Ġshows": 2523, + "Ġsite": 2524, + "Ġjo": 2525, + "Ġrisk": 2526, + "clus": 2527, + "lt": 2528, + "Ġinj": 2529, + "iding": 2530, + "ĠSpe": 2531, + "Ġchall": 2532, + "irm": 2533, + "Ġ22": 2534, + "itting": 2535, + "str": 2536, + "Ġhy": 2537, + "LE": 2538, + "key": 2539, + "Ġbegan": 2540, + "atur": 2541, + "ashington": 2542, + "lam": 2543, + "ĠDav": 2544, + "bit": 2545, + "Ġsize": 2546, + "ĠPar": 2547, + "38": 2548, + "ournal": 2549, + "face": 2550, + "Ġdecision": 2551, + "Ġlarg": 2552, + "Ġjud": 2553, + "rect": 2554, + "Ġcontinue": 2555, + "ĠOct": 2556, + "overed": 2557, + "ĠInt": 2558, + "========": 2559, + "Ġparent": 2560, + "ĠWill": 2561, + "Ġeasy": 2562, + "Ġdrug": 2563, + "anger": 2564, + "Ġsense": 2565, + "Ġdi": 2566, + "iday": 2567, + "Ġenergy": 2568, + "istic": 2569, + "Ġassoci": 2570, + "arter": 2571, + "obal": 2572, + "eks": 2573, + "ĠEl": 2574, + "urch": 2575, + "Ġgirl": 2576, + "oe": 2577, + "itle": 2578, + "Ġ28": 2579, + "ĠChe": 2580, + "Ġrequest": 2581, + "Ġsoon": 2582, + "Ġhost": 2583, + "ky": 2584, + "Ġstates": 2585, + "omes": 2586, + "Ġmaterial": 2587, + "lex": 2588, + "Ġmoment": 2589, + "Ġansw": 2590, + "onse": 2591, + "Ġespecially": 2592, + "Ġnorm": 2593, + "Ġservices": 2594, + "pite": 2595, + "ran": 2596, + "Ġrole": 2597, + "44": 2598, + "):": 2599, + "Ġcred": 2600, + "Cl": 2601, + "________": 2602, + "Ġmat": 2603, + "Ġlog": 2604, + "ĠClinton": 2605, + "OU": 2606, + "Ġoffice": 2607, + "Ġ26": 2608, + "Ġcharg": 2609, + "Ġtrack": 2610, + "ma": 2611, + "Ġheart": 2612, + "Ġball": 2613, + "Ġpersonal": 2614, + "Ġbuilding": 2615, + "na": 2616, + "set": 2617, + "body": 2618, + "ĠBlack": 2619, + "Ġincrease": 2620, + "itten": 2621, + "Ġneeded": 2622, + "36": 2623, + "32": 2624, + "=\"": 2625, + "Ġlost": 2626, + "Ġbecame": 2627, + "Ġgroups": 2628, + "ĠMus": 2629, + "Ġwrote": 2630, + "ĠPe": 2631, + "Ġprop": 2632, + "joy": 2633, + "é": 2634, + "ĠWhite": 2635, + "Ġdead": 2636, + ".'": 2637, + "Ġhttp": 2638, + "Ġwebs": 2639, + "OS": 2640, + "Ġinside": 2641, + "Ġwrong": 2642, + "Ġstatement": 2643, + "Ġ...": 2644, + "yl": 2645, + "Ġfilm": 2646, + "Ġmusic": 2647, + "Ġshare": 2648, + "ification": 2649, + "Ġrelease": 2650, + "Ġforward": 2651, + "Ġstay": 2652, + "Ġcomput": 2653, + "itte": 2654, + "ser": 2655, + "Ġoriginal": 2656, + "Ġcard": 2657, + "Ġcand": 2658, + "Ġdiv": 2659, + "atural": 2660, + "Ġfavor": 2661, + "OM": 2662, + "Ġcases": 2663, + "uses": 2664, + "Ġsection": 2665, + "Ġleave": 2666, + "ging": 2667, + "oved": 2668, + "ĠWashington": 2669, + "39": 2670, + "ĠGl": 2671, + "Ġrequired": 2672, + "action": 2673, + "apan": 2674, + "oor": 2675, + "iter": 2676, + "ĠKing": 2677, + "Ġcountries": 2678, + "ĠGerman": 2679, + "lling": 2680, + "Ġ27": 2681, + "34": 2682, + "Ġquestions": 2683, + "Ġprim": 2684, + "Ġcell": 2685, + "Ġshoot": 2686, + "Ġanyone": 
2687, + "ĠWest": 2688, + "Ġaffect": 2689, + "epend": 2690, + "Ġonline": 2691, + "ĠIsrael": 2692, + "ĠSeptember": 2693, + "Ġability": 2694, + "Ġcontent": 2695, + "ises": 2696, + "Ġreve": 2697, + "Ġlaun": 2698, + "Ġindic": 2699, + "Ġforce": 2700, + "cast": 2701, + "Ġsold": 2702, + "aving": 2703, + "fl": 2704, + "Ġsoft": 2705, + "Ġcompanies": 2706, + "ceed": 2707, + "Ġarticle": 2708, + "Ġaud": 2709, + "Ġrev": 2710, + "Ġeduc": 2711, + "Ġplaying": 2712, + "05": 2713, + "Ġheld": 2714, + "ctor": 2715, + "Ġreleased": 2716, + "Ġfederal": 2717, + "37": 2718, + "Ġadminist": 2719, + "Ġinterview": 2720, + "Ġinstall": 2721, + "Ġreceived": 2722, + "Ġsource": 2723, + "uk": 2724, + "Ph": 2725, + "Ġserious": 2726, + "Ġcreated": 2727, + "Ġcause": 2728, + "Ġimmedi": 2729, + "Ġdefin": 2730, + "uel": 2731, + "ĠDepartment": 2732, + "ctions": 2733, + "ĠCour": 2734, + "ĠNow": 2735, + "ze": 2736, + "ites": 2737, + "itution": 2738, + "Ġlate": 2739, + "Ġspeak": 2740, + "ners": 2741, + "Ġlegal": 2742, + "ari": 2743, + "ĠCor": 2744, + "Ġweeks": 2745, + "Ġmodel": 2746, + "Ġpred": 2747, + "Ġexact": 2748, + "BC": 2749, + "ĠBy": 2750, + "ING": 2751, + "osing": 2752, + "Ġtakes": 2753, + "Ġregard": 2754, + "Ġopportun": 2755, + "Ġprice": 2756, + "Ġ198": 2757, + "ĠApr": 2758, + "fully": 2759, + "Ġord": 2760, + "Ġproblems": 2761, + "ruction": 2762, + "ham": 2763, + "ĠCount": 2764, + "lege": 2765, + "Ġleaders": 2766, + "ET": 2767, + "lev": 2768, + "Ġdeep": 2769, + "ological": 2770, + "ese": 2771, + "haps": 2772, + "ĠSome": 2773, + "Ġpers": 2774, + "Ġcontract": 2775, + "Ġrelationship": 2776, + "sp": 2777, + "oud": 2778, + "Ġbase": 2779, + "48": 2780, + "mit": 2781, + "Ad": 2782, + "ancial": 2783, + "Ġconsum": 2784, + "Ġpotential": 2785, + "Ġlangu": 2786, + "rem": 2787, + "eth": 2788, + "Ġrelig": 2789, + "ressed": 2790, + "66": 2791, + "Ġlink": 2792, + "Ġlower": 2793, + "ayer": 2794, + "ĠJune": 2795, + "Ġfem": 2796, + "unt": 2797, + "erc": 2798, + "urd": 2799, + "Ġcontact": 2800, + "Ġill": 2801, + "Ġmother": 2802, + "Ġestab": 2803, + "htt": 2804, + "ĠMarch": 2805, + "ĠBro": 2806, + "ĠChina": 2807, + "Ġ29": 2808, + "Ġsqu": 2809, + "Ġprovided": 2810, + "Ġaverage": 2811, + "asons": 2812, + "Ġ2011": 2813, + "Ġexam": 2814, + "lin": 2815, + "55": 2816, + "ned": 2817, + "Ġperfect": 2818, + "Ġtou": 2819, + "alse": 2820, + "ux": 2821, + "Ġbuy": 2822, + "Ġshot": 2823, + "Ġcollect": 2824, + "Ġphot": 2825, + "Ġplayed": 2826, + "Ġsurpr": 2827, + "Ġofficials": 2828, + "Ġsimple": 2829, + "avy": 2830, + "Ġindustry": 2831, + "Ġhands": 2832, + "ground": 2833, + "Ġpull": 2834, + "Ġround": 2835, + "Ġuser": 2836, + "Ġrange": 2837, + "uary": 2838, + "Ġprivate": 2839, + "ops": 2840, + "ees": 2841, + "Ġways": 2842, + "ĠMich": 2843, + "Ġveh": 2844, + "Ġexcept": 2845, + "Ġterms": 2846, + "imum": 2847, + "pper": 2848, + "ION": 2849, + "ores": 2850, + "ĠDragon": 2851, + "oul": 2852, + "Ġden": 2853, + "Ġperformance": 2854, + "Ġbill": 2855, + "cil": 2856, + "47": 2857, + "Ġenvironment": 2858, + "Ġexc": 2859, + "add": 2860, + "Ġworth": 2861, + "Ġpict": 2862, + "Ġchance": 2863, + "Ġ2018": 2864, + "bor": 2865, + "Ġspeed": 2866, + "iction": 2867, + "Ġalleg": 2868, + "ĠJapan": 2869, + "atory": 2870, + "reet": 2871, + "Ġmatch": 2872, + "ĠII": 2873, + "Ġstru": 2874, + "order": 2875, + "Ġste": 2876, + "Ġliving": 2877, + "Ġstruct": 2878, + "ino": 2879, + "Ġsepar": 2880, + "hern": 2881, + "Ġresponse": 2882, + "Ġenjoy": 2883, + "Ġvia": 2884, + "AD": 2885, + "uments": 2886, + "acebook": 2887, + "Ġmember": 2888, + "ibr": 2889, + "izing": 2890, + "Ġtool": 2891, + "ĠMon": 
2892, + "ĠWhile": 2893, + "hood": 2894, + "ĠAng": 2895, + "ĠDef": 2896, + "Ġoffer": 2897, + "Tr": 2898, + "aur": 2899, + "Ġturned": 2900, + "ĠJuly": 2901, + "down": 2902, + "anced": 2903, + "Ġrecently": 2904, + "ĠEar": 2905, + "Ġce": 2906, + "ĠStar": 2907, + "ĠCong": 2908, + "rought": 2909, + "Ġblood": 2910, + "Ġhope": 2911, + "Ġcomment": 2912, + "aint": 2913, + "Ġarri": 2914, + "iles": 2915, + "Ġparticip": 2916, + "ought": 2917, + "ription": 2918, + "08": 2919, + "49": 2920, + "Ġgave": 2921, + "Ġselect": 2922, + "Ġkilled": 2923, + "sych": 2924, + "Ġgoes": 2925, + "ij": 2926, + "Ġcoll": 2927, + "Ġimpact": 2928, + "atives": 2929, + "ĠSer": 2930, + "09": 2931, + "ĠAugust": 2932, + "Ġboy": 2933, + "de": 2934, + "ĠDes": 2935, + "Ġfelt": 2936, + "US": 2937, + "Ġexpected": 2938, + "Ġimage": 2939, + "ĠMark": 2940, + "ccording": 2941, + "oice": 2942, + "EC": 2943, + "ĠMag": 2944, + "ened": 2945, + "hold": 2946, + "ĠPost": 2947, + "Ġprevent": 2948, + "No": 2949, + "Ġinvolved": 2950, + "Ġeyes": 2951, + "Ġquickly": 2952, + "At": 2953, + "unk": 2954, + "Ġbehav": 2955, + "Ġur": 2956, + "Ġled": 2957, + "come": 2958, + "ey": 2959, + "Ġcandid": 2960, + "Ġearlier": 2961, + "Ġfocus": 2962, + "ety": 2963, + "Pro": 2964, + "ledge": 2965, + "ixed": 2966, + "illed": 2967, + "Ġpopular": 2968, + "AP": 2969, + "Ġsett": 2970, + "light": 2971, + "Ġvarious": 2972, + "inks": 2973, + "Ġlevels": 2974, + "Ġroad": 2975, + "ellig": 2976, + "ables": 2977, + "hel": 2978, + "ittee": 2979, + "ĠGener": 2980, + "ype": 2981, + "Ġheard": 2982, + "icles": 2983, + "Ġmis": 2984, + "Ġusers": 2985, + "ĠSan": 2986, + "Ġimprove": 2987, + "Ġfather": 2988, + "Ġsearch": 2989, + "They": 2990, + "vil": 2991, + "Ġprofess": 2992, + "Ġknew": 2993, + "Ġloss": 2994, + "Ġevents": 2995, + "65": 2996, + "Ġbillion": 2997, + "07": 2998, + "02": 2999, + "ĠNews": 3000, + "ĠAM": 3001, + "Ġcover": 3002, + "where": 3003, + "ension": 3004, + "Ġbott": 3005, + "Ġareas": 3006, + "ences": 3007, + "ope": 3008, + "ĠTwitter": 3009, + "ael": 3010, + "Ġgets": 3011, + "ĠGoogle": 3012, + "Ġsn": 3013, + "iant": 3014, + "Ġvote": 3015, + "Ġnearly": 3016, + "Ġincluded": 3017, + "Ġrecogn": 3018, + "zz": 3019, + "mm": 3020, + "aled": 3021, + "Ġhappened": 3022, + "04": 3023, + "Ġhot": 3024, + "Ġwhose": 3025, + "Ġcivil": 3026, + "Ġsuff": 3027, + "oes": 3028, + "itiz": 3029, + "ĠSyri": 3030, + "Ġrespond": 3031, + "Ġhon": 3032, + "Ġfeatures": 3033, + "Ġeconomic": 3034, + "ĠApril": 3035, + "rim": 3036, + "Ġtechnology": 3037, + "Ġoption": 3038, + "aging": 3039, + "Ġpurch": 3040, + "Re": 3041, + "Ġlat": 3042, + "chie": 3043, + "isl": 3044, + "Ġrecomm": 3045, + "uf": 3046, + "Ġtraining": 3047, + "Ġeffects": 3048, + "Ġfast": 3049, + "Ġ2010": 3050, + "Ġoccur": 3051, + "Ġwebsite": 3052, + "Ġemail": 3053, + "Ġsens": 3054, + "ech": 3055, + "Ġoil": 3056, + "Ġinflu": 3057, + "Ġcurrently": 3058, + "ĠSch": 3059, + "ĠAdd": 3060, + "Ġgoal": 3061, + "Ġscient": 3062, + "Ġconv": 3063, + "100": 3064, + "emy": 3065, + "Ġdecided": 3066, + "Ġtravel": 3067, + "Ġmention": 3068, + "LL": 3069, + "03": 3070, + "Ġelection": 3071, + "Ġphone": 3072, + "Ġlooks": 3073, + "Ġsituation": 3074, + "Ġcy": 3075, + "Ġhor": 3076, + "bed": 3077, + "ĠCourt": 3078, + "aily": 3079, + "aves": 3080, + "Ġquality": 3081, + "ĠComp": 3082, + "wise": 3083, + "Ġtable": 3084, + "Ġstaff": 3085, + "ĠWind": 3086, + "ett": 3087, + "Ġtried": 3088, + "idered": 3089, + "Ġaddition": 3090, + "Ġbox": 3091, + "Ġlack": 3092, + "arily": 3093, + "Ġwide": 3094, + "Ġmid": 3095, + "Ġboard": 3096, + "ysis": 3097, + "Ġanti": 3098, + "ha": 3099, + 
"Ġdig": 3100, + "ening": 3101, + "Ġdro": 3102, + "Con": 3103, + "68": 3104, + "Ġslow": 3105, + "based": 3106, + "sequ": 3107, + "Ġpath": 3108, + "Ex": 3109, + "aker": 3110, + "Ġworked": 3111, + "Ġpen": 3112, + "Ġengine": 3113, + "Ġlooked": 3114, + "ĠSuper": 3115, + "ĠServ": 3116, + "Ġvictim": 3117, + "Un": 3118, + "Ġproperty": 3119, + "Ġintrodu": 3120, + "Ġexecut": 3121, + "ĠPM": 3122, + "Le": 3123, + "Ġcolor": 3124, + "ĠMore": 3125, + "Ġ60": 3126, + "Ġnetwork": 3127, + "Ġdate": 3128, + "cul": 3129, + "idge": 3130, + "Ġextra": 3131, + "31": 3132, + "Ġsle": 3133, + "67": 3134, + "Ġwond": 3135, + "Ġreports": 3136, + "just": 3137, + "ĠAustral": 3138, + "Ġcapital": 3139, + "Ġens": 3140, + "Ġcommand": 3141, + "Ġallowed": 3142, + "Ġprep": 3143, + "Ġcapt": 3144, + "hib": 3145, + "Ġnumbers": 3146, + "chan": 3147, + "Ġfair": 3148, + "mp": 3149, + "oms": 3150, + "Ġreach": 3151, + "With": 3152, + "tain": 3153, + "Ġbroad": 3154, + "Ġcouple": 3155, + "ecause": 3156, + "lying": 3157, + "ĠFeb": 3158, + "Ġscreen": 3159, + "Ġlives": 3160, + "Ġprior": 3161, + "ĠCongress": 3162, + "Ar": 3163, + "Ġapproach": 3164, + "Ġemer": 3165, + "aries": 3166, + "ĠDis": 3167, + "serv": 3168, + "ĠNe": 3169, + "Ġbuilt": 3170, + "cies": 3171, + "Ġrepe": 3172, + "Ġrules": 3173, + "force": 3174, + "ĠPal": 3175, + "Ġfinancial": 3176, + "Ġconsidered": 3177, + "ĠChar": 3178, + "nces": 3179, + "ĠIS": 3180, + "Ġbrought": 3181, + "Ġbi": 3182, + "iers": 3183, + "ĠSim": 3184, + "OP": 3185, + "Ġproducts": 3186, + "Ġvisit": 3187, + "Ġdocument": 3188, + "Ġconduct": 3189, + "Ġcompletely": 3190, + "ining": 3191, + "ĠCalif": 3192, + "ibly": 3193, + "Ġwritten": 3194, + "ĠTV": 3195, + "ements": 3196, + "Ġdraw": 3197, + "One": 3198, + "Ġpublished": 3199, + "Ġsecret": 3200, + "rain": 3201, + "het": 3202, + "ĠFacebook": 3203, + "onday": 3204, + "ĠUp": 3205, + "Ġsexual": 3206, + "Ġthous": 3207, + "ĠPat": 3208, + "Ġess": 3209, + "Ġstandard": 3210, + "Ġarm": 3211, + "ges": 3212, + "ection": 3213, + "Ġfell": 3214, + "Ġforeign": 3215, + "ani": 3216, + "ĠFriday": 3217, + "Ġregular": 3218, + "inary": 3219, + "Ġincreased": 3220, + "Ġusually": 3221, + "Ġdemon": 3222, + "Ġdark": 3223, + "Ġadditional": 3224, + "rol": 3225, + "ĠOf": 3226, + "Ġproduction": 3227, + "!!": 3228, + "undred": 3229, + "Ġinternational": 3230, + "idents": 3231, + "ĠFree": 3232, + "roup": 3233, + "Ġrace": 3234, + "Ġmach": 3235, + "Ġhuge": 3236, + "All": 3237, + "lear": 3238, + "ovember": 3239, + "Ġtown": 3240, + "Ġattention": 3241, + "ĠOff": 3242, + "yond": 3243, + "ĠThen": 3244, + "field": 3245, + "Ġterror": 3246, + "raz": 3247, + "ĠBo": 3248, + "Ġmeeting": 3249, + "ĠPark": 3250, + "Ġarrest": 3251, + "Ġfear": 3252, + "Ġaw": 3253, + "ĠVal": 3254, + "oring": 3255, + "',": 3256, + "Ġextreme": 3257, + "arr": 3258, + "Ġworkers": 3259, + "After": 3260, + "Ġ31": 3261, + "net": 3262, + "ament": 3263, + "Ġdirectly": 3264, + "Ġpopulation": 3265, + "ube": 3266, + "ĠOctober": 3267, + "ĠIN": 3268, + "ĠJanuary": 3269, + "59": 3270, + "ĠDavid": 3271, + "Ġcross": 3272, + "cember": 3273, + "ĠFirst": 3274, + "Ġmessage": 3275, + "irit": 3276, + "Ġnation": 3277, + "Ġpoll": 3278, + "isions": 3279, + "Ġanswer": 3280, + "ny": 3281, + "isode": 3282, + "Ġcarry": 3283, + "ĠRussia": 3284, + "Ġhear": 3285, + "ength": 3286, + "roy": 3287, + "Ġnatural": 3288, + "inally": 3289, + "Ġdog": 3290, + "mitted": 3291, + "Ġtrade": 3292, + "Ġsubst": 3293, + "Ġmultiple": 3294, + "ĠAfric": 3295, + "Ġfans": 3296, + "Ġsort": 3297, + "Ġglobal": 3298, + "ication": 3299, + "ĠWed": 3300, + "ara": 3301, + "Ġachie": 3302, + 
"Ġlanguage": 3303, + "vey": 3304, + "Ġtal": 3305, + "Ġnecessary": 3306, + "Ġdetails": 3307, + "Ġsen": 3308, + "ĠSund": 3309, + "ĠReg": 3310, + "ĠRec": 3311, + "06": 3312, + "Ġsil": 3313, + "ressive": 3314, + "Ġmedical": 3315, + "unch": 3316, + "ornia": 3317, + "Ġund": 3318, + "fort": 3319, + "ocks": 3320, + "ĠMonday": 3321, + "uesday": 3322, + "craft": 3323, + "77": 3324, + "urt": 3325, + "Ġver": 3326, + "ĠHill": 3327, + "Ġreceive": 3328, + "Ġmorning": 3329, + "estern": 3330, + "Ġbank": 3331, + "Ġsat": 3332, + "irth": 3333, + "ĠHigh": 3334, + "Ġdevice": 3335, + "ĠTHE": 3336, + "ĠCenter": 3337, + "Ġsafe": 3338, + "Ġple": 3339, + "ĠCanada": 3340, + "Ġsystems": 3341, + "Ġassist": 3342, + "Ġsurv": 3343, + "Ġbattle": 3344, + "ĠSoc": 3345, + "vertis": 3346, + "She": 3347, + "Ġpaper": 3348, + "Ġgrowth": 3349, + "Ġcast": 3350, + "Sc": 3351, + "Ġplans": 3352, + "lled": 3353, + "Ġparts": 3354, + "Ġwall": 3355, + "Ġmovement": 3356, + "Ġpractice": 3357, + "imately": 3358, + "Ġdisplay": 3359, + "Ġsometimes": 3360, + "omp": 3361, + "ĠPaul": 3362, + "ĠYes": 3363, + "king": 3364, + "58": 3365, + "oly": 3366, + "Ġson": 3367, + "Ġavoid": 3368, + "okes": 3369, + "ĠJew": 3370, + "Ġtowards": 3371, + "asc": 3372, + "Ġ//": 3373, + "ĠKore": 3374, + "Ġtalking": 3375, + "Ġcorrect": 3376, + "Ġspent": 3377, + "icks": 3378, + "iable": 3379, + "eared": 3380, + "Ġterm": 3381, + "Ġwants": 3382, + "oming": 3383, + "Ġut": 3384, + "Ġdoub": 3385, + "Ġforces": 3386, + "Ġplease": 3387, + "69": 3388, + "ĠNovember": 3389, + "atform": 3390, + "ondon": 3391, + "Ġones": 3392, + "Ġimmediately": 3393, + "ĠRussian": 3394, + "ĠMet": 3395, + "Ġdeg": 3396, + "Ġparents": 3397, + "CH": 3398, + "ĠAmericans": 3399, + "aly": 3400, + "ĠMod": 3401, + "Ġshown": 3402, + "Ġconditions": 3403, + "Ġstuff": 3404, + "Ġreb": 3405, + "ĠYour": 3406, + "Ġincludes": 3407, + "nown": 3408, + "ĠSam": 3409, + "Ġexperien": 3410, + "mission": 3411, + "ĠEven": 3412, + "aught": 3413, + "Ġannounced": 3414, + "ĠRepublican": 3415, + "Ġdetermin": 3416, + "Ġdescribed": 3417, + "ĠCounty": 3418, + "()": 3419, + "Ġdoor": 3420, + "Ġchanged": 3421, + "Ġneigh": 3422, + "ĠHere": 3423, + "Ġclean": 3424, + "Ġpan": 3425, + "ĠDecember": 3426, + "ĠEuropean": 3427, + "iring": 3428, + "apter": 3429, + "Ġclub": 3430, + "ĠTuesday": 3431, + "Ġpaid": 3432, + "ĠNet": 3433, + "Ġattacks": 3434, + "Ġcharacters": 3435, + "Ġalone": 3436, + "Ġdirector": 3437, + "dom": 3438, + "Ġ35": 3439, + "Ġload": 3440, + "Ġrout": 3441, + "ĠCalifornia": 3442, + "Ġfinally": 3443, + "Ġrac": 3444, + "Ġcontr": 3445, + "Ġexactly": 3446, + "resh": 3447, + "pri": 3448, + "ĠIslam": 3449, + "Ġnature": 3450, + "Ġcareer": 3451, + "Ġlatest": 3452, + "Ġconvers": 3453, + "ĠSl": 3454, + "pose": 3455, + "cient": 3456, + "ĠInc": 3457, + "ivity": 3458, + "88": 3459, + "ĠAtt": 3460, + "ĠMor": 3461, + "nesday": 3462, + "Ġweight": 3463, + "ken": 3464, + "Ġnote": 3465, + "Ġteams": 3466, + "Ġ\\": 3467, + "airs": 3468, + "ĠGreen": 3469, + "Ġhundred": 3470, + "onent": 3471, + "Ġstreng": 3472, + "Ġconsist": 3473, + "icated": 3474, + "Ġregul": 3475, + "Ġlic": 3476, + "astic": 3477, + "Ġten": 3478, + "ursday": 3479, + "elligence": 3480, + "ously": 3481, + "ĠUK": 3482, + "BI": 3483, + "Ġcosts": 3484, + "Ġindepend": 3485, + "ĠAP": 3486, + "Ġnormal": 3487, + "Ġhom": 3488, + "Ġobvious": 3489, + "Ġswe": 3490, + "Ġstar": 3491, + "Ġready": 3492, + "acher": 3493, + "Ġimplement": 3494, + "gest": 3495, + "Ġsong": 3496, + "ĠGet": 3497, + "ĠLab": 3498, + "Ġinteresting": 3499, + "using": 3500, + "Ġgiving": 3501, + "ĠSunday": 3502, + "Ġetc": 3503, + 
"Ġmiddle": 3504, + "Ġremember": 3505, + "right": 3506, + "osition": 3507, + "utions": 3508, + "Ġmax": 3509, + "46": 3510, + "Ġyourself": 3511, + "Ġdemand": 3512, + "Ġtreatment": 3513, + "Ġdanger": 3514, + "ĠCons": 3515, + "Ġguy": 3516, + "ĠBritish": 3517, + "Ġphysical": 3518, + "Ġrelated": 3519, + "Ġremain": 3520, + "Ġcouldn": 3521, + "Ġrefer": 3522, + "Ġcitiz": 3523, + "box": 3524, + "ENT": 3525, + "board": 3526, + "Ġinn": 3527, + "IG": 3528, + "ero": 3529, + "ĠStreet": 3530, + "ospital": 3531, + "rench": 3532, + "chers": 3533, + "Ġstra": 3534, + "OL": 3535, + "ager": 3536, + "ĠAN": 3537, + "Ġeasily": 3538, + "IA": 3539, + "enge": 3540, + "iny": 3541, + "Ġclos": 3542, + "ocked": 3543, + "Ġuses": 3544, + "ĠCoun": 3545, + "Im": 3546, + "uild": 3547, + "??": 3548, + "more": 3549, + "Ġang": 3550, + "Ġwrite": 3551, + "olute": 3552, + "57": 3553, + "Ġleader": 3554, + "Ġreading": 3555, + "": 3784, + "Ġfigure": 3785, + "Ġdisapp": 3786, + "enty": 3787, + "Ġsoftware": 3788, + "Ġult": 3789, + "Ġofficers": 3790, + "New": 3791, + "Is": 3792, + "Ġremains": 3793, + "ĠIndia": 3794, + "Ġpsych": 3795, + "rief": 3796, + "Ġcat": 3797, + "esc": 3798, + "Ġobserv": 3799, + "Ġstage": 3800, + "ĠDark": 3801, + "Ġenter": 3802, + "change": 3803, + "Ġpassed": 3804, + "Ġdespite": 3805, + "ĠOut": 3806, + "Ġmovie": 3807, + "rs": 3808, + "Ġvoice": 3809, + "mine": 3810, + "ĠPlay": 3811, + "Ġtoward": 3812, + "ĠTer": 3813, + "Ġregion": 3814, + "Ġvalues": 3815, + "orters": 3816, + "Ġmount": 3817, + "Ġofficer": 3818, + "ĠOther": 3819, + "ban": 3820, + "Ġhous": 3821, + "wood": 3822, + "room": 3823, + "IV": 3824, + "ĠSun": 3825, + "see": 3826, + "ĠOver": 3827, + "rog": 3828, + "90": 3829, + "Ġlay": 3830, + "ĠTur": 3831, + "awn": 3832, + "Ġpressure": 3833, + "ĠSub": 3834, + "Ġbooks": 3835, + "edom": 3836, + "ĠSand": 3837, + "AA": 3838, + "ago": 3839, + "Ġreasons": 3840, + "ford": 3841, + "Ġactivity": 3842, + "UT": 3843, + "Now": 3844, + "ĠSenate": 3845, + "cell": 3846, + "night": 3847, + "Ġcalls": 3848, + "inter": 3849, + "Ġletter": 3850, + "ĠRob": 3851, + "ĠJe": 3852, + "Ġchoose": 3853, + "ĠLaw": 3854, + "Get": 3855, + "Be": 3856, + "Ġrob": 3857, + "Ġtypes": 3858, + "Ġplatform": 3859, + "Ġquarter": 3860, + "RA": 3861, + "ĠTime": 3862, + "Ġmaybe": 3863, + "ĠCr": 3864, + "95": 3865, + "pre": 3866, + "Ġmoving": 3867, + "Ġlif": 3868, + "Ġgold": 3869, + "Ġsom": 3870, + "Ġpatients": 3871, + "Ġtruth": 3872, + "ĠKe": 3873, + "urance": 3874, + "antly": 3875, + "mar": 3876, + "Ġcharge": 3877, + "ĠGreat": 3878, + "Ġcele": 3879, + "--------------------------------": 3880, + "Ġrock": 3881, + "roid": 3882, + "ancy": 3883, + "Ġcredit": 3884, + "aud": 3885, + "By": 3886, + "ĠEvery": 3887, + "Ġmoved": 3888, + "inger": 3889, + "ribution": 3890, + "Ġnames": 3891, + "Ġstraight": 3892, + "ĠHealth": 3893, + "ĠWell": 3894, + "Ġfeature": 3895, + "Ġrule": 3896, + "Ġsche": 3897, + "inated": 3898, + "ĠMichael": 3899, + "berg": 3900, + "41": 3901, + "iled": 3902, + "band": 3903, + "Ġclick": 3904, + "ĠAngel": 3905, + "onents": 3906, + "ÂŃ": 3907, + "ĠIraq": 3908, + "ĠSaturday": 3909, + "Ġaware": 3910, + "part": 3911, + "Ġpattern": 3912, + "OW": 3913, + "ĠLet": 3914, + "Ġgrad": 3915, + "igned": 3916, + "Ġassociated": 3917, + "Ġstyle": 3918, + "no": 3919, + "iation": 3920, + "aith": 3921, + "ilies": 3922, + "Ġstories": 3923, + "uration": 3924, + "Ġindividuals": 3925, + "ĠâĢ¦": 3926, + "miss": 3927, + "ĠAssoci": 3928, + "ishing": 3929, + "aby": 3930, + "Ġsummer": 3931, + "ĠBen": 3932, + "Ġ32": 3933, + "Ġarch": 3934, + "uty": 3935, + "ĠTexas": 3936, + "hol": 
3937, + "Ġfully": 3938, + "Ġmill": 3939, + "Ġfollowed": 3940, + "ĠBill": 3941, + "ĠIndian": 3942, + "ĠSecret": 3943, + "ĠBel": 3944, + "ĠFebruary": 3945, + "Ġjobs": 3946, + "Ġseemed": 3947, + "ĠGovern": 3948, + "ipped": 3949, + "Ġreality": 3950, + "Ġlines": 3951, + "Ġpark": 3952, + "Ġmeasure": 3953, + "ĠOur": 3954, + "IM": 3955, + "Ġbrother": 3956, + "Ġgrowing": 3957, + "Ġban": 3958, + "Ġestim": 3959, + "Ġcry": 3960, + "ĠSchool": 3961, + "Ġmechan": 3962, + "ĠOF": 3963, + "ĠWindows": 3964, + "Ġrates": 3965, + "ĠOh": 3966, + "Ġpositive": 3967, + "Ġculture": 3968, + "istics": 3969, + "ica": 3970, + "Ġhar": 3971, + "ya": 3972, + "itely": 3973, + "ipp": 3974, + "Ġmap": 3975, + "encies": 3976, + "ĠWilliam": 3977, + "II": 3978, + "akers": 3979, + "56": 3980, + "ĠMart": 3981, + "ĠRem": 3982, + "Ġaltern": 3983, + "itude": 3984, + "Ġcoach": 3985, + "rowd": 3986, + "Don": 3987, + "Ġkids": 3988, + "Ġjournal": 3989, + "Ġcorpor": 3990, + "Ġfalse": 3991, + "Ġweb": 3992, + "Ġsleep": 3993, + "Ġcontain": 3994, + "Ġsto": 3995, + "Ġbed": 3996, + "iverse": 3997, + "ĠRich": 3998, + "ĠChinese": 3999, + "Ġpun": 4000, + "Ġmeant": 4001, + "known": 4002, + "Ġnotice": 4003, + "Ġfavorite": 4004, + "aven": 4005, + "Ġcondition": 4006, + "Ġpurpose": 4007, + "))": 4008, + "Ġorganization": 4009, + "Ġchalleng": 4010, + "Ġmanufact": 4011, + "Ġsusp": 4012, + "ĠAc": 4013, + "Ġcritic": 4014, + "unes": 4015, + "uclear": 4016, + "Ġmer": 4017, + "vention": 4018, + "Ġ80": 4019, + "Ġmist": 4020, + "ĠUs": 4021, + "ĠTor": 4022, + "http": 4023, + "olf": 4024, + "Ġlarger": 4025, + "Ġadvant": 4026, + "Ġresear": 4027, + "Ġactions": 4028, + "ml": 4029, + "Ġkept": 4030, + "Ġaim": 4031, + ",'": 4032, + "col": 4033, + "Ġbenefits": 4034, + "ifying": 4035, + "Ġactual": 4036, + "ĠInternational": 4037, + "Ġvehicle": 4038, + "Ġchief": 4039, + "Ġefforts": 4040, + "ĠLeague": 4041, + "ĠMost": 4042, + "Ġwait": 4043, + "Ġadult": 4044, + "Ġoverall": 4045, + "Ġspeech": 4046, + "Ġhighly": 4047, + "Ġfemale": 4048, + "Ġerror": 4049, + "Ġeffective": 4050, + "54": 4051, + "Ġencour": 4052, + "well": 4053, + "Ġfailed": 4054, + "Ġconserv": 4055, + "Ġprograms": 4056, + "Ġtrou": 4057, + "Ġahead": 4058, + "500": 4059, + "vertisement": 4060, + "IP": 4061, + "ĠFound": 4062, + "pir": 4063, + "Ġ%": 4064, + "Ġcrime": 4065, + "ander": 4066, + "Ġlocation": 4067, + "ĠIran": 4068, + "Ġbehavior": 4069, + "azing": 4070, + "Ġrare": 4071, + "Ġemb": 4072, + "Ġcaused": 4073, + "Ġship": 4074, + "Ġactive": 4075, + "Ġcontribut": 4076, + "Ġgreen": 4077, + "Ġacqu": 4078, + "Ġreflect": 4079, + "venue": 4080, + "Ġfirm": 4081, + "Ġbirth": 4082, + "].": 4083, + "Ġclearly": 4084, + "Ġemot": 4085, + "Ġagency": 4086, + "riage": 4087, + "Ġmemory": 4088, + "98": 4089, + "SA": 4090, + "ĠSee": 4091, + "acing": 4092, + "CC": 4093, + "Ġbiggest": 4094, + "Ġrap": 4095, + "Ġbasic": 4096, + "Ġband": 4097, + "eat": 4098, + "Ġsuspect": 4099, + "ĠMac": 4100, + "Ġ90": 4101, + "mark": 4102, + "istan": 4103, + "Ġspread": 4104, + "ams": 4105, + "ki": 4106, + "asy": 4107, + "rav": 4108, + "ĠRober": 4109, + "Ġdemonstr": 4110, + "rated": 4111, + "Ġabsolute": 4112, + "Ġplaces": 4113, + "Ġimpl": 4114, + "ibrary": 4115, + "Ġcards": 4116, + "Ġdestroy": 4117, + "Ġvirt": 4118, + "vere": 4119, + "Ġappeared": 4120, + "yan": 4121, + "point": 4122, + "Ġbeg": 4123, + "Ġtemper": 4124, + "spe": 4125, + "anted": 4126, + "ears": 4127, + "ĠDirect": 4128, + "Ġlength": 4129, + "Ġblog": 4130, + "amb": 4131, + "Ġinteg": 4132, + "Ġresources": 4133, + "acc": 4134, + "iful": 4135, + "Ġspot": 4136, + "Ġforced": 4137, + "Ġthousands": 
4138, + "ĠMinister": 4139, + "Ġqual": 4140, + "ĠFrench": 4141, + "atically": 4142, + "Ġgenerally": 4143, + "Ġdrink": 4144, + "Ġthus": 4145, + "IL": 4146, + "odes": 4147, + "Ġappropri": 4148, + "ĠRead": 4149, + "Ġwhom": 4150, + "Ġeye": 4151, + "Ġcollege": 4152, + "Ġ45": 4153, + "irection": 4154, + "Ġensure": 4155, + "Ġapparent": 4156, + "iders": 4157, + "Ġreligious": 4158, + "Ġminor": 4159, + "olic": 4160, + "Ġtro": 4161, + "ĠWhy": 4162, + "ribute": 4163, + "met": 4164, + "Ġprimary": 4165, + "Ġdeveloped": 4166, + "Ġpeace": 4167, + "Ġskin": 4168, + "ste": 4169, + "ava": 4170, + "Ġblue": 4171, + "Ġfamilies": 4172, + "Ġir": 4173, + "Ġapply": 4174, + "Ġinform": 4175, + "ĠSmith": 4176, + "CT": 4177, + "ii": 4178, + "Ġlimit": 4179, + "Ġresist": 4180, + "................": 4181, + "umn": 4182, + "Ġconflic": 4183, + "Ġtwe": 4184, + "udd": 4185, + "ĠTom": 4186, + "Ġliter": 4187, + "que": 4188, + "bon": 4189, + "Ġhair": 4190, + "Ġeventually": 4191, + "Ġpus": 4192, + "Ġhelped": 4193, + "Ġagg": 4194, + "orney": 4195, + "ĠApple": 4196, + "Ġfit": 4197, + "ĠSur": 4198, + "Ġprem": 4199, + "Ġsales": 4200, + "Ġseconds": 4201, + "Ġstrength": 4202, + "Ġfeeling": 4203, + "¿½": 4204, + "Ġtour": 4205, + "Ġknows": 4206, + "oom": 4207, + "Ġexerc": 4208, + "Ġsomew": 4209, + "�": 4210, + ">>": 4211, + "Ġspokes": 4212, + "Ġideas": 4213, + "Ġregist": 4214, + "soft": 4215, + "ĠDel": 4216, + "ĠPC": 4217, + "Ġpropos": 4218, + "Ġlaunch": 4219, + "Ġbottom": 4220, + "TH": 4221, + "ĠPlease": 4222, + "vest": 4223, + "itz": 4224, + "ĠInter": 4225, + "Ġscript": 4226, + "Ġrat": 4227, + "arning": 4228, + "Ġil": 4229, + "ĠJer": 4230, + "ĠAre": 4231, + "Ġwhatever": 4232, + "oken": 4233, + "cience": 4234, + "Ġmode": 4235, + "Ġagree": 4236, + "Ġsources": 4237, + "Ġinitial": 4238, + "Ġrestrict": 4239, + "Ġwonder": 4240, + "usion": 4241, + "####": 4242, + "ĠSil": 4243, + "ville": 4244, + "Ġburn": 4245, + "tw": 4246, + "asion": 4247, + "Ġ£": 4248, + "Ġnor": 4249, + "uing": 4250, + "Ġreached": 4251, + "Ġsun": 4252, + "Ġcateg": 4253, + "igration": 4254, + "Ġcook": 4255, + "Ġpromot": 4256, + "Ġmale": 4257, + "Ġclimate": 4258, + "Ġfix": 4259, + "Ġalleged": 4260, + "UR": 4261, + "alled": 4262, + "Ġimages": 4263, + "Cont": 4264, + "ota": 4265, + "Ġschools": 4266, + "ios": 4267, + "Ġdrop": 4268, + "Ġstream": 4269, + "ĠMo": 4270, + "Ġpreviously": 4271, + "aling": 4272, + "Ġpet": 4273, + "Ġdouble": 4274, + "Ġ(@": 4275, + "annel": 4276, + "Ġdefault": 4277, + "ties": 4278, + "Ġrank": 4279, + "ĠDec": 4280, + "ĠCouncil": 4281, + "Ġweapon": 4282, + "Ġstock": 4283, + "Ġanaly": 4284, + "ĠStr": 4285, + "Ġpicture": 4286, + "ĠPolice": 4287, + "ference": 4288, + "Ġcentury": 4289, + "Ġcitizens": 4290, + "Ġonto": 4291, + "Ġexpand": 4292, + "Ġhero": 4293, + "ĠSol": 4294, + "Ġwild": 4295, + "Ġupdate": 4296, + "Ġcustomers": 4297, + "ront": 4298, + "def": 4299, + "Ġlik": 4300, + "Ġcriminal": 4301, + "ĠChristian": 4302, + "SP": 4303, + "76": 4304, + "Ġleaving": 4305, + "Ġotherwise": 4306, + "ĠDist": 4307, + "Ġbasis": 4308, + "52": 4309, + "53": 4310, + "icip": 4311, + "ĠBer": 4312, + "Ġrecommend": 4313, + "Ġfloor": 4314, + "Ġcrowd": 4315, + "oles": 4316, + "Ġ70": 4317, + "Ġcentral": 4318, + "ĠEv": 4319, + "Ġdream": 4320, + "Ġdownload": 4321, + "Ġconfir": 4322, + "ĠThom": 4323, + "Ġwindow": 4324, + "Ġhappens": 4325, + "Ġunit": 4326, + "Ġtend": 4327, + "Ġspl": 4328, + "Ġbecomes": 4329, + "Ġfighting": 4330, + "Ġpredict": 4331, + "ĠPress": 4332, + "ĠPower": 4333, + "Ġheavy": 4334, + "aked": 4335, + "Ġfan": 4336, + "orter": 4337, + "ategy": 4338, + "BA": 4339, + 
"izes": 4340, + "Ġspend": 4341, + "Here": 4342, + "Ġ2007": 4343, + "Ġadop": 4344, + "ĠHam": 4345, + "Ġfootball": 4346, + "ĠPort": 4347, + "oday": 4348, + "51": 4349, + "ampions": 4350, + "Ġtransfer": 4351, + "ht": 4352, + "Ġ38": 4353, + "term": 4354, + "acity": 4355, + "Ġbur": 4356, + "],": 4357, + "ternal": 4358, + "rig": 4359, + "but": 4360, + "Ġtherefore": 4361, + "ĠBecause": 4362, + "resp": 4363, + "rey": 4364, + "Ġmission": 4365, + "Some": 4366, + "Ġnoted": 4367, + "Ġassum": 4368, + "Ġdisease": 4369, + "Ġedit": 4370, + "Ġprogress": 4371, + "rd": 4372, + "ĠBrown": 4373, + "ocal": 4374, + "Ġadding": 4375, + "Ġraised": 4376, + "ĠAny": 4377, + "Ġtick": 4378, + "Ġseeing": 4379, + "ĠPeople": 4380, + "Ġagreement": 4381, + "Ġserver": 4382, + "Ġwat": 4383, + "Ġdebate": 4384, + "Ġsupposed": 4385, + "iling": 4386, + "Ġlargest": 4387, + "Ġsuccessful": 4388, + "ĠPri": 4389, + "ĠDemocratic": 4390, + "Ġjump": 4391, + "ĠSyria": 4392, + "Ġowners": 4393, + "Ġoffers": 4394, + "Ġshooting": 4395, + "Ġeffic": 4396, + "sey": 4397, + "Ġhaven": 4398, + "verse": 4399, + "tered": 4400, + "ĠLight": 4401, + "imal": 4402, + "ĠBig": 4403, + "Ġdefend": 4404, + "Ġbeat": 4405, + "Ġrecords": 4406, + "%)": 4407, + "Ġscen": 4408, + "Ġemployees": 4409, + "Ġdevices": 4410, + "hem": 4411, + "Ġcommer": 4412, + "ĠMex": 4413, + "Ġbenefit": 4414, + "ĠProf": 4415, + "Ġilleg": 4416, + "Ġsurface": 4417, + "ĠAlso": 4418, + "Ġharm": 4419, + "ingly": 4420, + "wide": 4421, + "ĠAlex": 4422, + "Ġshut": 4423, + "ĠCur": 4424, + "Ġlose": 4425, + "pm": 4426, + "Ġchallenge": 4427, + "semb": 4428, + "Ġstation": 4429, + "Ġintelligence": 4430, + "Ġaccur": 4431, + "ĠFlor": 4432, + "Ġrequires": 4433, + "ĠMal": 4434, + "bum": 4435, + "Ġhospital": 4436, + "Ġspirit": 4437, + "Ġoffered": 4438, + "Ġproduce": 4439, + "ĠCommun": 4440, + "Ġcreating": 4441, + "Ġcris": 4442, + "spect": 4443, + "Ġended": 4444, + "Ġdaily": 4445, + "Ġvoters": 4446, + "lands": 4447, + "ias": 4448, + "ih": 4449, + "ona": 4450, + "Ġsmart": 4451, + "ĠOffice": 4452, + "ĠLord": 4453, + "rial": 4454, + "ĠInternet": 4455, + "Ġcircum": 4456, + "Ġextremely": 4457, + "'.": 4458, + "Ġopinion": 4459, + "ĠMil": 4460, + "Ġgain": 4461, + "BS": 4462, + "ĠFin": 4463, + "yp": 4464, + "Ġuseful": 4465, + "Ġbudget": 4466, + "Ġcomfort": 4467, + "isf": 4468, + "Ġbackground": 4469, + "eline": 4470, + "Ġepisode": 4471, + "Ġenemy": 4472, + "Ġtrial": 4473, + "Ġestablish": 4474, + "date": 4475, + "ĠCap": 4476, + "Ġcontinues": 4477, + "Ġshowing": 4478, + "ĠUnion": 4479, + "with": 4480, + "Ġposted": 4481, + "ĠSystem": 4482, + "Ġeat": 4483, + "rian": 4484, + "Ġrise": 4485, + "ĠGermany": 4486, + "ils": 4487, + "Ġsigned": 4488, + "Ġvill": 4489, + "Ġgrand": 4490, + "mor": 4491, + "ĠEngland": 4492, + "Ġprojects": 4493, + "umber": 4494, + "Ġconference": 4495, + "za": 4496, + "Ġresponsible": 4497, + "ĠArab": 4498, + "Ġlearned": 4499, + "âĢĶâĢĶ": 4500, + "ipping": 4501, + "ĠGeorge": 4502, + "OC": 4503, + "Ġreturned": 4504, + "ĠAustralia": 4505, + "Ġbrief": 4506, + "Qu": 4507, + "Ġbrand": 4508, + "illing": 4509, + "abled": 4510, + "Ġhighest": 4511, + "Ġtrain": 4512, + "ĠCommission": 4513, + "while": 4514, + "Ġnom": 4515, + "ception": 4516, + "Ġmut": 4517, + "ĠBlue": 4518, + "Ġincident": 4519, + "vant": 4520, + "86": 4521, + "ĠID": 4522, + "Ġnuclear": 4523, + "74": 4524, + "ĠLike": 4525, + "ĠRE": 4526, + "ĠMicro": 4527, + "li": 4528, + "mail": 4529, + "Ġcharges": 4530, + "89": 4531, + "Ġadjust": 4532, + "ado": 4533, + "Ġearth": 4534, + "NA": 4535, + "Ġprices": 4536, + "PA": 4537, + "Ġdraft": 4538, + "Ġruns": 4539, + 
"Ġcandidate": 4540, + "enses": 4541, + "Ġmanagement": 4542, + "ĠPhil": 4543, + "ĠMiss": 4544, + "Ġteach": 4545, + "gram": 4546, + "Ġunderstanding": 4547, + "ait": 4548, + "icago": 4549, + "Add": 4550, + "ĠEp": 4551, + "secut": 4552, + "Ġseparate": 4553, + "Ġinstance": 4554, + "Ġeth": 4555, + "Ġunless": 4556, + "********": 4557, + "ĠFore": 4558, + "inate": 4559, + "Ġoperations": 4560, + "Sp": 4561, + "Ġfaith": 4562, + "gar": 4563, + "ĠChurch": 4564, + "ronic": 4565, + "Ġconfig": 4566, + "osure": 4567, + "Ġactivities": 4568, + "Ġtraditional": 4569, + "Ġ36": 4570, + "Ġdirection": 4571, + "Ġmachine": 4572, + "Ġsurround": 4573, + "Ġpush": 4574, + "unction": 4575, + "ĠEU": 4576, + "Ġeasier": 4577, + "Ġargument": 4578, + "GB": 4579, + "Ġmicro": 4580, + "Ġspending": 4581, + "izations": 4582, + "Ġtheory": 4583, + "adow": 4584, + "Ġcalling": 4585, + "ĠLast": 4586, + "Ġder": 4587, + "Ġinfluence": 4588, + "Ġcommit": 4589, + "Ġphoto": 4590, + "Ġunc": 4591, + "istry": 4592, + "gn": 4593, + "aste": 4594, + "acks": 4595, + "Ġdisp": 4596, + "ady": 4597, + "do": 4598, + "ĠGood": 4599, + "Ġ`": 4600, + "Ġwish": 4601, + "Ġrevealed": 4602, + "³³": 4603, + "lig": 4604, + "Ġenforce": 4605, + "ĠCommittee": 4606, + "Ġchem": 4607, + "Ġmiles": 4608, + "Ġinterested": 4609, + "Ġsolution": 4610, + "icy": 4611, + "inct": 4612, + "Ġ->": 4613, + "ĠDet": 4614, + "Ġremoved": 4615, + "Ġcompar": 4616, + "eah": 4617, + "Ġplant": 4618, + "ĠSince": 4619, + "Ġachieve": 4620, + "Ġadvantage": 4621, + "Ġslightly": 4622, + "bing": 4623, + "Ġplaced": 4624, + "under": 4625, + "2015": 4626, + "ĠMad": 4627, + "Ġtim": 4628, + "oses": 4629, + "Ġcru": 4630, + "ĠRock": 4631, + "Ġmostly": 4632, + "Ġnegative": 4633, + "Ġsetting": 4634, + "Ġproduced": 4635, + "Ġmur": 4636, + "Ġconnection": 4637, + "ĠMer": 4638, + "Ġdriver": 4639, + "Ġexecutive": 4640, + "Ġassault": 4641, + "Ġborn": 4642, + "ĠVer": 4643, + "tained": 4644, + "Ġstructure": 4645, + "Ġreduce": 4646, + "Ġdecades": 4647, + "Ġded": 4648, + "uke": 4649, + "ĠMany": 4650, + "idden": 4651, + "Ġleague": 4652, + "Se": 4653, + "Ġjoin": 4654, + "Ġdisco": 4655, + "Ġdie": 4656, + "cks": 4657, + "actions": 4658, + "Ġassess": 4659, + "agn": 4660, + "Ġgoals": 4661, + "ours": 4662, + "IR": 4663, + "Ġsenior": 4664, + "iller": 4665, + "mod": 4666, + "ipment": 4667, + "ocol": 4668, + "uy": 4669, + "ĠQue": 4670, + "Ġparties": 4671, + "irgin": 4672, + "Ġlearning": 4673, + "itable": 4674, + "Ġstreet": 4675, + "Ġcamera": 4676, + "App": 4677, + "Ġskills": 4678, + "bre": 4679, + "cious": 4680, + "Ġcelebr": 4681, + "ĠFranc": 4682, + "Ġexisting": 4683, + "Ġwilling": 4684, + "lor": 4685, + "Ġid": 4686, + "ĠSpace": 4687, + "Ġcritical": 4688, + "ĠLa": 4689, + "ortunately": 4690, + "Ġserve": 4691, + "Ġcold": 4692, + "Ġspecies": 4693, + "TS": 4694, + "Ġanimals": 4695, + "ĠBay": 4696, + "Ġolder": 4697, + "ĠUnder": 4698, + "estic": 4699, + "ĠTre": 4700, + "Ġteacher": 4701, + "Ġprefer": 4702, + "vis": 4703, + "Ġthread": 4704, + "ĠMatt": 4705, + "Ġmanager": 4706, + "ãĥ»": 4707, + "Ġprofessional": 4708, + "ĠVol": 4709, + "Ġnotes": 4710, + "These": 4711, + "ula": 4712, + "Ġfresh": 4713, + "ented": 4714, + "uzz": 4715, + "edy": 4716, + "clusion": 4717, + "ĠRel": 4718, + "Ġdoubt": 4719, + "EO": 4720, + "Ġopened": 4721, + "ĠBit": 4722, + "Advertisement": 4723, + "Ġguess": 4724, + "ĠUN": 4725, + "Ġsequ": 4726, + "Ġexplain": 4727, + "otten": 4728, + "Ġattract": 4729, + "aks": 4730, + "Ġstring": 4731, + "Ġcontext": 4732, + "ossible": 4733, + "ĠRepublicans": 4734, + "Ġsolid": 4735, + "Ġcities": 4736, + "Ġasking": 4737, + 
"Ġrandom": 4738, + "ups": 4739, + "uries": 4740, + "arant": 4741, + "dden": 4742, + "gl": 4743, + "ĠFlorida": 4744, + "Ġdepend": 4745, + "ĠScott": 4746, + "Ġ33": 4747, + "ĠiT": 4748, + "icon": 4749, + "Ġmentioned": 4750, + "Ġ2000": 4751, + "Ġclaimed": 4752, + "Ġdefinitely": 4753, + "ulf": 4754, + "Ġcore": 4755, + "Ġopening": 4756, + "ĠConst": 4757, + "which": 4758, + "ĠTra": 4759, + "AG": 4760, + "72": 4761, + "Ġbelieved": 4762, + "ada": 4763, + "Ġ48": 4764, + "ĠSecurity": 4765, + "yright": 4766, + "ĠPet": 4767, + "ĠLou": 4768, + "Ġholding": 4769, + "================": 4770, + "Ġice": 4771, + "Ġbrow": 4772, + "Ġauthorities": 4773, + "host": 4774, + "word": 4775, + "Ġscore": 4776, + "ĠDiv": 4777, + "Ġcells": 4778, + "Ġtransl": 4779, + "Ġneighbor": 4780, + "Ġremove": 4781, + "uct": 4782, + "Ġdistrict": 4783, + "ĠAccording": 4784, + "Ġworse": 4785, + "Ġconcerns": 4786, + "Ġpresidential": 4787, + "Ġpolicies": 4788, + "ĠHall": 4789, + "73": 4790, + "Ġhus": 4791, + "AY": 4792, + "Ġ2006": 4793, + "ĠJud": 4794, + "Ġindependent": 4795, + "ĠJustice": 4796, + "iliar": 4797, + "print": 4798, + "ighter": 4799, + "Ġprotection": 4800, + "zen": 4801, + "Ġsudden": 4802, + "house": 4803, + "ĠJes": 4804, + "PR": 4805, + "ĠInf": 4806, + "Ġbul": 4807, + "Ġ_": 4808, + "ĠService": 4809, + "ĠPR": 4810, + "Ġstrategy": 4811, + "ffect": 4812, + "Ġgirls": 4813, + "Ġmissing": 4814, + "oyal": 4815, + "ĠTeam": 4816, + "ulated": 4817, + "Ġdat": 4818, + "Ġpolitics": 4819, + "abor": 4820, + "According": 4821, + "Ġspell": 4822, + "Ġgraph": 4823, + "orthern": 4824, + "TC": 4825, + "Ab": 4826, + "Ġlabor": 4827, + "isher": 4828, + "Ġkick": 4829, + "ĠiTunes": 4830, + "Ġsteps": 4831, + "poses": 4832, + "Ġsmaller": 4833, + "En": 4834, + "bert": 4835, + "Ġroll": 4836, + "Ġresearchers": 4837, + "Ġclosed": 4838, + "Ġtransport": 4839, + "Ġlawy": 4840, + "________________": 4841, + "ĠChicago": 4842, + "Ġaspect": 4843, + "Ġnone": 4844, + "Ġmarriage": 4845, + "96": 4846, + "Ġelements": 4847, + "ĠFre": 4848, + "ĠSal": 4849, + "Ġdram": 4850, + "FC": 4851, + "top": 4852, + "equ": 4853, + "Ġhearing": 4854, + "Ġsupported": 4855, + "Ġtesting": 4856, + "cohol": 4857, + "Ġmassive": 4858, + "Ġstick": 4859, + "Ġguard": 4860, + "isco": 4861, + "phone": 4862, + "From": 4863, + "However": 4864, + "Ġborder": 4865, + "Ġcopy": 4866, + "ography": 4867, + "list": 4868, + "71": 4869, + "Ġowner": 4870, + "class": 4871, + "ruit": 4872, + "rate": 4873, + "ĠOnce": 4874, + "Ġdigital": 4875, + "Ġtask": 4876, + "ERS": 4877, + "Ġincred": 4878, + "tes": 4879, + "++": 4880, + "ĠFrance": 4881, + "Ġbreat": 4882, + "owl": 4883, + "Ġissued": 4884, + "ĠWestern": 4885, + "Ġdetect": 4886, + "Ġpartners": 4887, + "Ġshared": 4888, + "ĠCall": 4889, + "Ġcancer": 4890, + "ache": 4891, + "ribe": 4892, + "Ġexplained": 4893, + "Ġheat": 4894, + "{\"": 4895, + "Ġinvestment": 4896, + "ĠBook": 4897, + "Ġwood": 4898, + "Ġtools": 4899, + "ĠAlthough": 4900, + "Ġbelief": 4901, + "Ġcrisis": 4902, + "Ġge": 4903, + "ĠMP": 4904, + "Ġoperation": 4905, + "type": 4906, + "~~": 4907, + "ga": 4908, + "Ġcontains": 4909, + "anta": 4910, + "Ġexpress": 4911, + "ĠGroup": 4912, + "ĠJournal": 4913, + "ka": 4914, + "Ġamb": 4915, + "ĠUSA": 4916, + "Ġfinding": 4917, + "Ġfunding": 4918, + "how": 4919, + "Ġestablished": 4920, + "ideos": 4921, + "Ġdegree": 4922, + "Ġdangerous": 4923, + "anging": 4924, + "Ġfreedom": 4925, + "pport": 4926, + "outhern": 4927, + "Ġchurch": 4928, + "Ġcatch": 4929, + "ĠTwo": 4930, + "Ġpresence": 4931, + "ĠGuard": 4932, + "Up": 4933, + "Ġauthority": 4934, + "ĠProject": 4935, + 
"Ġbutton": 4936, + "Ġconsequ": 4937, + "Ġvalid": 4938, + "Ġweak": 4939, + "Ġstarts": 4940, + "Ġreference": 4941, + "ĠMem": 4942, + "\")": 4943, + "UN": 4944, + "orage": 4945, + "ĠOpen": 4946, + "Ġcollection": 4947, + "ym": 4948, + "gency": 4949, + "Ġbeautiful": 4950, + "ros": 4951, + "Ġtells": 4952, + "Ġwaiting": 4953, + "nel": 4954, + "Ġproviding": 4955, + "ĠDemocrats": 4956, + "Ġdaughter": 4957, + "Ġmaster": 4958, + "Ġpurposes": 4959, + "ĠJapanese": 4960, + "Ġequal": 4961, + "Ġturns": 4962, + "Ġdocuments": 4963, + "Ġwatching": 4964, + "Res": 4965, + "Ġran": 4966, + "2014": 4967, + "Ġreject": 4968, + "ĠKorea": 4969, + "Ġvictims": 4970, + "Level": 4971, + "erences": 4972, + "Ġwitness": 4973, + "Ġ34": 4974, + "Ġreform": 4975, + "coming": 4976, + "Ġoccup": 4977, + "Ġcaught": 4978, + "Ġtraffic": 4979, + "ading": 4980, + "Ġmodels": 4981, + "ario": 4982, + "Ġserved": 4983, + "Ġbatter": 4984, + "uate": 4985, + "ĠSecretary": 4986, + "Ġagreed": 4987, + "Ġtruly": 4988, + "ynam": 4989, + "ĠRet": 4990, + "Ġunits": 4991, + "ĠResearch": 4992, + "hand": 4993, + "azine": 4994, + "ĠMike": 4995, + "Ġvariety": 4996, + "otal": 4997, + "Ġamazing": 4998, + "Ġconfirmed": 4999, + "Ġentirely": 5000, + "Ġpurchase": 5001, + "Ġelement": 5002, + "Ġcash": 5003, + "Ġdetermine": 5004, + "De": 5005, + "Ġcars": 5006, + "ĠWall": 5007, + "âĸ": 5008, + "Ġviews": 5009, + "Ġdrugs": 5010, + "Ġdepartment": 5011, + "ĠStep": 5012, + "uit": 5013, + "Ġ39": 5014, + "asure": 5015, + "ĠClass": 5016, + "Ġcovered": 5017, + "ĠBank": 5018, + "Ġmere": 5019, + "uana": 5020, + "Ġmulti": 5021, + "Ġmix": 5022, + "Ġunlike": 5023, + "levision": 5024, + "Ġstopped": 5025, + "Ġsem": 5026, + "ĠGal": 5027, + "ules": 5028, + "Ġwel": 5029, + "ĠJohnson": 5030, + "la": 5031, + "Ġskill": 5032, + "Ġbecoming": 5033, + "rie": 5034, + "Ġappropriate": 5035, + "fe": 5036, + "ellow": 5037, + "ĠProt": 5038, + "ulate": 5039, + "ocation": 5040, + "Ġweekend": 5041, + "odies": 5042, + "Ġsites": 5043, + "Ġanimal": 5044, + "ĠTim": 5045, + "Ġscale": 5046, + "Ġcharged": 5047, + "Ġinstruct": 5048, + "illa": 5049, + "Ġmethods": 5050, + "Ġcert": 5051, + "Ġjudge": 5052, + "ĠHel": 5053, + "Ġdollars": 5054, + "Ġstanding": 5055, + "ĠSqu": 5056, + "Ġdebt": 5057, + "liam": 5058, + "Ġdriving": 5059, + "ĠSum": 5060, + "ĠEdition": 5061, + "Ġalbum": 5062, + "andon": 5063, + "IF": 5064, + "ĠUk": 5065, + "63": 5066, + "ader": 5067, + "Ġcommercial": 5068, + "esh": 5069, + "ĠGovernment": 5070, + "Ġdiscovered": 5071, + "Ġoutput": 5072, + "ĠHillary": 5073, + "ĠCarol": 5074, + "Ġ2005": 5075, + "Ġabuse": 5076, + "ancing": 5077, + "Ġswitch": 5078, + "Ġannual": 5079, + "Tw": 5080, + "Ġstated": 5081, + "agement": 5082, + "inner": 5083, + "Ġdemocr": 5084, + "Ġresidents": 5085, + "Ġallowing": 5086, + "Ġfactors": 5087, + "odd": 5088, + "Ġfuck": 5089, + "emies": 5090, + "Ġoccurred": 5091, + "oti": 5092, + "Ġnorth": 5093, + "ĠPublic": 5094, + "Ġinjury": 5095, + "Ġinsurance": 5096, + "CL": 5097, + "olly": 5098, + "ãĢ": 5099, + "Ġrepeated": 5100, + "Ġarms": 5101, + "anged": 5102, + "Ġconstruction": 5103, + "Ġfle": 5104, + "PU": 5105, + "icians": 5106, + "Ġforms": 5107, + "ĠMcC": 5108, + "antic": 5109, + "Ġmental": 5110, + "pire": 5111, + "Ġequipment": 5112, + "Ġfant": 5113, + "Ġdiscussion": 5114, + "Ġregarding": 5115, + "kin": 5116, + "arp": 5117, + "Ġchair": 5118, + "ogue": 5119, + "Ġproceed": 5120, + "ĠId": 5121, + "Our": 5122, + "Ġmurder": 5123, + "Man": 5124, + "Ġ49": 5125, + "asp": 5126, + "Ġsupply": 5127, + "Ġinput": 5128, + "Ġwealth": 5129, + "liament": 5130, + "Ġproced": 5131, + "orial": 5132, 
+ "ĠStat": 5133, + "ĠNFL": 5134, + "hens": 5135, + "ĠInstitute": 5136, + "Ġputting": 5137, + "ournament": 5138, + "etic": 5139, + "Ġlocated": 5140, + "Ġkid": 5141, + "eria": 5142, + "run": 5143, + "Ġprinc": 5144, + "Ġ!": 5145, + "going": 5146, + "ĠBet": 5147, + "Ġclot": 5148, + "Ġtelling": 5149, + "Ġproposed": 5150, + "iot": 5151, + "orry": 5152, + "Ġfunds": 5153, + "gment": 5154, + "ĠLife": 5155, + "Ġbaby": 5156, + "ĠBack": 5157, + "Ġspoke": 5158, + "Image": 5159, + "Ġearn": 5160, + "ĠAT": 5161, + "gu": 5162, + "Ġexchange": 5163, + "ĠLin": 5164, + "oving": 5165, + "Ġpair": 5166, + "More": 5167, + "azon": 5168, + "Ġarrested": 5169, + "Ġkilling": 5170, + "can": 5171, + "ĠCard": 5172, + "yd": 5173, + "Ġidentified": 5174, + "Ġmobile": 5175, + "Ġthanks": 5176, + "onym": 5177, + "ĠForm": 5178, + "Ġhundreds": 5179, + "ĠChris": 5180, + "ĠCat": 5181, + "Ġtrend": 5182, + "hat": 5183, + "ĠAv": 5184, + "oman": 5185, + "Ġelectric": 5186, + "ĠWil": 5187, + "SE": 5188, + "Of": 5189, + "Ġrestaur": 5190, + "oted": 5191, + "Ġtrig": 5192, + "Ġnine": 5193, + "Ġbomb": 5194, + "Why": 5195, + "¯": 5196, + "Ġcoverage": 5197, + "Ġappeal": 5198, + "ĠRobert": 5199, + "ĠSup": 5200, + "Ġfinished": 5201, + "Ġflow": 5202, + "Ġdeliver": 5203, + "Ġcalcul": 5204, + "Ġphotos": 5205, + "Ġphil": 5206, + "Ġpieces": 5207, + "Ġappre": 5208, + "kes": 5209, + "Ġrough": 5210, + "Do": 5211, + "Ġpartner": 5212, + "Ġconcerned": 5213, + "Ġ37": 5214, + "ĠGen": 5215, + "Col": 5216, + "ctors": 5217, + "Ġ=>": 5218, + "state": 5219, + "Ġsuggested": 5220, + "ĠForce": 5221, + "CE": 5222, + "Ġherself": 5223, + "ĠPlan": 5224, + "works": 5225, + "ooth": 5226, + "rency": 5227, + "Ġcorner": 5228, + "Ġhusband": 5229, + "Ġinternet": 5230, + "ĠAut": 5231, + "ems": 5232, + "osen": 5233, + "ĠAtl": 5234, + "gen": 5235, + "Ġbalance": 5236, + "62": 5237, + "Ġsounds": 5238, + "text": 5239, + "Ġarr": 5240, + "oves": 5241, + "Ġmillions": 5242, + "Ġradio": 5243, + "Ġsatisf": 5244, + "ĠDam": 5245, + "Mr": 5246, + "Go": 5247, + "Spe": 5248, + "Ġcombat": 5249, + "rant": 5250, + "ĠGree": 5251, + "Ġfuel": 5252, + "Ġdistance": 5253, + "Ġtests": 5254, + "Ġdecre": 5255, + "ĠEr": 5256, + "Ġmanaged": 5257, + "DS": 5258, + "Ġtit": 5259, + "Ġmeasures": 5260, + "ĠLiber": 5261, + "Ġattend": 5262, + "ashed": 5263, + "ĠJose": 5264, + "ĠNight": 5265, + "dit": 5266, + "ĠNov": 5267, + "ĠEnd": 5268, + "outs": 5269, + "Ġgeneration": 5270, + "Ġadvoc": 5271, + "yth": 5272, + "Ġconversation": 5273, + "ĠSky": 5274, + "active": 5275, + "cel": 5276, + "rier": 5277, + "ĠFrank": 5278, + "Ġgender": 5279, + "Ġconcent": 5280, + "Ġcarried": 5281, + "anda": 5282, + "ĠVirgin": 5283, + "Ġarrived": 5284, + "icide": 5285, + "aded": 5286, + "Ġfailure": 5287, + "Ġminimum": 5288, + "lets": 5289, + "Ġworst": 5290, + "Ġkeeping": 5291, + "Ġintended": 5292, + "Ġillegal": 5293, + "Ġsubsc": 5294, + "Ġdetermined": 5295, + "Ġtrip": 5296, + "Yes": 5297, + "Ġraise": 5298, + "Ġ~": 5299, + "Ġfeels": 5300, + "Ġpackage": 5301, + "ĠJo": 5302, + "hi": 5303, + "2016": 5304, + "real": 5305, + "Ġfra": 5306, + "Ġsymb": 5307, + "Me": 5308, + "ucky": 5309, + "pret": 5310, + "ĠKh": 5311, + "ĠEdit": 5312, + "ĠWeb": 5313, + "emic": 5314, + "ĠColor": 5315, + "Ġjustice": 5316, + "Int": 5317, + "Ġfarm": 5318, + "cknow": 5319, + "\">": 5320, + "eless": 5321, + "Ġreduced": 5322, + "Ġ500": 5323, + "xx": 5324, + "ĠRad": 5325, + "ĠWood": 5326, + "Ġclin": 5327, + "Ġhyp": 5328, + "iler": 5329, + "ura": 5330, + "kins": 5331, + "85": 5332, + "61": 5333, + "ĠTheir": 5334, + "ĠMary": 5335, + "Ġsan": 5336, + "Ġnovel": 5337, + "ĠWho": 
5338, + "Ġcapacity": 5339, + "Ġimpossible": 5340, + "Ġplays": 5341, + "Ġminister": 5342, + "ijuana": 5343, + "icate": 5344, + "ĠSet": 5345, + "Ġfram": 5346, + "Ġing": 5347, + "Ġcommunities": 5348, + "ĠFBI": 5349, + "ita": 5350, + "Ġbon": 5351, + "Ġstrateg": 5352, + "Ġinterests": 5353, + "lock": 5354, + "gers": 5355, + "mas": 5356, + "ĠAND": 5357, + "Ġconflict": 5358, + "Ġrequirements": 5359, + "Ġsac": 5360, + "Ġoperating": 5361, + "ini": 5362, + "related": 5363, + "Ġcommitted": 5364, + "Ġrelatively": 5365, + "Ġsouth": 5366, + "¯¯": 5367, + "Ġafford": 5368, + "Ġidentity": 5369, + "Ġdecisions": 5370, + "Ġaccused": 5371, + "place": 5372, + "Ġvictory": 5373, + "och": 5374, + "iat": 5375, + "Name": 5376, + "Com": 5377, + "tion": 5378, + "eds": 5379, + "Ġseek": 5380, + "Ġtight": 5381, + "ĠImages": 5382, + "Ġiniti": 5383, + "Ġhumans": 5384, + "Ġfamiliar": 5385, + "Ġaudience": 5386, + "Ġinternal": 5387, + "venture": 5388, + "Ġsides": 5389, + "ĠTO": 5390, + "Ġdim": 5391, + "Ġconclud": 5392, + "Ġappoint": 5393, + "Ġenforcement": 5394, + "ĠJim": 5395, + "ĠAssociation": 5396, + "Ġcircumst": 5397, + "ĠCanadian": 5398, + "Ġjoined": 5399, + "Ġdifferences": 5400, + "ĠLos": 5401, + "Ġprotest": 5402, + "Ġtwice": 5403, + "win": 5404, + "Ġglass": 5405, + "arsh": 5406, + "ĠArmy": 5407, + "Ġexpression": 5408, + "Ġdecide": 5409, + "Ġplanning": 5410, + "ania": 5411, + "Ġhandle": 5412, + "ĠMicrosoft": 5413, + "ĠNor": 5414, + "Ġmaximum": 5415, + "ĠRev": 5416, + "Ġsea": 5417, + "Ġeval": 5418, + "Ġhelps": 5419, + "ref": 5420, + "Ġbound": 5421, + "Ġmouth": 5422, + "Ġstandards": 5423, + "Ġclim": 5424, + "ĠCamp": 5425, + "ĠFox": 5426, + "cles": 5427, + "Ġarmy": 5428, + "ĠTechn": 5429, + "acking": 5430, + "xy": 5431, + "SS": 5432, + "Ġ42": 5433, + "Ġbug": 5434, + "ĠUkrain": 5435, + "ĠMax": 5436, + "ĠJones": 5437, + "ĠShow": 5438, + "lo": 5439, + "Ġplanet": 5440, + "Ġ75": 5441, + "Ġwinning": 5442, + "Ġfaster": 5443, + "Ġspect": 5444, + "Ġbroken": 5445, + "TR": 5446, + "Ġdefined": 5447, + "Ġhealthy": 5448, + "Ġcompetition": 5449, + "https": 5450, + "ĠIsland": 5451, + "ĠFe": 5452, + "Ġannounce": 5453, + "ĠCup": 5454, + "ĠInstead": 5455, + "Ġclient": 5456, + "Ġpossibly": 5457, + "section": 5458, + "ocket": 5459, + "look": 5460, + "Ġfinish": 5461, + "Ġcrew": 5462, + "Ġreserv": 5463, + "Ġeditor": 5464, + "Ġhate": 5465, + "Ġsale": 5466, + "Ġcontrovers": 5467, + "Ġpages": 5468, + "wing": 5469, + "Ġnumer": 5470, + "Ġopposition": 5471, + "Ġ2004": 5472, + "Ġrefuge": 5473, + "Ġflight": 5474, + "Ġapart": 5475, + "ĠLat": 5476, + "Americ": 5477, + "ĠAfrica": 5478, + "Ġapplications": 5479, + "ĠPalest": 5480, + "ĠBur": 5481, + "Ġgar": 5482, + "ĠSocial": 5483, + "Ġupgr": 5484, + "Ġshape": 5485, + "Ġspeaking": 5486, + "ansion": 5487, + "ao": 5488, + "ĠSn": 5489, + "Ġworry": 5490, + "ĠBritain": 5491, + "Please": 5492, + "roud": 5493, + "Ġhun": 5494, + "Ġintroduced": 5495, + "Ġdiet": 5496, + "Ind": 5497, + "ĠSecond": 5498, + "Ġfunctions": 5499, + "uts": 5500, + "ĠEach": 5501, + "ĠJeff": 5502, + "Ġstress": 5503, + "Ġaccounts": 5504, + "Ġguarant": 5505, + "ĠAnn": 5506, + "edia": 5507, + "Ġhonest": 5508, + "Ġtree": 5509, + "ĠAfrican": 5510, + "ĠBush": 5511, + "},": 5512, + "Ġsch": 5513, + "ĠOnly": 5514, + "Ġfif": 5515, + "igan": 5516, + "Ġexercise": 5517, + "ĠExp": 5518, + "Ġscientists": 5519, + "Ġlegislation": 5520, + "ĠWork": 5521, + "ĠSpr": 5522, + "ÃĤ": 5523, + "ĠHuman": 5524, + "Ġè": 5525, + "Ġsurvey": 5526, + "Ġrich": 5527, + "rip": 5528, + "Ġmaintain": 5529, + "Ġflo": 5530, + "Ġleadership": 5531, + "stream": 5532, + "ĠIslamic": 5533, + 
"Ġ01": 5534, + "ĠCollege": 5535, + "Ġmagic": 5536, + "ĠPrime": 5537, + "Ġfigures": 5538, + "2017": 5539, + "inder": 5540, + "xual": 5541, + "ĠDead": 5542, + "Ġabsolutely": 5543, + "Ġfourth": 5544, + "Ġpresented": 5545, + "respond": 5546, + "rible": 5547, + "Ġalcohol": 5548, + "ato": 5549, + "ĠDE": 5550, + "porary": 5551, + "Ġgrab": 5552, + "Ġvari": 5553, + "Ġquant": 5554, + "ĠPhoto": 5555, + "Ġplus": 5556, + "rick": 5557, + "arks": 5558, + "Ġalternative": 5559, + "Ġpil": 5560, + "Ġapprox": 5561, + "that": 5562, + "Ġobjects": 5563, + "ĠRo": 5564, + "ĠAndroid": 5565, + "Ġsignificantly": 5566, + "ĠRoad": 5567, + "kay": 5568, + "Read": 5569, + "avor": 5570, + "Ġacknow": 5571, + "ĠHD": 5572, + "ĠSing": 5573, + "Or": 5574, + "ĠMont": 5575, + "Ġuns": 5576, + "prof": 5577, + "Ġnegoti": 5578, + "ĠArch": 5579, + "iki": 5580, + "Ġtelevision": 5581, + "ĠJewish": 5582, + "Ġcommittee": 5583, + "Ġmotor": 5584, + "Ġappearance": 5585, + "Ġsitting": 5586, + "Ġstrike": 5587, + "ĠDown": 5588, + "comp": 5589, + "ĠHist": 5590, + "Ġfold": 5591, + "acement": 5592, + "ĠLouis": 5593, + "Ġbelong": 5594, + "ĠâĢ¢": 5595, + "Ġmort": 5596, + "Ġprepared": 5597, + "Ġ64": 5598, + "ĠMaster": 5599, + "Ġindeed": 5600, + "ĠDen": 5601, + "Ġrent": 5602, + "TA": 5603, + "ourney": 5604, + "arc": 5605, + "Su": 5606, + "97": 5607, + "Ġadvice": 5608, + "Ġchanging": 5609, + "Ġlisted": 5610, + "Ġlaunched": 5611, + "isation": 5612, + "ĠPeter": 5613, + "ishes": 5614, + "Ġlived": 5615, + "ĠMel": 5616, + "ĠSupreme": 5617, + "ĠFederal": 5618, + "Ġ);": 5619, + "ructure": 5620, + "Ġsets": 5621, + "Ġphilos": 5622, + "uous": 5623, + "ĠÂł": 5624, + "Ġapplied": 5625, + "ĠNOT": 5626, + "Ġhousing": 5627, + "ĠMount": 5628, + "Ġodd": 5629, + "Ġsust": 5630, + "DA": 5631, + "fficient": 5632, + "Ġ?": 5633, + "olved": 5634, + "Ġpowers": 5635, + "Ġthr": 5636, + "Ġremaining": 5637, + "ĠWater": 5638, + "LC": 5639, + "Ġcauses": 5640, + "ãģ®": 5641, + "Ġmanner": 5642, + "ads": 5643, + "Ġsuggests": 5644, + "Ġends": 5645, + "standing": 5646, + "fig": 5647, + "ĠDun": 5648, + "idth": 5649, + "Ġgay": 5650, + "Ġtermin": 5651, + "ĠAngeles": 5652, + "MS": 5653, + "Ġscientific": 5654, + "Ġcoal": 5655, + "apers": 5656, + "bar": 5657, + "ĠThomas": 5658, + "Ġsym": 5659, + "ĠRun": 5660, + "this": 5661, + "PC": 5662, + "igrants": 5663, + "Ġminute": 5664, + "ĠDistrict": 5665, + "cellent": 5666, + "Ġleaves": 5667, + "Ġcompleted": 5668, + "amin": 5669, + "Ġfocused": 5670, + "Ġmonitor": 5671, + "Ġvehicles": 5672, + "MA": 5673, + "ĠMass": 5674, + "ĠGrand": 5675, + "Ġaffected": 5676, + "itutional": 5677, + "Ġconstruct": 5678, + "Ġfollows": 5679, + "Ġton": 5680, + "reens": 5681, + "Ġhomes": 5682, + "ĠExt": 5683, + "ĠLevel": 5684, + "rast": 5685, + "ĠIr": 5686, + "Ġelim": 5687, + "Ġlargely": 5688, + "ĠJoe": 5689, + "Ġvotes": 5690, + "alls": 5691, + "Ġbusinesses": 5692, + "ĠFoundation": 5693, + "ĠCentral": 5694, + "Ġyards": 5695, + "Ġmaterials": 5696, + "ulner": 5697, + "Ġguide": 5698, + "Ġcloser": 5699, + "ums": 5700, + "Ġsports": 5701, + "eder": 5702, + "Just": 5703, + "Ġtaxes": 5704, + "84": 5705, + "ĠOld": 5706, + "Ġdecade": 5707, + "ola": 5708, + "Ġvir": 5709, + "Ġdropped": 5710, + "Ġdelay": 5711, + "itect": 5712, + "Ġsecure": 5713, + "stein": 5714, + "level": 5715, + "Ġtreated": 5716, + "Ġfiled": 5717, + "aine": 5718, + "Ġvan": 5719, + "Ġmir": 5720, + "Ġcolumn": 5721, + "icted": 5722, + "eper": 5723, + "Ġrot": 5724, + "Ġconsult": 5725, + "Ġentry": 5726, + "Ġmarijuana": 5727, + "ĠDou": 5728, + "Ġapparently": 5729, + "oking": 5730, + "clusive": 5731, + "Ġincreases": 5732, + 
"ano": 5733, + "Ġspecifically": 5734, + "Ġtele": 5735, + "ensions": 5736, + "Ġreligion": 5737, + "abilities": 5738, + "Ġframe": 5739, + "ĠNote": 5740, + "ĠLee": 5741, + "Ġhelping": 5742, + "Ġedge": 5743, + "oston": 5744, + "Ġorganizations": 5745, + "Ãĥ": 5746, + "ĠBoth": 5747, + "hips": 5748, + "Ġbigger": 5749, + "Ġboost": 5750, + "ĠStand": 5751, + "Ġrow": 5752, + "uls": 5753, + "abase": 5754, + "Ġrid": 5755, + "Let": 5756, + "aren": 5757, + "rave": 5758, + "Ġstret": 5759, + "PD": 5760, + "Ġvision": 5761, + "Ġwearing": 5762, + "Ġappreci": 5763, + "Ġaward": 5764, + "ĠUse": 5765, + "Ġfactor": 5766, + "war": 5767, + "ulations": 5768, + ")(": 5769, + "Ġgod": 5770, + "Ġterrit": 5771, + "Ġparam": 5772, + "asts": 5773, + "87": 5774, + "Ġenemies": 5775, + "ĠGames": 5776, + "FF": 5777, + "Ġaccident": 5778, + "Well": 5779, + "ĠMartin": 5780, + "TER": 5781, + "Ġath": 5782, + "ĠHell": 5783, + "Ġforg": 5784, + "Ġveter": 5785, + "ĠMedic": 5786, + "free": 5787, + "Ġstars": 5788, + "Ġexpensive": 5789, + "Ġacad": 5790, + "rawn": 5791, + "ĠWhe": 5792, + "Ġlock": 5793, + "Ġformat": 5794, + "Ġsoldiers": 5795, + "sm": 5796, + "Ġagent": 5797, + "Ġresponsibility": 5798, + "ora": 5799, + "ĠScience": 5800, + "Ġrapid": 5801, + "Ġtough": 5802, + "ĠJesus": 5803, + "Ġbelieves": 5804, + "ML": 5805, + "Ġwear": 5806, + "lete": 5807, + "ÃĥÃĤ": 5808, + "ĠDri": 5809, + "Ġcommission": 5810, + "ĠBob": 5811, + "Oh": 5812, + "aped": 5813, + "Ġwarm": 5814, + "ÃĥÃĤÃĥÃĤ": 5815, + "Ġ2003": 5816, + "ortion": 5817, + "Ġhasn": 5818, + "uster": 5819, + "Ġunivers": 5820, + "ĠIll": 5821, + "Ġking": 5822, + "ologies": 5823, + "94": 5824, + "ĠTem": 5825, + "ĠMos": 5826, + "Ġpatient": 5827, + "ĠMexico": 5828, + "cean": 5829, + "ĠDeath": 5830, + "ĠSanders": 5831, + "you": 5832, + "ĠCast": 5833, + "ĠCompany": 5834, + "pty": 5835, + "Ġhappening": 5836, + "FP": 5837, + "ĠBattle": 5838, + "Ġbought": 5839, + "Am": 5840, + "Mod": 5841, + "Us": 5842, + "uters": 5843, + "ĠCre": 5844, + "ĠThose": 5845, + "Ġ44": 5846, + "iser": 5847, + "Ġsoul": 5848, + "ĠTop": 5849, + "ĠHarry": 5850, + "ĠAw": 5851, + "Ġseat": 5852, + "ffee": 5853, + "Ġrevolution": 5854, + "Ġ(\"": 5855, + "ĠDuring": 5856, + "ette": 5857, + "Ġring": 5858, + "Ġoffensive": 5859, + "Ġreturns": 5860, + "Ġvideos": 5861, + "Ġdiscl": 5862, + "Ġfamous": 5863, + "enced": 5864, + "ĠSign": 5865, + "ĠRiver": 5866, + "Ġ300": 5867, + "PM": 5868, + "ĠBus": 5869, + "ĠCH": 5870, + "Ġcandidates": 5871, + "arden": 5872, + "Ġpercentage": 5873, + "Ġvisual": 5874, + "Ġthank": 5875, + "Ġtrouble": 5876, + "nergy": 5877, + "Ġ2001": 5878, + "Ġprove": 5879, + "ashion": 5880, + "Ġenh": 5881, + "ĠLong": 5882, + "UM": 5883, + "Ġconnected": 5884, + "Ġpossibility": 5885, + "Over": 5886, + "Ġexpert": 5887, + "Ġlibrary": 5888, + "arts": 5889, + "ĠDirector": 5890, + "Ġfellow": 5891, + "92": 5892, + "irty": 5893, + "Ġdry": 5894, + "Ġsigns": 5895, + "ĠLove": 5896, + "Ġquiet": 5897, + "foot": 5898, + "Ġpure": 5899, + "ĠHun": 5900, + "Ġfilled": 5901, + "phas": 5902, + "ĠElect": 5903, + "endment": 5904, + "ĠExpl": 5905, + "Ġunable": 5906, + "ns": 5907, + "mo": 5908, + "Ġvast": 5909, + "obe": 5910, + "Ġidentify": 5911, + "apping": 5912, + "ĠCarolina": 5913, + "gress": 5914, + "Ġprote": 5915, + "Ġfish": 5916, + "Ġcircumstances": 5917, + "razy": 5918, + "ĠPhot": 5919, + "Ġbodies": 5920, + "ĠMur": 5921, + "Ġdeveloping": 5922, + "ĠAR": 5923, + "Ġexperienced": 5924, + "Ġsubstant": 5925, + "ĠBoard": 5926, + "esome": 5927, + "Ġdomestic": 5928, + "Ġcombined": 5929, + "ĠPut": 5930, + "Ġchemical": 5931, + "ĠChild": 5932, + "Ġpool": 
5933, + "ĠCy": 5934, + "Ġegg": 5935, + "cons": 5936, + "sters": 5937, + "Ġhurt": 5938, + "Ġmarkets": 5939, + "Ġconservative": 5940, + "Ġsupporters": 5941, + "Ġagencies": 5942, + "idel": 5943, + "Ob": 5944, + "urb": 5945, + "Ġ43": 5946, + "ĠDefense": 5947, + "ye": 5948, + "ĠAp": 5949, + "dule": 5950, + "Ġtemperature": 5951, + "Ġconducted": 5952, + "ĠChief": 5953, + "Ġpulled": 5954, + "Ġfol": 5955, + "Last": 5956, + "onto": 5957, + "osis": 5958, + "VER": 5959, + "Des": 5960, + "ĠPan": 5961, + "First": 5962, + "Ġadvance": 5963, + "Ġlicense": 5964, + "rors": 5965, + "ĠJon": 5966, + "Ġimagine": 5967, + "Ġhell": 5968, + "Ġfixed": 5969, + "Ġincor": 5970, + "osite": 5971, + "ĠLog": 5972, + "icken": 5973, + "]:": 5974, + "Ġsurprise": 5975, + "hab": 5976, + "Ġcraft": 5977, + "olt": 5978, + "ĠJul": 5979, + "Ġdial": 5980, + "Ġrelevant": 5981, + "Ġentered": 5982, + "Ġleads": 5983, + "ĠAD": 5984, + "ĠClean": 5985, + "Ġpictures": 5986, + "essor": 5987, + "Ġalt": 5988, + "Ġpaying": 5989, + "Per": 5990, + "ĠMarket": 5991, + "Ġupdates": 5992, + "amily": 5993, + "ĠType": 5994, + "ĠHome": 5995, + "Ġ55": 5996, + "sembly": 5997, + "rome": 5998, + "83": 5999, + "Ġgreatest": 6000, + "Ġheight": 6001, + "Ġheav": 6002, + "aints": 6003, + "Ġlisten": 6004, + "aser": 6005, + "ĠSH": 6006, + "Ġcapable": 6007, + "acle": 6008, + "Ġperspect": 6009, + "inating": 6010, + "Ġoffering": 6011, + "rypt": 6012, + "ĠDevelop": 6013, + "abin": 6014, + "rc": 6015, + "Ġbright": 6016, + "alty": 6017, + "arrow": 6018, + "Ġsuppl": 6019, + "inding": 6020, + "acked": 6021, + "gypt": 6022, + "ĠAnother": 6023, + "pg": 6024, + "ĠVirginia": 6025, + "ĠLu": 6026, + "Ġplanned": 6027, + "Ġpit": 6028, + "Ġsweet": 6029, + "Type": 6030, + "ĠDi": 6031, + "Ġtypically": 6032, + "ĠFrancisco": 6033, + "Ġprospect": 6034, + "ĠDan": 6035, + "Ġteen": 6036, + "rees": 6037, + "Ġsched": 6038, + "Ġhol": 6039, + "Ġscr": 6040, + "Ġlots": 6041, + "life": 6042, + "Ġnewsp": 6043, + "Ġforget": 6044, + "ĠNone": 6045, + "ĠMiddle": 6046, + "ĠRyan": 6047, + "edd": 6048, + "Ġsevere": 6049, + "Ġsuit": 6050, + "ller": 6051, + "93": 6052, + "Ġcorrespond": 6053, + "Ġexplos": 6054, + "uations": 6055, + "Ġflag": 6056, + "game": 6057, + "rid": 6058, + "Ġprin": 6059, + "ĠData": 6060, + "Ġdeploy": 6061, + "ĠEnter": 6062, + "suit": 6063, + "ghan": 6064, + "ĠMen": 6065, + "Ġthoughts": 6066, + "Ġmatters": 6067, + "Ġadapt": 6068, + "ĠAri": 6069, + "Ġfill": 6070, + "Ġforth": 6071, + "Ġsam": 6072, + "Ġ41": 6073, + "Ġpayment": 6074, + "ĠHor": 6075, + "Ġspring": 6076, + "duc": 6077, + "Ġlosing": 6078, + "Ġbringing": 6079, + "FO": 6080, + "ala": 6081, + "Ġdistribution": 6082, + "hered": 6083, + "bour": 6084, + "ĠIsraeli": 6085, + "oma": 6086, + "Ġcombination": 6087, + "Ġplenty": 6088, + "VE": 6089, + "Can": 6090, + "ĠHaw": 6091, + "Ġperman": 6092, + "ĠSpecial": 6093, + "Ġtow": 6094, + "Ġseeking": 6095, + "Ġexamples": 6096, + "Ġclasses": 6097, + "cr": 6098, + "Ġbeer": 6099, + "Ġmoves": 6100, + "ĠIP": 6101, + "ĠKn": 6102, + "Ġpanel": 6103, + "Even": 6104, + "Ġproperly": 6105, + "Ġris": 6106, + "Ġplug": 6107, + "Ġestimated": 6108, + "Every": 6109, + "Ġdefensive": 6110, + "agraph": 6111, + "Ġpregn": 6112, + "Ġinstit": 6113, + "ĠVict": 6114, + "Ġvolume": 6115, + "Ġpositions": 6116, + "Ġlinks": 6117, + "ĠProgram": 6118, + "ĠWeek": 6119, + "agues": 6120, + "Ġtransform": 6121, + "ker": 6122, + "ĠCEO": 6123, + "Ġcas": 6124, + "Ġopponent": 6125, + "Ġtweet": 6126, + "ĠCode": 6127, + "Ġshop": 6128, + "Ġfly": 6129, + "Ġtalks": 6130, + "Ġbag": 6131, + "Phone": 6132, + "Ġaid": 6133, + "Ġplants": 6134, + 
"Ġ65": 6135, + "Ġattorney": 6136, + "arters": 6137, + "quest": 6138, + "ĠMagic": 6139, + "Ġbegins": 6140, + "Ġmyster": 6141, + "Ġenvironmental": 6142, + "Ġstorage": 6143, + "NN": 6144, + "Ġmarg": 6145, + "Ġske": 6146, + "Ġmetal": 6147, + "elly": 6148, + "Ġordered": 6149, + "Ġremained": 6150, + "Ġloved": 6151, + "Ġprompt": 6152, + "Ġupdated": 6153, + "Ġexperts": 6154, + "Ġwalking": 6155, + "Ġancient": 6156, + "Ġperformed": 6157, + "ATE": 6158, + "Ġneither": 6159, + "iency": 6160, + "Ġmanufacture": 6161, + "ĠPak": 6162, + "Ġselected": 6163, + "Ġmine": 6164, + "Ġultimately": 6165, + "Ġexplan": 6166, + "Ġlabel": 6167, + "ĠServices": 6168, + "ributed": 6169, + "Trump": 6170, + "Ġsyn": 6171, + "ĠUlt": 6172, + "SC": 6173, + "Ġmeat": 6174, + "Ġgiant": 6175, + "ĠWars": 6176, + "ĠON": 6177, + "Ġadm": 6178, + "Ġinterpret": 6179, + "Ġevening": 6180, + "Ġevil": 6181, + "ĠBoston": 6182, + "ĠWild": 6183, + "ĠÃ": 6184, + "ĠBitcoin": 6185, + "ĠAmazon": 6186, + "Dr": 6187, + "ĠInformation": 6188, + "Ġobviously": 6189, + "Ġadvanced": 6190, + "Photo": 6191, + "olar": 6192, + "Ġweather": 6193, + "Ġsymbol": 6194, + "Ġsole": 6195, + "Ġpotentially": 6196, + "oster": 6197, + "Ġoriginally": 6198, + "mun": 6199, + "300": 6200, + "aze": 6201, + "essions": 6202, + "Ġdeck": 6203, + "Ġstood": 6204, + "Ġyouth": 6205, + "ĠBern": 6206, + "Rep": 6207, + "ĠTest": 6208, + "Ġbasically": 6209, + "otic": 6210, + "Ġinvolve": 6211, + "olit": 6212, + "lyn": 6213, + "See": 6214, + "Ġaircraft": 6215, + "Ġconfirm": 6216, + "EW": 6217, + "Ġmessages": 6218, + "ĠRichard": 6219, + "Ġkit": 6220, + "Ġprohib": 6221, + "Ġvulner": 6222, + "isters": 6223, + "Ġexistence": 6224, + "Ġturning": 6225, + "ĠSP": 6226, + "Ġdesire": 6227, + "Ġflat": 6228, + "Ġment": 6229, + "season": 6230, + "anges": 6231, + "Ġneighborhood": 6232, + "ĠLake": 6233, + "ATION": 6234, + "Ġpointed": 6235, + "bur": 6236, + "Ġinnov": 6237, + "ucks": 6238, + "UL": 6239, + "Ġprofessor": 6240, + "Ġexpressed": 6241, + "AB": 6242, + "icious": 6243, + "Ġ2002": 6244, + "ĠDev": 6245, + "Ġsession": 6246, + "Ġbare": 6247, + "sen": 6248, + "Ġdiss": 6249, + "ĠCath": 6250, + "ĠPass": 6251, + "ĠPoint": 6252, + "Ġdoctor": 6253, + "orrow": 6254, + "ailed": 6255, + "ĠRub": 6256, + "ĠDC": 6257, + "ĠCharl": 6258, + "person": 6259, + "Ġwriter": 6260, + "ighters": 6261, + "ureau": 6262, + "Ġoblig": 6263, + "Ġrecorded": 6264, + "Ġbroke": 6265, + "Ġorders": 6266, + "ilty": 6267, + "Ġmotion": 6268, + "inity": 6269, + "law": 6270, + "adium": 6271, + "Ġimmigration": 6272, + "Ġcontrast": 6273, + "Ġbatt": 6274, + "Ġexcellent": 6275, + "Ġtechnical": 6276, + "ami": 6277, + "Ġtun": 6278, + "Ġcloud": 6279, + "ĠYear": 6280, + "geon": 6281, + "Ġcreation": 6282, + "Ġstrange": 6283, + "Ġauth": 6284, + "Ġfort": 6285, + "born": 6286, + "Ġextent": 6287, + "ĠToday": 6288, + "ĠClub": 6289, + "Ġrain": 6290, + "Ġsample": 6291, + "Ġaccepted": 6292, + "Ġtact": 6293, + "Ġfired": 6294, + "ĠSon": 6295, + "Ġstands": 6296, + "Ġboot": 6297, + "Ġ47": 6298, + "Ġstatements": 6299, + "Ġversions": 6300, + "Ġselling": 6301, + "ounded": 6302, + "Ġ1990": 6303, + "Ġweren": 6304, + "ĠWatch": 6305, + "Ġexperiment": 6306, + "Post": 6307, + "Ġretail": 6308, + "uled": 6309, + "Inst": 6310, + "unte": 6311, + "ãĥ¼": 6312, + "Ġdepart": 6313, + "Ġbond": 6314, + "ivery": 6315, + "ompl": 6316, + "Ġreaction": 6317, + "ĠSyrian": 6318, + "ĠPac": 6319, + "apped": 6320, + "aniel": 6321, + "DP": 6322, + "Ġresolution": 6323, + "Ġreact": 6324, + "Ġapproved": 6325, + "onom": 6326, + "mond": 6327, + "ĠOffic": 6328, + "---": 6329, + "Ġreplace": 6330, + 
"Ġtack": 6331, + "Ġsport": 6332, + "Ġchain": 6333, + "Ġemergency": 6334, + "rad": 6335, + "ĠPalestin": 6336, + "Ġ46": 6337, + "Ġautomatically": 6338, + "Ġroute": 6339, + "Ġpal": 6340, + "Ġbanks": 6341, + "ĠParis": 6342, + "ĠMedia": 6343, + "road": 6344, + "icing": 6345, + "ixt": 6346, + "isted": 6347, + "Ġgrew": 6348, + "Ġcoord": 6349, + "ĠWhere": 6350, + "omin": 6351, + "Ġsubs": 6352, + "��": 6353, + "Ġ±": 6354, + "Ġcorporate": 6355, + "Ġselection": 6356, + "noon": 6357, + "ĠReport": 6358, + "cs": 6359, + "cluding": 6360, + "orders": 6361, + "anche": 6362, + "ĠIts": 6363, + "Ġslowly": 6364, + "ĠEgypt": 6365, + "ĠAcc": 6366, + "Ġcolle": 6367, + "iques": 6368, + "EX": 6369, + "Ġattempts": 6370, + "url": 6371, + "ĠCross": 6372, + "Ġfindings": 6373, + "ĠSC": 6374, + "ĠOR": 6375, + "Ġindex": 6376, + "ensity": 6377, + "ĠWay": 6378, + "ĠLand": 6379, + "Ġshock": 6380, + "dis": 6381, + "Ġdynam": 6382, + "Ġcart": 6383, + "mosp": 6384, + "Since": 6385, + "iest": 6386, + "ĠBoy": 6387, + "Ġstorm": 6388, + "ĠContin": 6389, + "2013": 6390, + "hew": 6391, + "ilit": 6392, + "Ġessential": 6393, + "iquid": 6394, + "Other": 6395, + "ivered": 6396, + "Ġreasonable": 6397, + "Act": 6398, + "Ġsubsequ": 6399, + "ĠPack": 6400, + "ĠFort": 6401, + "Ġconsidering": 6402, + "Ġuniversity": 6403, + "log": 6404, + "Ġmarried": 6405, + "Ġillust": 6406, + "ĠTrue": 6407, + "£ı": 6408, + "Ġnumerous": 6409, + "rastructure": 6410, + "Ġseriously": 6411, + "Ġreferred": 6412, + "ua": 6413, + "Ġconsistent": 6414, + "onna": 6415, + "ĠReal": 6416, + "ruption": 6417, + "ciples": 6418, + "Ġfacts": 6419, + "91": 6420, + "otes": 6421, + "erg": 6422, + "Then": 6423, + "Ġaccompl": 6424, + "Note": 6425, + "Ġrevenue": 6426, + "Ġpassing": 6427, + "Ġmal": 6428, + "een": 6429, + "ĠYet": 6430, + "Ġgather": 6431, + "terday": 6432, + "ework": 6433, + "ĠAuthor": 6434, + "Pe": 6435, + "Ġoptim": 6436, + "Ġrub": 6437, + "Ġè£ı": 6438, + "Ġunknown": 6439, + "stone": 6440, + "Ġunion": 6441, + "olve": 6442, + "Ġopportunities": 6443, + "Ġbrowser": 6444, + "ĠWal": 6445, + "ĠCost": 6446, + "Ġreporting": 6447, + "sts": 6448, + "pet": 6449, + "Ġsand": 6450, + "Ġsuddenly": 6451, + "Ġsurprising": 6452, + "ĠVR": 6453, + "Ġsomewhat": 6454, + "ĠBas": 6455, + "ulture": 6456, + "izz": 6457, + "ĠCD": 6458, + "Ġchallenges": 6459, + "Ġsettings": 6460, + "Ġexperiences": 6461, + "ĠFull": 6462, + "Ġcann": 6463, + "Ġreceiving": 6464, + "EST": 6465, + "Ġjoint": 6466, + "Ġcultural": 6467, + "Ġast": 6468, + "82": 6469, + "astern": 6470, + "ceived": 6471, + "ĠCru": 6472, + "Ġbull": 6473, + "pired": 6474, + "amm": 6475, + "Ġfacing": 6476, + "power": 6477, + "Ġboss": 6478, + "ĠHol": 6479, + "Ġinstr": 6480, + "Ġincreasingly": 6481, + "Ġshift": 6482, + "Ġstreets": 6483, + "ĠWilliams": 6484, + "abb": 6485, + "Ġlie": 6486, + "Ġlaugh": 6487, + "ĠCa": 6488, + "PL": 6489, + "Ġadults": 6490, + "Ġcustomer": 6491, + "Ġobtained": 6492, + "Ġsupporting": 6493, + "html": 6494, + "fire": 6495, + "Ġdetailed": 6496, + "Ġpicked": 6497, + "ĠRight": 6498, + "lder": 6499, + "EE": 6500, + "stood": 6501, + "ĠKim": 6502, + "Ġwire": 6503, + "Ġsight": 6504, + "Ġdevelopers": 6505, + "Ġpersons": 6506, + "Ġsad": 6507, + "Ġcup": 6508, + "Ġwarning": 6509, + "Ġboys": 6510, + "long": 6511, + "Ġbird": 6512, + "fo": 6513, + "Ġwal": 6514, + "Ġobserved": 6515, + "Ġzone": 6516, + "iveness": 6517, + "Ġchannel": 6518, + "cript": 6519, + "Ġrefused": 6520, + "ĠAgain": 6521, + "Ġsuc": 6522, + "Ġspokesman": 6523, + "ĠRef": 6524, + "rite": 6525, + "ouston": 6526, + "ãĥ³": 6527, + "ĠSher": 6528, + "Ġacts": 6529, + "ĠName": 
6530, + "Ġstruggle": 6531, + "arry": 6532, + "ometimes": 6533, + "Ġdiscrim": 6534, + "HT": 6535, + "Ġcategory": 6536, + "Ġrealize": 6537, + "Ġemployee": 6538, + "ĠAfghan": 6539, + "enger": 6540, + "Ġguns": 6541, + "ĠSteve": 6542, + "ĠMot": 6543, + "ĠOl": 6544, + "oked": 6545, + "Ġthick": 6546, + "Ġfairly": 6547, + "illy": 6548, + "Ġsurve": 6549, + "ĠMat": 6550, + "weight": 6551, + "âĶ": 6552, + "Ġtroops": 6553, + "Ġagents": 6554, + "Ġbattery": 6555, + "Ġmotiv": 6556, + "á": 6557, + "Sec": 6558, + "den": 6559, + "overy": 6560, + "LS": 6561, + "Ġflu": 6562, + "Ġconfident": 6563, + "ĠOper": 6564, + "Ġempty": 6565, + "Ġphen": 6566, + "Ġsector": 6567, + "Ġexcited": 6568, + "Ġremote": 6569, + "aph": 6570, + "oen": 6571, + "Ġdestroyed": 6572, + "Ġmoral": 6573, + "ĠHP": 6574, + "ĠRon": 6575, + "Ġdress": 6576, + "ĠBat": 6577, + "Ġlit": 6578, + "ĠMS": 6579, + "Ġaf": 6580, + "HL": 6581, + "rum": 6582, + "isms": 6583, + "Ġshouldn": 6584, + "Ġsympt": 6585, + "ĠToronto": 6586, + "hetic": 6587, + "Ġcarbon": 6588, + "Ġinstalled": 6589, + "Ġviolent": 6590, + "Ġsolar": 6591, + "ja": 6592, + "Ġpractices": 6593, + "Ġride": 6594, + "ĠPenn": 6595, + "Ġimproved": 6596, + "Ġaudio": 6597, + "Ġbehavi": 6598, + "ĠPS": 6599, + "Ġeating": 6600, + "Data": 6601, + "ĠReview": 6602, + "pass": 6603, + "claim": 6604, + "uated": 6605, + "angers": 6606, + "chen": 6607, + "Ġproperties": 6608, + "Ġanywhere": 6609, + "Another": 6610, + "Ġblow": 6611, + "ĠJackson": 6612, + "Ġproud": 6613, + "Ġplane": 6614, + "lines": 6615, + "Ġsquare": 6616, + "Ġproof": 6617, + "ansas": 6618, + "Ġtalked": 6619, + "makers": 6620, + "Ġsister": 6621, + "Ġholds": 6622, + "Ġresident": 6623, + "Ġ==": 6624, + "Ġresistance": 6625, + "Ġsplit": 6626, + "Ġprosecut": 6627, + "Ġconfidence": 6628, + "resents": 6629, + "Ġcuts": 6630, + "Ġexception": 6631, + "Ġzero": 6632, + "Getty": 6633, + "Ġcopyright": 6634, + "Ġtotally": 6635, + "ormal": 6636, + "ifications": 6637, + "ĠAustralian": 6638, + "Ġsick": 6639, + "Ġ150": 6640, + "Ġhousehold": 6641, + "Ġfees": 6642, + "Ġdrivers": 6643, + "ogen": 6644, + "ĠNY": 6645, + "Ġnecessarily": 6646, + "Ġregulations": 6647, + "earing": 6648, + "sl": 6649, + "Ġperspective": 6650, + "care": 6651, + "icial": 6652, + "His": 6653, + "Ġescape": 6654, + "Ġsurprised": 6655, + "ĠVan": 6656, + "urrent": 6657, + "Ġvac": 6658, + "81": 6659, + "ĠThus": 6660, + "Ġemphas": 6661, + "ĠChampions": 6662, + "ĠIce": 6663, + "Ġnarr": 6664, + "Ġheads": 6665, + "Ġcausing": 6666, + "bel": 6667, + "fortunately": 6668, + "ĠMa": 6669, + "Ġtargets": 6670, + "cipl": 6671, + "Ġafternoon": 6672, + "Ġadds": 6673, + "ĠMaybe": 6674, + "ĠFour": 6675, + "essed": 6676, + "plete": 6677, + "Ġusual": 6678, + "cho": 6679, + "ingu": 6680, + "Ġwithd": 6681, + "ĠEnergy": 6682, + "ĠEconom": 6683, + "OO": 6684, + "Ġarticles": 6685, + "Ġinjured": 6686, + "Ġmanage": 6687, + "Ġexplains": 6688, + "Ġdiagn": 6689, + "Rec": 6690, + "atures": 6691, + "Ġlinked": 6692, + "Ġdiscussed": 6693, + "Ġexplo": 6694, + "Ġoccasion": 6695, + "athan": 6696, + "Ġopposite": 6697, + "Ġfaces": 6698, + "Ġdenied": 6699, + "ĠKnight": 6700, + "Ġnut": 6701, + "Ġapproximately": 6702, + "Ġdisappoint": 6703, + "onymous": 6704, + "ĠBest": 6705, + "ĠLo": 6706, + "ĠHy": 6707, + "ĠAff": 6708, + "Ġvoting": 6709, + "anwhile": 6710, + "ĠIII": 6711, + "Ġinstitutions": 6712, + "agram": 6713, + "ĠDaily": 6714, + "Ġdrag": 6715, + "Ġnearby": 6716, + "Ġguilty": 6717, + "Ġconver": 6718, + "Pre": 6719, + "ship": 6720, + "Ġreward": 6721, + "Ġphilosoph": 6722, + "ĠSS": 6723, + "ugh": 6724, + "Ġapps": 6725, + "friend": 
6726, + "Ġupper": 6727, + "Ġadvert": 6728, + "Ġsnow": 6729, + "Ġfrust": 6730, + "Ġourselves": 6731, + "Fr": 6732, + "ĠDie": 6733, + "ampion": 6734, + "Ġdismiss": 6735, + "Ġcere": 6736, + "Ġsignal": 6737, + "from": 6738, + "Ġ).": 6739, + "Ġ52": 6740, + "Ġcrimes": 6741, + "itors": 6742, + "estival": 6743, + "useum": 6744, + "Ġcouncil": 6745, + "ĠSaud": 6746, + "May": 6747, + "ĠGun": 6748, + "ician": 6749, + "ether": 6750, + "Ġsufficient": 6751, + "ĠHen": 6752, + "sole": 6753, + "Ġhistorical": 6754, + "ĠFar": 6755, + "ĠTurn": 6756, + "Ġpin": 6757, + "Ġsucceed": 6758, + "mat": 6759, + "lymp": 6760, + "Ġtradition": 6761, + "ĠOk": 6762, + "Ġcro": 6763, + "Ġdescription": 6764, + "alle": 6765, + "Ġsky": 6766, + "Te": 6767, + "Ġwidely": 6768, + "Ġwave": 6769, + "Ġdefinition": 6770, + "ĠJews": 6771, + "Ġcycle": 6772, + "Ġrefere": 6773, + "Ġbrings": 6774, + "usal": 6775, + "Ġalive": 6776, + "Ġfrequently": 6777, + "Ġintention": 6778, + "ĠControl": 6779, + "lv": 6780, + "ystem": 6781, + "Ġprivacy": 6782, + "gent": 6783, + "rence": 6784, + "ĠQuest": 6785, + "ĠChristmas": 6786, + "Ġrail": 6787, + "Ġcooper": 6788, + "Ġtested": 6789, + "ĠCapt": 6790, + "asks": 6791, + "Ġcomfortable": 6792, + "Ġdelivered": 6793, + "scape": 6794, + "Ġdepth": 6795, + "ĠGOP": 6796, + "Ġwrites": 6797, + "Ġassets": 6798, + "Ġsav": 6799, + "iments": 6800, + "Ġtransition": 6801, + "Ġartist": 6802, + "ĠLook": 6803, + "Ġlob": 6804, + "Ġcomponents": 6805, + "arity": 6806, + "Ġwalked": 6807, + "Ġroot": 6808, + "Ġparticipants": 6809, + "Ġnoticed": 6810, + "Ġresc": 6811, + "Ġnav": 6812, + "ĠAdminist": 6813, + "da": 6814, + "utral": 6815, + "plate": 6816, + "Ġimportance": 6817, + "Ġassert": 6818, + "iously": 6819, + "cription": 6820, + "Ġinjuries": 6821, + "ĠCheck": 6822, + "Ġregistered": 6823, + "Ġintent": 6824, + "Ġmissed": 6825, + "ographic": 6826, + "Ġsentence": 6827, + "ounter": 6828, + "Ġassistance": 6829, + "evin": 6830, + "Ġdatabase": 6831, + "Ġbuildings": 6832, + "Ġclassic": 6833, + "Ġthinks": 6834, + "ĠOhio": 6835, + "Pr": 6836, + "ugg": 6837, + "Ġfee": 6838, + "pan": 6839, + "Ġeffectively": 6840, + "Ġfacility": 6841, + "Ġbear": 6842, + "Ġchapter": 6843, + "Ġdogs": 6844, + "ĠColumb": 6845, + "Ġlatter": 6846, + "itial": 6847, + "Ġadmitted": 6848, + "TV": 6849, + "ĠGeorg": 6850, + "Ġposts": 6851, + "\\\\": 6852, + "Ġlawyer": 6853, + "Ġequival": 6854, + "Ġmand": 6855, + "Ġcontrolled": 6856, + "ĠWalk": 6857, + "ĠAndrew": 6858, + "Ġmenu": 6859, + "amental": 6860, + "Ġprotected": 6861, + "va": 6862, + "Ġadministr": 6863, + "oral": 6864, + "Ġrein": 6865, + "ĠSar": 6866, + "Ġamounts": 6867, + "Ġnative": 6868, + "ĠMoon": 6869, + "Ġrepresents": 6870, + "Ġabandon": 6871, + "Ġcarrying": 6872, + "Ġtank": 6873, + "mary": 6874, + "Ġdeclared": 6875, + "Tube": 6876, + "Ġhat": 6877, + "Ġpunish": 6878, + "ellect": 6879, + "mes": 6880, + "Ġuniverse": 6881, + "ĠRod": 6882, + "phy": 6883, + "Ġinfrastructure": 6884, + "Ġ51": 6885, + "Ġopposed": 6886, + "ownt": 6887, + "ca": 6888, + "ĠMake": 6889, + "Ġhardware": 6890, + "Ġcoffee": 6891, + "Rel": 6892, + "bal": 6893, + "world": 6894, + "ĠSaf": 6895, + "ĠSea": 6896, + "inals": 6897, + "Ġowned": 6898, + "Ġhall": 6899, + "ersion": 6900, + "Ġdescribe": 6901, + "ĠPot": 6902, + "Ġportion": 6903, + "Ġatmosp": 6904, + "Ġgovernments": 6905, + "Ġdepending": 6906, + "Ġoffense": 6907, + "Ġtrick": 6908, + "awa": 6909, + "ĠLine": 6910, + "ĠVis": 6911, + "ĠHard": 6912, + "ĠOrig": 6913, + "ĠClick": 6914, + "Ġdesk": 6915, + "ĠValley": 6916, + "ĠSov": 6917, + "Ġmovies": 6918, + "Ġremark": 6919, + "Ġmail": 6920, + 
"Ġconscious": 6921, + "Ġruling": 6922, + "ĠRights": 6923, + "Ġmedic": 6924, + "hent": 6925, + "ĠWomen": 6926, + "><": 6927, + "Ġreplaced": 6928, + "ĠPrem": 6929, + "ĠThanks": 6930, + "Ġrenew": 6931, + "ĠBall": 6932, + "iform": 6933, + "Ġshots": 6934, + "Comm": 6935, + "Ġarmed": 6936, + "Ġconstant": 6937, + "Ġtaste": 6938, + "Ġrealized": 6939, + "Ġbuff": 6940, + "Ġmo": 6941, + "Ġefficient": 6942, + "Most": 6943, + "oration": 6944, + "ifies": 6945, + "Ġcommunication": 6946, + "Ġflood": 6947, + "Ġconsequences": 6948, + "Ġanyway": 6949, + "igg": 6950, + "ĠGM": 6951, + "ĠThank": 6952, + "Ġiron": 6953, + "Ġevolution": 6954, + "ĠCop": 6955, + "twitter": 6956, + "Ġ95": 6957, + "Ġrelationships": 6958, + "adel": 6959, + "ĠYoung": 6960, + "Ġproposal": 6961, + "ayers": 6962, + "uilding": 6963, + "ĠHot": 6964, + "ORE": 6965, + "cos": 6966, + "Ġcollabor": 6967, + "PG": 6968, + "axy": 6969, + "Ġknowing": 6970, + "Ġsupports": 6971, + "owed": 6972, + "Ġcontrols": 6973, + "Ġmerely": 6974, + "umer": 6975, + "Ġathlet": 6976, + "Ġfashion": 6977, + "path": 6978, + "Ġgift": 6979, + "Ġera": 6980, + "AND": 6981, + "Ġkinds": 6982, + "ĠKorean": 6983, + "Ġlegit": 6984, + "ulous": 6985, + "Ġessentially": 6986, + "Ġtherap": 6987, + "nic": 6988, + "Ġsuffered": 6989, + "Ġhur": 6990, + "Ġpromise": 6991, + "Ġexcess": 6992, + "Ġoverw": 6993, + "Ġprime": 6994, + "ĠHouston": 6995, + "erry": 6996, + "ĠMs": 6997, + "RS": 6998, + "2012": 6999, + "Ġstores": 7000, + "ĠOlymp": 7001, + "Ġjourney": 7002, + "Although": 7003, + "Sub": 7004, + "ĠEduc": 7005, + "ĠChapter": 7006, + "Ġrequests": 7007, + "Ġconsumers": 7008, + "Ġtiny": 7009, + "Ġisol": 7010, + "ĠFair": 7011, + "ba": 7012, + "ĠYOU": 7013, + "Ġcrash": 7014, + "celer": 7015, + "Ġemotional": 7016, + "Ġgoods": 7017, + "Ġelected": 7018, + "Ġmoder": 7019, + "ĠLinux": 7020, + "Ġblocks": 7021, + "Ġisland": 7022, + "ĠSociety": 7023, + "Ġelections": 7024, + "Ġbroadcast": 7025, + "Ġcheap": 7026, + "Ġnations": 7027, + "Ġseasons": 7028, + "400": 7029, + "Ġwaste": 7030, + "ĠSat": 7031, + "Ġfields": 7032, + "employ": 7033, + "Ġprofile": 7034, + "Ġauthors": 7035, + "ALL": 7036, + "ĠGra": 7037, + "west": 7038, + "ĠTy": 7039, + "Ġdeaths": 7040, + "Ġvacc": 7041, + "Ġformed": 7042, + "Ġdu": 7043, + "Ġongoing": 7044, + "ĠMuslims": 7045, + "elf": 7046, + "igure": 7047, + "Ġassume": 7048, + "ĠUkraine": 7049, + "water": 7050, + "Ġcoast": 7051, + "Ġvoted": 7052, + "gor": 7053, + "ĠAS": 7054, + "ĠMichigan": 7055, + "aza": 7056, + "ĠArm": 7057, + "iro": 7058, + "Ġflex": 7059, + "asters": 7060, + "''": 7061, + "Ġwelcome": 7062, + "arl": 7063, + "Ġlocations": 7064, + "igation": 7065, + "ĠFil": 7066, + "Ġbuying": 7067, + "Ġarchitect": 7068, + "Ġharder": 7069, + "ĠCub": 7070, + "Ġinterface": 7071, + "Ġrestaurant": 7072, + "Ġdiscover": 7073, + "Ġexceed": 7074, + "Ġfavour": 7075, + "gery": 7076, + "Ġduty": 7077, + "Ġpitch": 7078, + "ador": 7079, + "ĠMach": 7080, + "boy": 7081, + "Ġresponded": 7082, + "Ġextended": 7083, + "hers": 7084, + "Many": 7085, + "raid": 7086, + "ifer": 7087, + "ĠIns": 7088, + "Ser": 7089, + "Ġmedium": 7090, + "she": 7091, + "ĠSports": 7092, + "Ġmagazine": 7093, + "utation": 7094, + "Ġlimits": 7095, + "ĠGall": 7096, + "Ġexternal": 7097, + "razil": 7098, + "Ġyounger": 7099, + "tle": 7100, + "Ġremind": 7101, + "ĠCON": 7102, + "Ġimmediate": 7103, + "Ġhidden": 7104, + "Ġvolunte": 7105, + "Ġsimpl": 7106, + "odcast": 7107, + "Ġphase": 7108, + "dr": 7109, + "Ġplot": 7110, + "Ġexposure": 7111, + "RI": 7112, + "ograp": 7113, + "vin": 7114, + "anish": 7115, + "ĠAcad": 7116, + "ĠEngine": 7117, + 
"Ġexpansion": 7118, + "ĠPay": 7119, + "Your": 7120, + "Ġpushed": 7121, + "ĠEll": 7122, + "ĠHead": 7123, + "Ġmarketing": 7124, + "ĠAC": 7125, + "ket": 7126, + "Ġhits": 7127, + "Ġgro": 7128, + "ĠAge": 7129, + "ĠScot": 7130, + "][": 7131, + "Ġstim": 7132, + "ĠiPhone": 7133, + "ĪĴ": 7134, + "Ġnarrow": 7135, + "ĠGetty": 7136, + "ĠTurkey": 7137, + "Ġperfectly": 7138, + "Ġenable": 7139, + "utch": 7140, + "Ġprecise": 7141, + "Ġregime": 7142, + "Ġshif": 7143, + "Ġcompens": 7144, + "gun": 7145, + "div": 7146, + "Ġchosen": 7147, + "ĠKen": 7148, + "Any": 7149, + "Ġtrees": 7150, + "Ġrecommended": 7151, + "ĠRen": 7152, + "uable": 7153, + "ĠHT": 7154, + "Follow": 7155, + "EG": 7156, + "ĠHand": 7157, + "ĠKenn": 7158, + "Ġarguments": 7159, + "Ġexists": 7160, + "Ġbike": 7161, + "ĠConserv": 7162, + "Ġbreaking": 7163, + "ĠGar": 7164, + "Ġcrazy": 7165, + "Ġvirtual": 7166, + "aylor": 7167, + "ixel": 7168, + "Ġ1980": 7169, + "Ġpermission": 7170, + "ĠSeries": 7171, + "Ġconsumer": 7172, + "Ġclosely": 7173, + "called": 7174, + "Ġ54": 7175, + "Ġhopes": 7176, + "Ġarray": 7177, + "ĠWin": 7178, + "ĠLabour": 7179, + "Ġspons": 7180, + "ĠIre": 7181, + "Ġpow": 7182, + "Ġreaders": 7183, + "Ġemployment": 7184, + "Ġcreature": 7185, + "Ġresulting": 7186, + "Ġaccurate": 7187, + "Ġmoments": 7188, + "Ġargued": 7189, + "Ġped": 7190, + "During": 7191, + "Ġ53": 7192, + "ĠTal": 7193, + "Ġsought": 7194, + "Ġsuffering": 7195, + "Ġicon": 7196, + "lee": 7197, + "Ġ($": 7198, + "alian": 7199, + "°": 7200, + "Ġpra": 7201, + "Ġbonus": 7202, + "(\"": 7203, + "ko": 7204, + "Ġacting": 7205, + "DE": 7206, + "fall": 7207, + "Ġcomparison": 7208, + "Ġsmooth": 7209, + "ĠNAS": 7210, + "upp": 7211, + "ĠJoseph": 7212, + "eping": 7213, + "ĠTake": 7214, + "ĠMid": 7215, + "Ġsending": 7216, + "fast": 7217, + "ĠFall": 7218, + "Ġdealing": 7219, + "user": 7220, + "ĠOrgan": 7221, + "Co": 7222, + "Ġattached": 7223, + "Ġsees": 7224, + "%.": 7225, + "Ġtypical": 7226, + "ART": 7227, + "Ġfinds": 7228, + "ĠAsia": 7229, + "umin": 7230, + "ĠCore": 7231, + "ĠEnt": 7232, + "inent": 7233, + "uce": 7234, + "ĠBlood": 7235, + "ĠNever": 7236, + "Ġemails": 7237, + "Ġhighlight": 7238, + "Ġconfront": 7239, + "atus": 7240, + "uted": 7241, + "Ġunus": 7242, + "Ġtopic": 7243, + "ĠAdam": 7244, + "Ġble": 7245, + "ati": 7246, + "Ġunderstood": 7247, + "Set": 7248, + "struct": 7249, + "TP": 7250, + "Ġmob": 7251, + "aa": 7252, + "ĠStart": 7253, + "pected": 7254, + "sell": 7255, + "Ġdedicated": 7256, + "ĠCA": 7257, + "uan": 7258, + "Ġsongs": 7259, + "escription": 7260, + "Ġtech": 7261, + "Ġrape": 7262, + "Ġaside": 7263, + "Ġgrant": 7264, + "Ġ56": 7265, + "sub": 7266, + "Ġargue": 7267, + "Ġcontaining": 7268, + "Ġschedule": 7269, + "Ġliberal": 7270, + "Ġpublicly": 7271, + "Ġheavily": 7272, + "ĠUt": 7273, + "iner": 7274, + "ĠSection": 7275, + "ĠCare": 7276, + "weet": 7277, + "ls": 7278, + "Dis": 7279, + "âĶĢ": 7280, + "ĠFollow": 7281, + "Back": 7282, + "ĠIT": 7283, + "Ġbes": 7284, + "ji": 7285, + "ĠHit": 7286, + "ested": 7287, + "Ġeverybody": 7288, + "ĠSwed": 7289, + "Ġfemin": 7290, + "Ġfacilities": 7291, + "Ġconven": 7292, + "Comp": 7293, + "ĠOS": 7294, + "core": 7295, + "Ġanx": 7296, + "Ġdivision": 7297, + "ĠCam": 7298, + "ĠStan": 7299, + "mates": 7300, + "Ġexplore": 7301, + "plom": 7302, + "Ġshares": 7303, + "pload": 7304, + "anes": 7305, + "Ġideal": 7306, + "eters": 7307, + "ĠBase": 7308, + "Ġplastic": 7309, + "Ġdistinct": 7310, + "ĠNetwork": 7311, + "ĠSeattle": 7312, + "Ġtrading": 7313, + "ensus": 7314, + "intend": 7315, + "Ġexhib": 7316, + "Ġinitially": 7317, + "ĠFood": 7318, + 
"Ġthousand": 7319, + "ĠBusiness": 7320, + "acter": 7321, + "Ġparagraph": 7322, + "Ġroughly": 7323, + "Ġwww": 7324, + "Ġcreative": 7325, + "ĠConf": 7326, + "Ġconsumption": 7327, + "Ġfilms": 7328, + "agan": 7329, + "Ġobtain": 7330, + "Ġtall": 7331, + "Ġtor": 7332, + "Ġacknowled": 7333, + "Ġgrown": 7334, + "alo": 7335, + "KE": 7336, + "Ġ400": 7337, + "enders": 7338, + "taining": 7339, + "UG": 7340, + "Ġsuicide": 7341, + "Ġwatched": 7342, + "ĠList": 7343, + "ali": 7344, + "rehens": 7345, + "Ġsurrounding": 7346, + "Ġpip": 7347, + "Ġflying": 7348, + "ĠJava": 7349, + "ordan": 7350, + "Ġserving": 7351, + "inations": 7352, + "post": 7353, + "Ġsho": 7354, + "Av": 7355, + "Ġjail": 7356, + "zy": 7357, + "Ġ1999": 7358, + "Ġ>": 9609, + "orous": 9610, + "Ġfirms": 9611, + "screen": 9612, + "una": 9613, + "Ġembarrass": 9614, + "ulse": 9615, + "Ġletting": 9616, + "Ġthrew": 9617, + "iley": 9618, + "Ġchannels": 9619, + "lan": 9620, + "ĠVegas": 9621, + "Ġsear": 9622, + "Ġfantastic": 9623, + "arre": 9624, + "uzzle": 9625, + "ĠDer": 9626, + "Those": 9627, + "Ġswing": 9628, + "Ġsheet": 9629, + "index": 9630, + "cover": 9631, + "ogan": 9632, + "Ġvariables": 9633, + "ĠTech": 9634, + "Ġspoken": 9635, + "achel": 9636, + "ĠDa": 9637, + "ĠMountain": 9638, + "Ġloaded": 9639, + "Ġfootage": 9640, + "version": 9641, + "Ġunl": 9642, + "ĠPhoenix": 9643, + "Ġthrowing": 9644, + "Ġfiring": 9645, + "Ġtracking": 9646, + "Ġwidth": 9647, + "Ġstruggling": 9648, + "rooms": 9649, + "otion": 9650, + "Ġmonthly": 9651, + "ĠServer": 9652, + "Ġeggs": 9653, + "open": 9654, + "MC": 9655, + "Ġ1993": 9656, + "Ġhired": 9657, + "Ġstayed": 9658, + "ĠAllen": 9659, + "Ġstro": 9660, + "Ġ98": 9661, + "step": 9662, + "ĠTurkish": 9663, + "Ġfabric": 9664, + "isting": 9665, + "ĠDom": 9666, + "Ġdates": 9667, + "Ġpron": 9668, + "Ġbasketball": 9669, + "Ġlucky": 9670, + "ĠArabia": 9671, + "Ġassumed": 9672, + "esty": 9673, + "Ġaffairs": 9674, + "Ġglad": 9675, + "ĠIndeed": 9676, + "ĠFA": 9677, + "ĠWord": 9678, + "Ġjoining": 9679, + "ifice": 9680, + "pread": 9681, + "irts": 9682, + "ĠSelect": 9683, + "Ġpopulations": 9684, + "aware": 9685, + "Ġnose": 9686, + "Ġcomplaints": 9687, + "start": 9688, + "Ġscoring": 9689, + "Thanks": 9690, + "Ġmining": 9691, + "Ġvisitors": 9692, + "SH": 9693, + "Ġdamaged": 9694, + "Ġcharacteristics": 9695, + "ĠPent": 9696, + "DC": 9697, + "Ġ83": 9698, + "ĠSix": 9699, + "rates": 9700, + "Ġflags": 9701, + "ĠBrew": 9702, + "dog": 9703, + "Mark": 9704, + "////": 9705, + "Ġexecution": 9706, + "Ġjoke": 9707, + "phones": 9708, + "Ġtestimony": 9709, + "Ġobst": 9710, + "QL": 9711, + "ĠCut": 9712, + "Ġstudied": 9713, + "ĠNintendo": 9714, + "icket": 9715, + "ĠNBC": 9716, + "Ġlad": 9717, + "ĠBra": 9718, + "ĠMoh": 9719, + "Ġkernel": 9720, + "Ġoverwhelming": 9721, + "Ġaged": 9722, + "Ġapplicable": 9723, + "ĠCond": 9724, + "Ġroads": 9725, + "ĠBlock": 9726, + "made": 9727, + "odge": 9728, + "Ġcommands": 9729, + "Ġoffices": 9730, + "veland": 9731, + "Ġtut": 9732, + "Ġreceiver": 9733, + "ĠFro": 9734, + "Ġshopping": 9735, + "ĠiP": 9736, + "ĠStre": 9737, + "ĠABC": 9738, + "Ġentertainment": 9739, + "ĠBow": 9740, + "orted": 9741, + "Mc": 9742, + "Ġreads": 9743, + "grad": 9744, + "ĠCollect": 9745, + "ĠâĪĴ": 9746, + "ĠCapital": 9747, + "ederation": 9748, + "Ġemployer": 9749, + "Ġinvolvement": 9750, + "Ġanxiety": 9751, + "alia": 9752, + "Ġroof": 9753, + "ĠAmong": 9754, + "ĠDemocrat": 9755, + "Ġstats": 9756, + "ĠVill": 9757, + "Ġconstitutional": 9758, + "Ġreferring": 9759, + "itty": 9760, + "Ġtackle": 9761, + "outube": 9762, + "Ġbacked": 9763, + "ĠHong": 9764, 
+ "ĠBroad": 9765, + "Ġele": 9766, + "ĠOtt": 9767, + "Ġ1992": 9768, + "hour": 9769, + "achusetts": 9770, + "Cal": 9771, + "Ġdefeated": 9772, + "Ġ81": 9773, + "esp": 9774, + "Ġseemingly": 9775, + "was": 9776, + "ĠJenn": 9777, + "ĠKurd": 9778, + "Ġgene": 9779, + "Ġdiscount": 9780, + "Ret": 9781, + "ECT": 9782, + "();": 9783, + "Ġclubs": 9784, + "Ġsid": 9785, + "ĠMarsh": 9786, + "Check": 9787, + "Ġpp": 9788, + "ĠEag": 9789, + "idespread": 9790, + "Ġbeings": 9791, + "FT": 9792, + "Ġintroduction": 9793, + "ĠChange": 9794, + "ARD": 9795, + "Ġ110": 9796, + "adows": 9797, + "ierce": 9798, + "Ġmeal": 9799, + "author": 9800, + "ĠBang": 9801, + "lahoma": 9802, + "Ġranks": 9803, + "2011": 9804, + "????": 9805, + "max": 9806, + "Ġcollapse": 9807, + "Ġopens": 9808, + "Ġecho": 9809, + "Ġsoph": 9810, + "Ġracist": 9811, + "Ġenormous": 9812, + "Ġwaves": 9813, + "Ġtap": 9814, + "Ġcomprehensive": 9815, + ".--": 9816, + "ĠRoy": 9817, + "Ġfarmers": 9818, + "Related": 9819, + "aired": 9820, + "rones": 9821, + "ĠCrim": 9822, + "Ġproportion": 9823, + "Ġdesigns": 9824, + "Ġnegotiations": 9825, + "Ġvirtually": 9826, + "ĠBatman": 9827, + "Ġwarn": 9828, + "Ġlegitimate": 9829, + "mate": 9830, + "Ġconvention": 9831, + ",,": 9832, + "netic": 9833, + "ĠSD": 9834, + "Ġconsistently": 9835, + "Ġcompensation": 9836, + "Ġpunishment": 9837, + "Ġye": 9838, + "Ġtie": 9839, + "ĠBureau": 9840, + "irlf": 9841, + "ĠBu": 9842, + "ĠAren": 9843, + "ĠPhilipp": 9844, + "Ġknife": 9845, + "Ġmemories": 9846, + "ĠRoss": 9847, + "Ġangle": 9848, + "Ġ86": 9849, + "ĠThunder": 9850, + "Ġrend": 9851, + "ĠTour": 9852, + "Ġcounts": 9853, + "sung": 9854, + "ĠImp": 9855, + "Ġeducational": 9856, + "Ġaccessible": 9857, + "COM": 9858, + "Ġdrew": 9859, + "yer": 9860, + "Gl": 9861, + "amine": 9862, + "ORT": 9863, + "OB": 9864, + "IB": 9865, + "master": 9866, + "Ġtrials": 9867, + "ogy": 9868, + "har": 9869, + "ĠTrust": 9870, + "Ġpreferred": 9871, + "irlfriend": 9872, + "ĠNev": 9873, + "Ġbin": 9874, + "Ġcow": 9875, + "Page": 9876, + "Ġsignature": 9877, + "ĠBL": 9878, + "700": 9879, + "Ġretired": 9880, + "Ġbytes": 9881, + "Ġneighb": 9882, + "ĠLegend": 9883, + "Ġdevast": 9884, + "Ġsuspected": 9885, + "isons": 9886, + "ĠPokémon": 9887, + "scale": 9888, + "Ġcapabilities": 9889, + "Ġrevel": 9890, + "Ġcheese": 9891, + "dy": 9892, + "igrant": 9893, + "Ġfailing": 9894, + "bits": 9895, + "ĠHeroes": 9896, + "ĠGhost": 9897, + "ĠScient": 9898, + "Ġappointed": 9899, + "uri": 9900, + "Ġinstitution": 9901, + "Ġexpanded": 9902, + "greg": 9903, + "Ġmonitoring": 9904, + "Ġpodcast": 9905, + "Ġcoalition": 9906, + "Ġ96": 9907, + "Jo": 9908, + "Ġstolen": 9909, + "ĠSab": 9910, + "Ġstops": 9911, + "Ġholiday": 9912, + "Ġintr": 9913, + "Car": 9914, + "Black": 9915, + "ĠLGBT": 9916, + "Ġwarming": 9917, + "ĠAnderson": 9918, + "Ġ89": 9919, + "Ġproducer": 9920, + "Med": 9921, + "Ġaccuracy": 9922, + "ĠMarvel": 9923, + "izabeth": 9924, + "ĠPatrick": 9925, + "mony": 9926, + "Ġmini": 9927, + "acles": 9928, + "Ġovert": 9929, + "they": 9930, + "Ġmembership": 9931, + "ĠVen": 9932, + "Ġexch": 9933, + "Ġremoval": 9934, + "ĠDave": 9935, + "TY": 9936, + "mad": 9937, + "ĠFind": 9938, + "Ġadequ": 9939, + "Ġec": 9940, + "Ġteeth": 9941, + "Ġemotion": 9942, + "Ġperm": 9943, + "Ġsolely": 9944, + "db": 9945, + "Ġextraord": 9946, + "IGHT": 9947, + "cal": 9948, + "Ġguidelines": 9949, + "Ġdying": 9950, + "Ġsuspended": 9951, + "ĠPremier": 9952, + "ĠAnthony": 9953, + "elve": 9954, + "Ġdad": 9955, + "ĠEth": 9956, + "ĠFootball": 9957, + "Ġabandoned": 9958, + "Ġ<<": 9959, + "Ġmarch": 9960, + "Ġhorror": 9961, + 
"âĢ¦\"": 9962, + "Ġchildhood": 9963, + "Ġcampaigns": 9964, + "Ġlunch": 9965, + "ĠAlbert": 9966, + "block": 9967, + "âĸĪâĸĪ": 9968, + "ounding": 9969, + "Ġbone": 9970, + "organ": 9971, + "aders": 9972, + "ĠFlash": 9973, + "ĠDrive": 9974, + "Ġtonight": 9975, + "Ġwars": 9976, + "ĠFL": 9977, + "Ġformation": 9978, + "const": 9979, + "News": 9980, + "Ġcompe": 9981, + "orious": 9982, + "ĠStaff": 9983, + "Ġdiscussions": 9984, + "ĠProtection": 9985, + "ĠJam": 9986, + "Ġcriteria": 9987, + "Ġinstallation": 9988, + "Ġaccomplish": 9989, + "izza": 9990, + "Ġpublisher": 9991, + "Ġrescue": 9992, + "ĠTry": 9993, + "ULL": 9994, + "ĠSom": 9995, + "ĠHop": 9996, + "oret": 9997, + "ths": 9998, + "ordon": 9999, + "Ġpocket": 10000, + "ĠInv": 10001, + "Download": 10002, + "ĠCrime": 10003, + "Ġbene": 10004, + "ĠGuide": 10005, + "ĠAssembly": 10006, + "Ġparameters": 10007, + "IE": 10008, + "ĠAlexander": 10009, + "Ġconcert": 10010, + "ĠSche": 10011, + "Ġshoes": 10012, + "Ġvisiting": 10013, + "Ġrecall": 10014, + "Ġbub": 10015, + "Ġrural": 10016, + "Ġconcrete": 10017, + "ĠRos": 10018, + "Next": 10019, + "Russ": 10020, + "Ġloans": 10021, + "ĠShield": 10022, + "Ġtrem": 10023, + "hemat": 10024, + "kg": 10025, + "ĠHarris": 10026, + "isition": 10027, + "ĠMove": 10028, + "ĠFC": 10029, + "Ġfate": 10030, + "ĠCho": 10031, + "Ġtired": 10032, + "Ġprincipal": 10033, + "hist": 10034, + "iences": 10035, + "athy": 10036, + "Ġsevent": 10037, + "Ġmood": 10038, + "Ġstrategic": 10039, + "Ġdiseases": 10040, + "Ġforum": 10041, + "Ġtempor": 10042, + "Ġheadquarters": 10043, + "Par": 10044, + "ige": 10045, + "flix": 10046, + "Ġguitar": 10047, + "Ġ94": 10048, + "Only": 10049, + "Ġreleases": 10050, + "roph": 10051, + "================================": 10052, + "Ġ600": 10053, + "ĠContinue": 10054, + "igate": 10055, + "ĠCrit": 10056, + "system": 10057, + "Ġdisabled": 10058, + "Ġunexpected": 10059, + "ithub": 10060, + "Ġunclear": 10061, + "ĠEst": 10062, + "Ġcontrad": 10063, + "Ġstrategies": 10064, + "ventures": 10065, + "Ġpassage": 10066, + "AME": 10067, + "Ġimproving": 10068, + "Ġreveals": 10069, + "Ġdecrease": 10070, + "ova": 10071, + "Ġannoy": 10072, + "ĠShort": 10073, + "ĠLibrary": 10074, + "Ġcyber": 10075, + "nell": 10076, + "ĠHur": 10077, + "ĠCB": 10078, + "Ġphotograp": 10079, + "UI": 10080, + "Ġsed": 10081, + "Ge": 10082, + "Ġ87": 10083, + "Ġdiverse": 10084, + "Ġencouraged": 10085, + "Ġconspiracy": 10086, + "Ġbirds": 10087, + "Ġoperator": 10088, + "Ġhandful": 10089, + "Ġclassified": 10090, + "?)": 10091, + "Ġdramatic": 10092, + "Ġinvestigators": 10093, + "ito": 10094, + "Ġwidespread": 10095, + "ĠRoom": 10096, + "----------------------------------------------------------------": 10097, + "Ġcollective": 10098, + "Ġjournalist": 10099, + "String": 10100, + "Ġtemperatures": 10101, + "ila": 10102, + "Ġguid": 10103, + "Ġinspect": 10104, + "Ġmissile": 10105, + "ĠMayor": 10106, + "Ġmanual": 10107, + "Ġsimultane": 10108, + "Ġratings": 10109, + "Ġsuck": 10110, + "Ġ97": 10111, + "Ġuniversal": 10112, + "Ġpharm": 10113, + "Ġdisrupt": 10114, + "iano": 10115, + "AV": 10116, + "Ġft": 10117, + "Ġstatist": 10118, + "olds": 10119, + "ĠWalker": 10120, + "php": 10121, + "Ġundert": 10122, + "ĠLas": 10123, + "ishop": 10124, + "ntil": 10125, + "reshold": 10126, + "ĠWhether": 10127, + "Ms": 10128, + "Ġdeny": 10129, + "ĠCloud": 10130, + "Ġprovider": 10131, + "Ġsurviv": 10132, + "ĠUpdate": 10133, + "has": 10134, + "Ġmistakes": 10135, + "charge": 10136, + "pled": 10137, + "rity": 10138, + "Ġnode": 10139, + "ĠMassachusetts": 10140, + "ools": 10141, + "lication": 10142, 
+ "Ġfails": 10143, + "emale": 10144, + "ori": 10145, + "backs": 10146, + "Ġshirt": 10147, + "Ġ''": 10148, + "ĠNAT": 10149, + "Ġwaters": 10150, + "elson": 10151, + "Ġease": 10152, + "Ġscar": 10153, + "Ġcontents": 10154, + "mind": 10155, + "Ġcontribution": 10156, + "Ġshr": 10157, + "Ġhanded": 10158, + "Ġstability": 10159, + "Ġtrave": 10160, + "Em": 10161, + "Ġmirror": 10162, + "123": 10163, + "Ġweigh": 10164, + "Ġfiction": 10165, + "ouver": 10166, + "istant": 10167, + "rition": 10168, + "ĠFed": 10169, + "Ġphysically": 10170, + "Ġstake": 10171, + "ĠArticle": 10172, + "ĠArc": 10173, + "ĠLewis": 10174, + "ĠMind": 10175, + "Ġdemonstrate": 10176, + "Ġprofits": 10177, + "vision": 10178, + "omic": 10179, + "olid": 10180, + "Ġbattles": 10181, + "Ġdrives": 10182, + "Ġeastern": 10183, + "ĠSony": 10184, + "!!!": 10185, + "aration": 10186, + "vard": 10187, + "ĠGL": 10188, + "portation": 10189, + "Ġ92": 10190, + "Ġlawmakers": 10191, + "Ġprotecting": 10192, + "ĠEPA": 10193, + "Ġyeah": 10194, + "Ġshame": 10195, + "olph": 10196, + "even": 10197, + "xit": 10198, + "Ġattach": 10199, + "Ġrepresenting": 10200, + "Ġobs": 10201, + "ĠUtah": 10202, + "iffs": 10203, + "ĠFreedom": 10204, + "ó": 10205, + "AK": 10206, + "Ġincidents": 10207, + "itage": 10208, + "Ġviewers": 10209, + "cd": 10210, + "Ġmouse": 10211, + "Ġclar": 10212, + "Ġaccordance": 10213, + "Ġbot": 10214, + "cor": 10215, + "ĠSummer": 10216, + "held": 10217, + "Ġinnocent": 10218, + "Ġinitiative": 10219, + "ols": 10220, + "________________________________": 10221, + "Ġspots": 10222, + "pace": 10223, + "Ġconventional": 10224, + "Ġcorporations": 10225, + "Ġblocked": 10226, + "HD": 10227, + "attered": 10228, + "Ġrefers": 10229, + "Ġbuck": 10230, + "ĠDigital": 10231, + "120": 10232, + "Ġtopics": 10233, + "TF": 10234, + "Äģ": 10235, + "brid": 10236, + "reement": 10237, + "Ġunderlying": 10238, + "ĠMember": 10239, + "Ġinvestigating": 10240, + "Ġpregnancy": 10241, + "Ġtouchdown": 10242, + "ĠBand": 10243, + "ĠCaller": 10244, + "Ġinstances": 10245, + "PP": 10246, + "wa": 10247, + "Good": 10248, + "Ġ1991": 10249, + "ĠCold": 10250, + "Ġfears": 10251, + "Ġremarks": 10252, + "ĨĴ": 10253, + "atal": 10254, + "Ġmit": 10255, + "Ġexperiments": 10256, + "ipt": 10257, + "Color": 10258, + "indu": 10259, + "Update": 10260, + "Ġ93": 10261, + "Ag": 10262, + "Ġå": 10263, + "ancouver": 10264, + "Both": 10265, + "Ġjudges": 10266, + "Object": 10267, + "Ġstere": 10268, + "umbn": 10269, + "Ġparticipation": 10270, + "ĠStars": 10271, + "ĠJere": 10272, + "Ġweekly": 10273, + "ĠBan": 10274, + "Ġconversations": 10275, + "ĠPitt": 10276, + "uz": 10277, + "ĠIndiana": 10278, + "ĠKick": 10279, + "Ġinfection": 10280, + "Ġheroes": 10281, + "Ġsettled": 10282, + "Ġstrip": 10283, + "Ġhal": 10284, + "Ġdump": 10285, + "ĠSci": 10286, + "Ġles": 10287, + "Ġreferences": 10288, + "ĠURL": 10289, + "ĠBridge": 10290, + "Ġwanting": 10291, + "Force": 10292, + "Ġexclus": 10293, + "Meanwhile": 10294, + "mn": 10295, + "Ġgentle": 10296, + "maker": 10297, + "senal": 10298, + "ĠGro": 10299, + "ouri": 10300, + "ĠRain": 10301, + "ĠAlliance": 10302, + "Ġlift": 10303, + "ela": 10304, + "SD": 10305, + "ĠCleveland": 10306, + "Ġranked": 10307, + "Ġstadium": 10308, + "Ġdeadly": 10309, + "ä¸": 10310, + "Ġriding": 10311, + "aria": 10312, + "ĠArmor": 10313, + "Ġdocumentation": 10314, + "ĠGreece": 10315, + "reek": 10316, + "Ġlens": 10317, + "ĠSa": 10318, + "Ġgross": 10319, + "ĠEmer": 10320, + "agers": 10321, + "ĠDub": 10322, + "ĠRh": 10323, + "ĠAMD": 10324, + "Ġarrival": 10325, + "Ġdesert": 10326, + "Ġsupplement": 10327, + "ĠResp": 
10328, + "Ġknee": 10329, + "Ġmargin": 10330, + "font": 10331, + "ogg": 10332, + "2010": 10333, + "ĠPir": 10334, + "ĠProm": 10335, + "ivals": 10336, + "Ġintake": 10337, + "Ġdifferently": 10338, + "ugs": 10339, + "Ġbits": 10340, + "cluded": 10341, + "Ġsearching": 10342, + "ĠDu": 10343, + "umble": 10344, + "Ġfunctional": 10345, + "ĠBaltimore": 10346, + "ĠCould": 10347, + "Ġdesired": 10348, + "Ġcircuit": 10349, + "ĠLyn": 10350, + "ĠGO": 10351, + "ĠFalse": 10352, + "repre": 10353, + "':": 10354, + "alties": 10355, + "Ġminim": 10356, + "Ġdrove": 10357, + "ĠShould": 10358, + "Ġhip": 10359, + "Ġpros": 10360, + "Ġutility": 10361, + "ĠNature": 10362, + "ĠMode": 10363, + "President": 10364, + "opp": 10365, + "rat": 10366, + "formance": 10367, + "Ġconcentration": 10368, + "Ġfont": 10369, + "ĠBud": 10370, + "Ġamid": 10371, + "Ġrevers": 10372, + "ĠML": 10373, + "Bar": 10374, + "Ġinteraction": 10375, + "Ġjurisd": 10376, + "Ġspells": 10377, + "dep": 10378, + "fil": 10379, + "Ġcivilians": 10380, + "utter": 10381, + "ĠCooper": 10382, + "ĠBelow": 10383, + "Ġentrance": 10384, + "Ġconvert": 10385, + "Ġcontroversy": 10386, + "owered": 10387, + "Ġcontrary": 10388, + "Ġarc": 10389, + "ĠExecutive": 10390, + "ĠOfficer": 10391, + "Ġpackages": 10392, + "Ġprogressive": 10393, + "width": 10394, + "Ġreserved": 10395, + "vol": 10396, + "ĠSamsung": 10397, + "Ġprinted": 10398, + "Ġcenters": 10399, + "Ġintroduce": 10400, + "ĠKennedy": 10401, + "Ġodds": 10402, + "Ġsurely": 10403, + "Ġindependence": 10404, + "Ġpassengers": 10405, + "reprene": 10406, + "ĠBeh": 10407, + "Ġloves": 10408, + "ĠESPN": 10409, + "Ġfacilit": 10410, + "Ġidentical": 10411, + "Ġdoct": 10412, + "Ġpartnership": 10413, + "conf": 10414, + "ĠHide": 10415, + "Ġconfused": 10416, + "ĠCow": 10417, + "Men": 10418, + "Ġwrest": 10419, + "ĠIraqi": 10420, + "Ġholes": 10421, + "ĠStudies": 10422, + "Ġpregnant": 10423, + "hard": 10424, + "Ġsignals": 10425, + "IX": 10426, + "Ġpulling": 10427, + "Ġgraduate": 10428, + "Ġnominee": 10429, + "Date": 10430, + "Ġpermitted": 10431, + "ĠâĤ¬": 10432, + "ĠOklahoma": 10433, + "Start": 10434, + "Ġauthorized": 10435, + "Ġalarm": 10436, + "ĠCos": 10437, + "van": 10438, + "Ġgenerations": 10439, + "cular": 10440, + "Ġdragon": 10441, + "ĠSoftware": 10442, + "ĠEdward": 10443, + "Ġcontroller": 10444, + "Sen": 10445, + "gered": 10446, + "ĠVik": 10447, + "Ġapproached": 10448, + "Thank": 10449, + "Ġcance": 10450, + "Ġformula": 10451, + "ĠSmall": 10452, + "Ġweakness": 10453, + "Ġramp": 10454, + "itudes": 10455, + "jud": 10456, + "Ġbrilliant": 10457, + "Ġaccus": 10458, + "source": 10459, + "Ġ800": 10460, + "ĠEvil": 10461, + "Sw": 10462, + "Ġhomeless": 10463, + "week": 10464, + "iens": 10465, + "rics": 10466, + "ĠThird": 10467, + "TO": 10468, + "Ġorganic": 10469, + "Ġpresentation": 10470, + "agh": 10471, + "ĠDownload": 10472, + "vation": 10473, + "Ġassembly": 10474, + "orable": 10475, + "holders": 10476, + "ĠBernie": 10477, + "ĠHelp": 10478, + "Ġtong": 10479, + "ĠFight": 10480, + "Ġbeach": 10481, + "Book": 10482, + "ĠLic": 10483, + "Ġrush": 10484, + "ĠRound": 10485, + "oup": 10486, + "ĠMarx": 10487, + "Ġcalculated": 10488, + "ĠDevil": 10489, + "ĠSarah": 10490, + "Ġoccasionally": 10491, + "Ġbullet": 10492, + "Available": 10493, + "gate": 10494, + "Ġ91": 10495, + "Ġhosp": 10496, + "Ġpromises": 10497, + "ĠHIV": 10498, + "ĠStadium": 10499, + "ĠStock": 10500, + "ĠCorporation": 10501, + "gage": 10502, + "NG": 10503, + "ĠCredit": 10504, + "Ġsne": 10505, + "ibl": 10506, + "Ġaccum": 10507, + "such": 10508, + "Ġterrorists": 10509, + "Ġconsciousness": 10510, 
+ "ĠZh": 10511, + "Ġdrama": 10512, + "oola": 10513, + "piration": 10514, + "Ġlabour": 10515, + "ĠNin": 10516, + "Ġutter": 10517, + "Ġdemocratic": 10518, + "Ġassass": 10519, + "ilation": 10520, + "Ġgest": 10521, + "Ġabroad": 10522, + "Ġmetab": 10523, + "Ġsorts": 10524, + "Ġflav": 10525, + "UB": 10526, + "Ġmg": 10527, + "ĠNothing": 10528, + "ĠOd": 10529, + "Ġmusical": 10530, + "2009": 10531, + "Ġdrops": 10532, + "ocated": 10533, + "ateral": 10534, + "000000": 10535, + "Ġgre": 10536, + "Ġequality": 10537, + "Ġburden": 10538, + "Ġvig": 10539, + "ĠLeader": 10540, + "------------": 10541, + "Ġceremony": 10542, + "Ġfighter": 10543, + "Ġactors": 10544, + "Ġæ": 10545, + "aman": 10546, + "Fi": 10547, + "Ġalign": 10548, + "puter": 10549, + "Ġelder": 10550, + "ĠNSA": 10551, + "Ġrepresentation": 10552, + "ĠOntario": 10553, + "ITH": 10554, + "usalem": 10555, + "Ġharassment": 10556, + "itzer": 10557, + "Ġsymp": 10558, + "Ġboxes": 10559, + "ĠDR": 10560, + "Ġmanifest": 10561, + "atre": 10562, + "Ġ^": 10563, + "Ġdies": 10564, + "leton": 10565, + "Ġmissions": 10566, + "ethe": 10567, + "Ġresolve": 10568, + "Ġfollowers": 10569, + "Ġasc": 10570, + "Ġkm": 10571, + "lord": 10572, + "ammed": 10573, + "Ġsilent": 10574, + "ĠAssociated": 10575, + "Ġtiming": 10576, + "Ġprisoners": 10577, + "ĠKings": 10578, + "ĠFive": 10579, + "Ġtower": 10580, + "Ġapproaches": 10581, + "Ġprecisely": 10582, + "Ġbureau": 10583, + "ĠMother": 10584, + "ĠIss": 10585, + "Ġkeyboard": 10586, + "itual": 10587, + "Ġfunded": 10588, + "Ġstaying": 10589, + "Ġpsychological": 10590, + "Ġmile": 10591, + "ĠLeon": 10592, + "ĠBarb": 10593, + "will": 10594, + "Ġwider": 10595, + "ĠAtlantic": 10596, + "Ġtill": 10597, + "ĠRome": 10598, + "rot": 10599, + "Ġaccompan": 10600, + "Ġflour": 10601, + "aco": 10602, + "World": 10603, + "ĠExpress": 10604, + "ĠYu": 10605, + "Cor": 10606, + "Ġpleased": 10607, + "party": 10608, + "Ġpointing": 10609, + "Ġinflation": 10610, + "Ġroy": 10611, + "Ġ),": 10612, + "ainer": 10613, + "Ġwedding": 10614, + "ormon": 10615, + "Ġrequiring": 10616, + "Ġqualified": 10617, + "Ġsegment": 10618, + "END": 10619, + "Ġsizes": 10620, + "eals": 10621, + "Ġcorrupt": 10622, + "assador": 10623, + "Ġceleb": 10624, + "Ġdreams": 10625, + "ĠMess": 10626, + "Ġchecking": 10627, + "ĠVersion": 10628, + "Ġpreparing": 10629, + "Ġactively": 10630, + "ĠDiff": 10631, + "Ġlux": 10632, + "ĠWinter": 10633, + "acteria": 10634, + "ĠNE": 10635, + "Ġdeputy": 10636, + "Ġtransgender": 10637, + "Ġsummary": 10638, + "Ġinher": 10639, + "eries": 10640, + "char": 10641, + "ĠYan": 10642, + "Ġknock": 10643, + "ĠPath": 10644, + "Ġlip": 10645, + "roller": 10646, + "Ġimpression": 10647, + "Ġcelebrate": 10648, + "Ġslide": 10649, + "Ġguests": 10650, + "Ġclip": 10651, + "FS": 10652, + "Ġsavings": 10653, + "Ġcaptain": 10654, + "Ġlegacy": 10655, + "ĠDenver": 10656, + "Ġwounded": 10657, + "taboola": 10658, + "ACT": 10659, + "Ġpursue": 10660, + "Ġoxy": 10661, + "Ġq": 10662, + "Ġsemi": 10663, + "ĠNeed": 10664, + "ĠAffairs": 10665, + "Ġobsc": 10666, + "Ġchecked": 10667, + "Ġdual": 10668, + "Code": 10669, + "ĠMD": 10670, + "lem": 10671, + "ulty": 10672, + "Ġ©": 10673, + "ĠElizabeth": 10674, + "Ġcenturies": 10675, + "arded": 10676, + "src": 10677, + "Ġevident": 10678, + "ennis": 10679, + "atin": 10680, + "Ġunemployment": 10681, + "ĠMario": 10682, + "Ġintim": 10683, + "Christ": 10684, + "Ġbiological": 10685, + "Ġsoldier": 10686, + "ĠAdded": 10687, + "Ġmath": 10688, + "ĠGil": 10689, + "Ġbias": 10690, + "Ġdating": 10691, + "ĠOcean": 10692, + "Ġmice": 10693, + "Mus": 10694, + "hire": 10695, + 
"ĠTes": 10696, + "Server": 10697, + "limited": 10698, + "Size": 10699, + "Ġmeters": 10700, + "Ġrocket": 10701, + "essee": 10702, + "Ġcertificate": 10703, + "ĠIranian": 10704, + "ASS": 10705, + "Ġgrid": 10706, + "Dec": 10707, + "Ġrolling": 10708, + "commun": 10709, + "ĠSweden": 10710, + "bury": 10711, + "Ġtissue": 10712, + "Ġracism": 10713, + "ĠLocal": 10714, + "Ġmystery": 10715, + "Ġexamine": 10716, + "Ġstem": 10717, + "Ġsits": 10718, + "Ġhoped": 10719, + "oting": 10720, + "Ġdialogue": 10721, + "Ġpersu": 10722, + "Watch": 10723, + "lay": 10724, + "MAN": 10725, + "Ġchronic": 10726, + "ĠPortland": 10727, + "market": 10728, + "ĠSEC": 10729, + "Ġparallel": 10730, + "Ġscandal": 10731, + "Ġcarries": 10732, + "Ġphenomenon": 10733, + "human": 10734, + "acker": 10735, + "ĠOx": 10736, + "Ġretirement": 10737, + "tainment": 10738, + "ovie": 10739, + "ĠGear": 10740, + "Ġduties": 10741, + "Ġdose": 10742, + "Ġscroll": 10743, + "MB": 10744, + "inf": 10745, + "Ġsauce": 10746, + "Ġlandscape": 10747, + "reddit": 10748, + "ĠChampionship": 10749, + "ĠReddit": 10750, + "alid": 10751, + "Ġcoin": 10752, + "Ġovers": 10753, + "Ġposting": 10754, + "about": 10755, + "Ġfel": 10756, + "andy": 10757, + "Ġbold": 10758, + "Ġfocusing": 10759, + "effect": 10760, + "GR": 10761, + "Ġdeemed": 10762, + "Ġrecommendations": 10763, + "Ġstepped": 10764, + "Ġvoter": 10765, + "ĠDeep": 10766, + "ĠInstagram": 10767, + "Ġmoderate": 10768, + "ĠMaryland": 10769, + "Ġrestricted": 10770, + "ĠMB": 10771, + "ĠChall": 10772, + "Ġtob": 10773, + "Ġcir": 10774, + "ĠOcc": 10775, + "ĠEver": 10776, + "Ġcollaps": 10777, + "INFO": 10778, + "=-": 10779, + "ĠPict": 10780, + "ĠAccount": 10781, + "nc": 10782, + "Ġought": 10783, + "Ġexport": 10784, + "Ġdrunk": 10785, + "('": 10786, + "Ġwise": 10787, + "ĠMort": 10788, + "necess": 10789, + "Ġancest": 10790, + "ĠIncre": 10791, + "Ġfrequent": 10792, + "mir": 10793, + "Ġinterpretation": 10794, + "Ġdependent": 10795, + "Ġcoins": 10796, + "ĠBol": 10797, + "Video": 10798, + "ĠJustin": 10799, + "Ġfatal": 10800, + "Ġcooking": 10801, + "Ġconfusion": 10802, + "ipher": 10803, + "Ġcustody": 10804, + "ĠMorgan": 10805, + "omach": 10806, + "ĠGovernor": 10807, + "Ġrestaurants": 10808, + "eling": 10809, + "Ġacknowledged": 10810, + "Ġther": 10811, + "Ġgenes": 10812, + "ching": 10813, + "Hey": 10814, + "Ġtactics": 10815, + "ĠMexican": 10816, + "Ġvend": 10817, + "Ġhes": 10818, + "quer": 10819, + "Ġnoting": 10820, + "ĠCameron": 10821, + "Ġtargeting": 10822, + "rock": 10823, + "Ġcredits": 10824, + "Ġemotions": 10825, + "Ġrepresentatives": 10826, + "news": 10827, + "Ġlegislative": 10828, + "Ġremoving": 10829, + "Ġtweeted": 10830, + "ĠCarter": 10831, + "ĠFixed": 10832, + "Ġforcing": 10833, + "Ġspeaker": 10834, + "Ġmales": 10835, + "ĠVietnam": 10836, + "lined": 10837, + "Ġconcepts": 10838, + "Ġvoices": 10839, + "oir": 10840, + "ĠTrib": 10841, + "Whe": 10842, + "ĠJerusalem": 10843, + "ĠSant": 10844, + "Ġcul": 10845, + "Ġlady": 10846, + "ĠHawai": 10847, + "Ġarts": 10848, + "ĠInn": 10849, + "ĠMachine": 10850, + "ĠEmperor": 10851, + "Ġslot": 10852, + "gly": 10853, + "ĠProcess": 10854, + "III": 10855, + "Ġathletes": 10856, + "ĠTemple": 10857, + "ĠRepresent": 10858, + "Ġpresc": 10859, + "Ġtons": 10860, + "Ġgolden": 10861, + "Ġpunch": 10862, + "ĠGR": 10863, + "iverpool": 10864, + "Ġenact": 10865, + "Ġlobby": 10866, + "Ġmos": 10867, + "Ġpicking": 10868, + "Ġlifetime": 10869, + "Ġcognitive": 10870, + "Each": 10871, + "zo": 10872, + "Ġdub": 10873, + "Ġconsists": 10874, + "oln": 10875, + "Ġfestival": 10876, + "amous": 10877, + "Ġintellig": 
10878, + "words": 10879, + "ĠSmart": 10880, + "Ġdele": 10881, + "Ġlapt": 10882, + "Ġmagical": 10883, + "ĠSin": 10884, + "bus": 10885, + "urities": 10886, + "ighth": 10887, + "ĠRuby": 10888, + "ĠSure": 10889, + "olving": 10890, + "Ġjun": 10891, + "OST": 10892, + "Ġimposed": 10893, + "Ġastron": 10894, + "Ġcorrel": 10895, + "ĠNS": 10896, + "ĠKit": 10897, + "ĠFuture": 10898, + "burn": 10899, + "Ġimmune": 10900, + "ocus": 10901, + "Ġcourses": 10902, + "ĠString": 10903, + "Ġlean": 10904, + "Ġghost": 10905, + "Ġoutcomes": 10906, + "Ġexpense": 10907, + "Ġeveryday": 10908, + "Ġacceptable": 10909, + "Ah": 10910, + "Ġequipped": 10911, + "Ġorange": 10912, + "FR": 10913, + "ĠDutch": 10914, + "Though": 10915, + "ĠRank": 10916, + "QU": 10917, + "ĠRoberts": 10918, + "what": 10919, + "rend": 10920, + "Ġdisappear": 10921, + "Ġspawn": 10922, + "ĠLam": 10923, + "ois": 10924, + "Ġdeserve": 10925, + "Ġminimal": 10926, + "Ġnervous": 10927, + "ĠWould": 10928, + "Ġrook": 10929, + "ĠVancouver": 10930, + "Ġresign": 10931, + "shire": 10932, + "ĠWorks": 10933, + "ĠBuild": 10934, + "Ġaffordable": 10935, + "ĠGary": 10936, + "ĠArena": 10937, + "Ġhanging": 10938, + "Ġimplications": 10939, + "ĠSong": 10940, + "Ġmaintaining": 10941, + "Ġguards": 10942, + "CON": 10943, + "Ġderived": 10944, + "Ġexecuted": 10945, + "Ġtheories": 10946, + "Ġquoted": 10947, + "ĠAndre": 10948, + "oga": 10949, + "seless": 10950, + "info": 10951, + "ĠBelg": 10952, + "Ġtears": 10953, + "ĠSurv": 10954, + "Ġbirthday": 10955, + "igious": 10956, + "immer": 10957, + "Ġspectrum": 10958, + "Ġarchitecture": 10959, + "Ġrecruit": 10960, + "arma": 10961, + "Table": 10962, + "Ġmonsters": 10963, + "ĠGov": 10964, + "Ġdestination": 10965, + "Ġattractive": 10966, + "Ġfoss": 10967, + "ĠMoreover": 10968, + "Ġpresents": 10969, + "THE": 10970, + "Ġreply": 10971, + "pton": 10972, + "Ġcum": 10973, + "Ġdelight": 10974, + "Ġaffects": 10975, + "Ġdonations": 10976, + "ĠToy": 10977, + "ĠHim": 10978, + "MENT": 10979, + "Ġovercome": 10980, + "itched": 10981, + "ĠFantasy": 10982, + "ĠHat": 10983, + "ĠBeast": 10984, + "bott": 10985, + "Ġinvestigations": 10986, + "Run": 10987, + "Ġhunting": 10988, + "di": 10989, + "fund": 10990, + "Ġsessions": 10991, + "estyle": 10992, + "Ġportray": 10993, + "oids": 10994, + "Yeah": 10995, + "Ġcommunicate": 10996, + "Ġcomedy": 10997, + "ĠYang": 10998, + "Ġbelt": 10999, + "ĠMarine": 11000, + "Ġpredicted": 11001, + "Play": 11002, + "Ġimportantly": 11003, + "Ġremarkable": 11004, + "Ġeliminate": 11005, + "David": 11006, + "Ġbind": 11007, + "VID": 11008, + "Ġadvocates": 11009, + "ĠGaza": 11010, + "imp": 11011, + "DB": 11012, + "ĠNa": 11013, + "ĠSimilar": 11014, + "IES": 11015, + "Ġcharity": 11016, + "vas": 11017, + "math": 11018, + "Ġâĸ": 11019, + "oker": 11020, + "ndum": 11021, + "Ġcaps": 11022, + "ĠHal": 11023, + "2000": 11024, + "ean": 11025, + "Ġfleet": 11026, + "Ġrecre": 11027, + "Right": 11028, + "Ġsleeping": 11029, + "ijing": 11030, + "kind": 11031, + "Ġdesignated": 11032, + "ä": 11033, + "Ġanimation": 11034, + "kee": 11035, + "ĠIntrodu": 11036, + "Ġ/>": 11037, + "Ġdelayed": 11038, + "Ġtremend": 11039, + "Ġcurious": 11040, + "Use": 11041, + "Ġlect": 11042, + "dam": 11043, + "Ġinnovation": 11044, + "ĠPoints": 11045, + "Ġloading": 11046, + "Ġdispute": 11047, + "ctic": 11048, + "irds": 11049, + "ĠBY": 11050, + "Ġnurs": 11051, + "ĠValue": 11052, + "IONS": 11053, + "ĠHum": 11054, + "Ġtemplate": 11055, + "mers": 11056, + "Ġappearances": 11057, + "ĠEntertainment": 11058, + "Ġtranslation": 11059, + "Ġsake": 11060, + "Ġbeneath": 11061, + "Ġinhib": 11062, 
+ "Ġeuro": 11063, + "abetes": 11064, + "Ġstudying": 11065, + "ĠMas": 11066, + "Ġperceived": 11067, + "Ġexamined": 11068, + "Ġeager": 11069, + "Ġcoaches": 11070, + "Ġimper": 11071, + "chi": 11072, + "Ġproduces": 11073, + "\").": 11074, + "ĠEveryone": 11075, + "Ġmunicip": 11076, + "Ġgirlfriend": 11077, + "Ġhire": 11078, + "ĠVice": 11079, + "Ġsuitable": 11080, + "opy": 11081, + "Ġinequ": 11082, + "ĠDuke": 11083, + "fish": 11084, + "first": 11085, + "ĠObs": 11086, + "Ġinterior": 11087, + "ĠBruce": 11088, + "ĠRy": 11089, + "Ġanalys": 11090, + "Ġconsiderable": 11091, + "Ġforecast": 11092, + "Ġfert": 11093, + "orship": 11094, + "ĠDrug": 11095, + "ĠALL": 11096, + ":\"": 11097, + "thur": 11098, + "ĠMail": 11099, + "Ġballot": 11100, + "Ġinstantly": 11101, + "ĠChannel": 11102, + "Ġpicks": 11103, + "Ġ1989": 11104, + "Ġtent": 11105, + "oli": 11106, + "Ġcivilian": 11107, + "bling": 11108, + "ello": 11109, + "bu": 11110, + "Ġinch": 11111, + "Ġlogo": 11112, + "Ġcooperation": 11113, + "Ġwalks": 11114, + "Ġinvestments": 11115, + "Ġimprison": 11116, + "ĠFestival": 11117, + "ĠKy": 11118, + "Ġlegally": 11119, + "Ġgri": 11120, + "charg": 11121, + "Sl": 11122, + "Ġthreatening": 11123, + "duction": 11124, + "flow": 11125, + "Ġdismissed": 11126, + "ibraries": 11127, + "cap": 11128, + "ele": 11129, + "ĠMcG": 11130, + "ĠHarvard": 11131, + "ĠConservative": 11132, + "ĠCBS": 11133, + "png": 11134, + "Ġroots": 11135, + "ĠHaving": 11136, + "umbled": 11137, + "ĠFun": 11138, + "\\/": 11139, + "ĠSearch": 11140, + "plex": 11141, + "Ġdiscussing": 11142, + "Ġcontinu": 11143, + "ĠTai": 11144, + "ĠWik": 11145, + "Free": 11146, + "fit": 11147, + "Ġrefuse": 11148, + "Ġmanaging": 11149, + "Ġsynd": 11150, + "ipedia": 11151, + "walk": 11152, + "Ġprofessionals": 11153, + "Ġguidance": 11154, + "Ġuniversities": 11155, + "Ġassemb": 11156, + "untu": 11157, + "Finally": 11158, + "ASE": 11159, + "ĠAuto": 11160, + "ĠHad": 11161, + "Ġanniversary": 11162, + "LD": 11163, + "ĠDur": 11164, + "ĠUltimate": 11165, + "ihad": 11166, + "product": 11167, + "Ġtransit": 11168, + "Ġrestore": 11169, + "Ġexplaining": 11170, + "Ġasset": 11171, + "Ġtransferred": 11172, + "Ġburst": 11173, + "apolis": 11174, + "ĠMagazine": 11175, + "ĠCra": 11176, + "ĠBR": 11177, + "gged": 11178, + "ĠHE": 11179, + "Mich": 11180, + "bet": 11181, + "ĠLady": 11182, + "ylum": 11183, + "erves": 11184, + "Ġmeets": 11185, + "white": 11186, + "Log": 11187, + "Ġcorresponding": 11188, + "Ġinsisted": 11189, + "GG": 11190, + "Ġsurrounded": 11191, + "Ġtens": 11192, + "Ġlane": 11193, + "Ġcoinc": 11194, + "home": 11195, + "Ġexisted": 11196, + "ected": 11197, + "ĠDouble": 11198, + "lamm": 11199, + "Ġskept": 11200, + "exp": 11201, + "Ġperception": 11202, + "iev": 11203, + "ĠBeing": 11204, + "oft": 11205, + "Ġadopt": 11206, + ".:": 11207, + "];": 11208, + "Windows": 11209, + "Ġsatellite": 11210, + "ASH": 11211, + "Ġinfant": 11212, + "description": 11213, + "ĠMeanwhile": 11214, + "cm": 11215, + "oca": 11216, + "ĠTreat": 11217, + "actor": 11218, + "Ġtobacco": 11219, + "ĠNorm": 11220, + "emption": 11221, + "Ġflesh": 11222, + "Ġje": 11223, + "oop": 11224, + "ĠHeaven": 11225, + "Ġbeating": 11226, + "anim": 11227, + "Ġgathering": 11228, + "Ġcultiv": 11229, + "GO": 11230, + "abe": 11231, + "ĠJonathan": 11232, + "ĠSafety": 11233, + "Ġbadly": 11234, + "prot": 11235, + "Ġchoosing": 11236, + "Ġcontacted": 11237, + "Ġquit": 11238, + "Ġdistur": 11239, + "Ġstir": 11240, + "Ġtoken": 11241, + "Det": 11242, + "ĠPa": 11243, + "Ġfunctionality": 11244, + "003": 11245, + "some": 11246, + "Ġlimitations": 11247, + 
"Ġmeth": 11248, + "build": 11249, + "config": 11250, + "NT": 11251, + "rell": 11252, + "blem": 11253, + "ĠMom": 11254, + "Ġveterans": 11255, + "ĠHu": 11256, + "Ġtrends": 11257, + "arer": 11258, + "ĠGiven": 11259, + "ĠCaption": 11260, + "may": 11261, + "AST": 11262, + "Ġwondering": 11263, + "ĠClark": 11264, + "normal": 11265, + "Ġseparated": 11266, + "Ġdesp": 11267, + "stic": 11268, + "brew": 11269, + "Ġrelating": 11270, + "ĠNik": 11271, + "ĠFarm": 11272, + "Ġenthusi": 11273, + "good": 11274, + "deb": 11275, + "Ġactivist": 11276, + "Ġmart": 11277, + "Ġexplosion": 11278, + "ĠEconomic": 11279, + "Link": 11280, + "Ġinsight": 11281, + "Ġconvenient": 11282, + "Ġcounterpart": 11283, + "support": 11284, + "ĠVirt": 11285, + "agen": 11286, + "ĠTennessee": 11287, + "ĠSimon": 11288, + "ĠAward": 11289, + "OCK": 11290, + "ĠFigure": 11291, + "Ġoverseas": 11292, + "Ġpride": 11293, + "ĠCas": 11294, + "note": 11295, + "mg": 11296, + "Current": 11297, + "Ġdisplays": 11298, + "content": 11299, + "Ġtraveling": 11300, + "Ġhospitals": 11301, + "ĠFinancial": 11302, + "ĠPast": 11303, + "Ġdefendant": 11304, + "Ġstreaming": 11305, + "mble": 11306, + "ĠBerlin": 11307, + "uki": 11308, + "Ġdistribut": 11309, + "Ġantib": 11310, + "Ġchocolate": 11311, + "ĠCastle": 11312, + "Ġinterrupt": 11313, + "ĠRow": 11314, + "Ġconversion": 11315, + "Ġbugs": 11316, + "ĠRather": 11317, + "liest": 11318, + "LY": 11319, + "ĠJean": 11320, + "common": 11321, + "akh": 11322, + "Ġ130": 11323, + "otton": 11324, + "ĠDean": 11325, + "Ġamendment": 11326, + "Ġgameplay": 11327, + "ĠWarren": 11328, + "oda": 11329, + "Ġhighlights": 11330, + "Ġirre": 11331, + "ĠNATO": 11332, + "Ġballs": 11333, + "Ġdemanding": 11334, + "URE": 11335, + "ĠLuke": 11336, + "Figure": 11337, + "stop": 11338, + "onia": 11339, + "zone": 11340, + "izers": 11341, + "ĠWR": 11342, + "Ġawarded": 11343, + "Ġregulatory": 11344, + "ĠHart": 11345, + "ĠSN": 11346, + "pling": 11347, + "Ġsour": 11348, + "ĠPixel": 11349, + "usive": 11350, + "Ġfet": 11351, + "ĠSent": 11352, + "Ġautomatic": 11353, + "Ġfer": 11354, + "vernment": 11355, + "ĠKhan": 11356, + "TON": 11357, + "father": 11358, + "Ġextraordinary": 11359, + "throp": 11360, + "ĠPython": 11361, + "ĠGPU": 11362, + "Ġsexually": 11363, + "Ġdesktop": 11364, + "itivity": 11365, + "ĠAntonio": 11366, + "Ġorient": 11367, + "Ġears": 11368, + "obby": 11369, + "ouses": 11370, + "vertisements": 11371, + "Ġmanufacturers": 11372, + "icient": 11373, + "minute": 11374, + "Ġconviction": 11375, + "Ġgarden": 11376, + "public": 11377, + "Ġsatisfied": 11378, + "fold": 11379, + "OK": 11380, + "Ġinhab": 11381, + "ĠThink": 11382, + "Ġprogramme": 11383, + "Ġstomach": 11384, + "Ġcoordin": 11385, + "Ġholy": 11386, + "Ġthreshold": 11387, + "Ġrhet": 11388, + "Ġserial": 11389, + "Ġemployers": 11390, + "ĠEverything": 11391, + "rah": 11392, + "Ġbother": 11393, + "Ġbrands": 11394, + "Value": 11395, + "ĠTed": 11396, + "ĠPlanet": 11397, + "Ġpink": 11398, + "ĠFurthermore": 11399, + "sa": 11400, + "PE": 11401, + "reck": 11402, + "ĠUSD": 11403, + "otte": 11404, + "Ġ&&": 11405, + "Ġlanded": 11406, + "gets": 11407, + "Ġproducers": 11408, + "Ġhealthcare": 11409, + "Ġdominant": 11410, + "Ġdestro": 11411, + "Ġamended": 11412, + "chron": 11413, + "Ġfits": 11414, + "ĠSyd": 11415, + "ĠAuthority": 11416, + "ATCH": 11417, + "Ġfights": 11418, + "ĠLLC": 11419, + "Ġ---": 11420, + "ĠCorp": 11421, + "Ġtoxic": 11422, + "specific": 11423, + "ĠCorn": 11424, + "ĠChel": 11425, + "Ġtelephone": 11426, + "ĠPant": 11427, + "Ġmysterious": 11428, + "aunch": 11429, + "odox": 11430, + "media": 
11431, + "Ġwitnesses": 11432, + "agu": 11433, + "Ġquestioned": 11434, + "ĠBrexit": 11435, + "ĠRemember": 11436, + "enez": 11437, + "Ġendorse": 11438, + "iatric": 11439, + "ĠIdent": 11440, + "Ġridiculous": 11441, + "110": 11442, + "Ġprayer": 11443, + "Ġscientist": 11444, + "Ġ1950": 11445, + "ĠAqu": 11446, + "Ġunderground": 11447, + "ĠUFC": 11448, + "mare": 11449, + "ĠLater": 11450, + "wich": 11451, + "Ġsubscrib": 11452, + "Ġhosts": 11453, + "Ġerr": 11454, + "Ġgrants": 11455, + "antom": 11456, + "Ġsummon": 11457, + "early": 11458, + "ĠClear": 11459, + "ĠPrim": 11460, + "Ġsuspension": 11461, + "Ġguaranteed": 11462, + "apper": 11463, + "Ġrice": 11464, + "ĠSean": 11465, + "ĠShin": 11466, + "Ġreferendum": 11467, + "Ġfled": 11468, + "rust": 11469, + "Ġ360": 11470, + "tery": 11471, + "Ġshocked": 11472, + "BR": 11473, + "ĠOil": 11474, + "ĠAllah": 11475, + "Ġpartly": 11476, + "Ġignor": 11477, + "Ġtransmission": 11478, + "Ġhomosexual": 11479, + "iversal": 11480, + "Ġhopefully": 11481, + "ãĤ¤": 11482, + "Ġlesson": 11483, + "Leg": 11484, + "Ġ..": 11485, + "Yet": 11486, + "table": 11487, + "appropri": 11488, + "rett": 11489, + "Ġboards": 11490, + "Ġincorrect": 11491, + "Ġbacteria": 11492, + "aru": 11493, + "amac": 11494, + "Ġsnap": 11495, + ".'\"": 11496, + "Ġparad": 11497, + "tem": 11498, + "heart": 11499, + "Ġavailability": 11500, + "Ġwisdom": 11501, + "Ġ(+": 11502, + "Ġpriest": 11503, + "ĠÂłĠÂł": 11504, + "Open": 11505, + "Ġspan": 11506, + "Ġparameter": 11507, + "Ġconvince": 11508, + "Ġ(%)": 11509, + "rac": 11510, + "Ġfo": 11511, + "Ġsafely": 11512, + "Ġconverted": 11513, + "ĠOlympic": 11514, + "Ġreserve": 11515, + "Ġhealing": 11516, + "ĠMine": 11517, + "Max": 11518, + "Ġinherent": 11519, + "ĠGraham": 11520, + "Ġintegrated": 11521, + "Dem": 11522, + "Ġpipeline": 11523, + "Ġapplying": 11524, + "Ġembed": 11525, + "ĠCharlie": 11526, + "Ġcave": 11527, + "2008": 11528, + "Ġconsensus": 11529, + "Ġrewards": 11530, + "Pal": 11531, + "ĠHTML": 11532, + "Ġpopularity": 11533, + "looking": 11534, + "ĠSword": 11535, + "ĠArts": 11536, + "')": 11537, + "Ġelectron": 11538, + "clusions": 11539, + "Ġintegrity": 11540, + "Ġexclusively": 11541, + "Ġgrace": 11542, + "Ġtorture": 11543, + "Ġburned": 11544, + "two": 11545, + "Ġ180": 11546, + "Produ": 11547, + "Ġentreprene": 11548, + "raphics": 11549, + "Ġgym": 11550, + "ricane": 11551, + "ĠTam": 11552, + "Ġadministrative": 11553, + "Ġmanufacturer": 11554, + "Ġvel": 11555, + "ĠNi": 11556, + "Ġisolated": 11557, + "ĠMedicine": 11558, + "Ġbackup": 11559, + "Ġpromoting": 11560, + "Ġcommander": 11561, + "Ġflee": 11562, + "ĠRussell": 11563, + "Ġforgotten": 11564, + "ĠMissouri": 11565, + "Ġresidence": 11566, + "mons": 11567, + "Ġresemb": 11568, + "Ġwand": 11569, + "Ġmeaningful": 11570, + "PT": 11571, + "Ġbol": 11572, + "Ġhelic": 11573, + "Ġwealthy": 11574, + "Ġrifle": 11575, + "strong": 11576, + "rowing": 11577, + "plan": 11578, + "asury": 11579, + "âĢ¦.": 11580, + "Ġexpanding": 11581, + "ĠHamilton": 11582, + "Ġreceives": 11583, + "SI": 11584, + "eatures": 11585, + "ĠAnim": 11586, + "REE": 11587, + "Put": 11588, + "Ġbriefly": 11589, + "rive": 11590, + "Ġstimul": 11591, + "Ġ``(": 11592, + "Ġ__": 11593, + "Ġchip": 11594, + "Ġhaz": 11595, + "Ġprize": 11596, + "ĠThings": 11597, + "ACE": 11598, + "ulin": 11599, + "dict": 11600, + "oku": 11601, + "Ġassociate": 11602, + "ockets": 11603, + "youtube": 11604, + "Story": 11605, + "ategory": 11606, + "Ġmild": 11607, + "ailing": 11608, + "ĠYe": 11609, + "Orig": 11610, + "ĠKa": 11611, + "orig": 11612, + "Ġpropaganda": 11613, + "Ġanonymous": 
11614, + "Ġstruggled": 11615, + "Ġoutrage": 11616, + "ATED": 11617, + "ĠBeijing": 11618, + "rary": 11619, + "Ġleather": 11620, + "Ġworlds": 11621, + "Ġbroader": 11622, + "125": 11623, + "idal": 11624, + "ĠBetter": 11625, + "Ġtear": 11626, + "Ext": 11627, + "Ġproposals": 11628, + "Ġiter": 11629, + "ĠSquad": 11630, + "Ġvolunt": 11631, + "mi": 11632, + "Did": 11633, + "ĠPu": 11634, + "pin": 11635, + "Ġspeakers": 11636, + "Ġborders": 11637, + "Ġfigured": 11638, + "='": 11639, + "Ġsimultaneously": 11640, + "aeda": 11641, + "Ġcharging": 11642, + "Ġurged": 11643, + "Ġconj": 11644, + "256": 11645, + "ĠGordon": 11646, + "merce": 11647, + "Ġdocumentary": 11648, + "Share": 11649, + "itol": 11650, + "ONE": 11651, + "ĠGarden": 11652, + "hatt": 11653, + "ĠThompson": 11654, + "aneous": 11655, + "apore": 11656, + "Ġtanks": 11657, + "Ġlessons": 11658, + "track": 11659, + "Ġoutstanding": 11660, + "Ġvolunteers": 11661, + "Ġspray": 11662, + "Ġmanagers": 11663, + "large": 11664, + "Ġcamps": 11665, + "Ġartificial": 11666, + "ĠRu": 11667, + "Ġbags": 11668, + "thal": 11669, + "Ġcompatible": 11670, + "ĠBlade": 11671, + "Ġfed": 11672, + "Ġargues": 11673, + "FI": 11674, + "Ġunfair": 11675, + "Ġcorn": 11676, + "Ġoffset": 11677, + "Ġdirections": 11678, + "Ġdisappointed": 11679, + "ĠConvention": 11680, + "Ġviewing": 11681, + "ME": 11682, + "ocity": 11683, + "Ġtowns": 11684, + "Ġlayers": 11685, + "Ġrolled": 11686, + "Ġjumped": 11687, + "Ġattribute": 11688, + "Ġunnecess": 11689, + "incoln": 11690, + "Ġsuppose": 11691, + "ĠNether": 11692, + "cha": 11693, + "Ġburied": 11694, + "Ġsixth": 11695, + "Ben": 11696, + "ressing": 11697, + "OUR": 11698, + "Ġwound": 11699, + "Ġcycl": 11700, + "Ġmechanisms": 11701, + "Ġcongressional": 11702, + "ĠElement": 11703, + "Ġagreements": 11704, + "Ġdecor": 11705, + "Ġclosest": 11706, + "ĠMit": 11707, + "Google": 11708, + "}}": 11709, + "Ġmixture": 11710, + "Ġfluid": 11711, + "Sign": 11712, + "ĠScholar": 11713, + "Ġpist": 11714, + "asket": 11715, + "abling": 11716, + "Ġracing": 11717, + "hero": 11718, + "riel": 11719, + "assy": 11720, + "Ġcheaper": 11721, + "ben": 11722, + "Ġvertical": 11723, + "amacare": 11724, + "ĠReading": 11725, + "gments": 11726, + "Ġhelicop": 11727, + "Ġsacrifice": 11728, + "aya": 11729, + "paren": 11730, + "VA": 11731, + "ĠLes": 11732, + "ĠStudio": 11733, + "Ġviolations": 11734, + "ĠAnna": 11735, + "acer": 11736, + "é¾": 11737, + "ĠRat": 11738, + "ĠBeck": 11739, + "ĠDick": 11740, + "ĠACT": 11741, + "Ġcomposition": 11742, + "Ġtexture": 11743, + "ĠOwn": 11744, + "Ġsmartphone": 11745, + "ĠNA": 11746, + "Ġforb": 11747, + "import": 11748, + "Ġdefending": 11749, + "ilst": 11750, + "rer": 11751, + "Ġoh": 11752, + "ĠJeremy": 11753, + "Ġbanking": 11754, + "ceptions": 11755, + "Ġrespective": 11756, + "/.": 11757, + "Ġdrinks": 11758, + "ĠWi": 11759, + "Ġbands": 11760, + "ĠLiverpool": 11761, + "Ġgrip": 11762, + "ĠBuy": 11763, + "Ġopenly": 11764, + "Ġreviewed": 11765, + "pert": 11766, + "Ġverify": 11767, + "ĠCole": 11768, + "ĠWales": 11769, + "MO": 11770, + "Ġunpre": 11771, + "Ġshelter": 11772, + "ĠImperial": 11773, + "Ġgui": 11774, + "ĠDak": 11775, + "Ġsuggestions": 11776, + "Ġexplicitly": 11777, + "Ġslave": 11778, + "Ġblockchain": 11779, + "Ġcompeting": 11780, + "Ġpromising": 11781, + "SON": 11782, + "Ġsoccer": 11783, + "Ġconstitution": 11784, + "429": 11785, + "Ġdistract": 11786, + "ĠUser": 11787, + "esides": 11788, + "ĠMethod": 11789, + "ĠTokyo": 11790, + "Ġaccompanied": 11791, + "Client": 11792, + "sur": 11793, + "alog": 11794, + "Ġidentification": 11795, + "Ġinvasion": 11796, 
+ "asma": 11797, + "Ġindustries": 11798, + "ppers": 11799, + "Ġsubtle": 11800, + "ĠUnit": 11801, + "natural": 11802, + "Ġsurvived": 11803, + "Ġflaw": 11804, + "ĺħ": 11805, + "ĠHoll": 11806, + "Ġdeficit": 11807, + "Ġtutorial": 11808, + "ĠChance": 11809, + "Ġarguing": 11810, + "Ġcontemporary": 11811, + "Ġintegration": 11812, + "forward": 11813, + "Ġtum": 11814, + "itis": 11815, + "Ġhiding": 11816, + "ĠDomin": 11817, + "ĠTan": 11818, + "ĠBuilding": 11819, + "ĠVin": 11820, + "Ġspokesperson": 11821, + "ĠNotes": 11822, + "Ġemerging": 11823, + "Ġpreparation": 11824, + "Ġprost": 11825, + "Ġsuspects": 11826, + "Ġautonom": 11827, + "Description": 11828, + "Ġdealt": 11829, + "ĠPear": 11830, + "Ġsteady": 11831, + "Ġdecreased": 11832, + "Ġsovere": 11833, + "ĠClin": 11834, + "Ġgradually": 11835, + "orses": 11836, + "ĠWAR": 11837, + "Serv": 11838, + "ãĤ¢": 11839, + "hr": 11840, + "Ġdirty": 11841, + "ĠBarn": 11842, + "ĠBC": 11843, + "Ġdil": 11844, + "Ġcalendar": 11845, + "Ġcompliance": 11846, + "Ġchamber": 11847, + "bb": 11848, + "Ġpassenger": 11849, + "ateful": 11850, + "ĠTitle": 11851, + "ĠSydney": 11852, + "ĠGot": 11853, + "Ġdarkness": 11854, + "Ġdefect": 11855, + "Ġpacked": 11856, + "assion": 11857, + "Ġgods": 11858, + "Ġharsh": 11859, + "ICK": 11860, + "leans": 11861, + "Ġalgorithm": 11862, + "Ġoxygen": 11863, + "Ġvisits": 11864, + "Ġblade": 11865, + "Ġkilomet": 11866, + "ĠKentucky": 11867, + "Ġkiller": 11868, + "Pack": 11869, + "enny": 11870, + "Ġdivine": 11871, + "Ġnomination": 11872, + "being": 11873, + "Ġengines": 11874, + "Ġcats": 11875, + "Ġbuffer": 11876, + "ĠPhill": 11877, + "Ġtraff": 11878, + "AGE": 11879, + "Ġtongue": 11880, + "Ġradiation": 11881, + "erer": 11882, + "mem": 11883, + "ĠExplicit": 11884, + "é¾į": 11885, + "Ġcouples": 11886, + "Ġphysics": 11887, + "ĠMcK": 11888, + "Ġpolitically": 11889, + "awks": 11890, + "ĠBloom": 11891, + "Ġworship": 11892, + "eger": 11893, + "uter": 11894, + "ĠFO": 11895, + "Ġmathemat": 11896, + "Ġsentenced": 11897, + "Ġdisk": 11898, + "ĠMarg": 11899, + "Ġ/*": 11900, + "PI": 11901, + "Ġoptional": 11902, + "Ġbabies": 11903, + "Ġseeds": 11904, + "ĠScottish": 11905, + "Ġthy": 11906, + "]]": 11907, + "ĠHitler": 11908, + "PH": 11909, + "ngth": 11910, + "Ġrecovered": 11911, + "inge": 11912, + "Ġpowder": 11913, + "Ġlips": 11914, + "Ġdesigner": 11915, + "Ġdisorders": 11916, + "Ġcourage": 11917, + "Ġchaos": 11918, + "\"},{\"": 11919, + "Ġcarrier": 11920, + "bably": 11921, + "High": 11922, + "ĠRT": 11923, + "esity": 11924, + "len": 11925, + "Ġroutes": 11926, + "uating": 11927, + "Fil": 11928, + "NOT": 11929, + "wall": 11930, + "sburgh": 11931, + "Ġengaging": 11932, + "ĠJavaScript": 11933, + "orer": 11934, + "lihood": 11935, + "Ġunions": 11936, + "ĠFederation": 11937, + "ĠTesla": 11938, + "Ġcompletion": 11939, + "ĠTa": 11940, + "Ġprivilege": 11941, + "ĠOrange": 11942, + "Ġneur": 11943, + "parency": 11944, + "Ġbones": 11945, + "Ġtitled": 11946, + "Ġprosecutors": 11947, + "ĠME": 11948, + "Ġengineer": 11949, + "ĠUniverse": 11950, + "ĠHig": 11951, + "nie": 11952, + "oard": 11953, + "Ġhearts": 11954, + "ĠGre": 11955, + "ussion": 11956, + "Ġministry": 11957, + "Ġpenet": 11958, + "ĠNut": 11959, + "ĠOw": 11960, + "ĠXP": 11961, + "instein": 11962, + "Ġbulk": 11963, + "System": 11964, + "icism": 11965, + "ĠMarketable": 11966, + "Ġpreval": 11967, + "Ġposter": 11968, + "Ġattending": 11969, + "urable": 11970, + "Ġlicensed": 11971, + "ĠGh": 11972, + "etry": 11973, + "ĠTradable": 11974, + "Ġblast": 11975, + "à¤": 11976, + "ĠTitan": 11977, + "elled": 11978, + "die": 11979, + "Have": 
11980, + "ĠFlame": 11981, + "Ġprofound": 11982, + "Ġparticipating": 11983, + "Ġanime": 11984, + "ĠEss": 11985, + "Ġspecify": 11986, + "Ġregarded": 11987, + "ĠSpell": 11988, + "Ġsons": 11989, + "owned": 11990, + "Ġmerc": 11991, + "Ġexperimental": 11992, + "lando": 11993, + "hs": 11994, + "ĠDungeon": 11995, + "inos": 11996, + "Ġcomply": 11997, + "ĠSystems": 11998, + "arth": 11999, + "Ġseized": 12000, + "local": 12001, + "ĠGirls": 12002, + "udo": 12003, + "oned": 12004, + "ĠFle": 12005, + "Ġconstructed": 12006, + "Ġhosted": 12007, + "Ġscared": 12008, + "actic": 12009, + "ĠIslands": 12010, + "ĠMORE": 12011, + "Ġbless": 12012, + "Ġblocking": 12013, + "Ġchips": 12014, + "Ġevac": 12015, + "Ps": 12016, + "Ġcorporation": 12017, + "Ġox": 12018, + "Ġlighting": 12019, + "Ġneighbors": 12020, + "ĠUb": 12021, + "aro": 12022, + "Ġbeef": 12023, + "ĠUber": 12024, + "Facebook": 12025, + "armed": 12026, + "itate": 12027, + "ĠRating": 12028, + "ĠQuick": 12029, + "Ġoccupied": 12030, + "Ġaims": 12031, + "ĠAdditionally": 12032, + "ĠInterest": 12033, + "Ġdramatically": 12034, + "Ġheal": 12035, + "Ġpainting": 12036, + "Ġengineers": 12037, + "MM": 12038, + "ĠMust": 12039, + "Ġquantity": 12040, + "Paul": 12041, + "Ġearnings": 12042, + "ĠPosts": 12043, + "stra": 12044, + "ãĥ¼ãĥ": 12045, + "Ġstance": 12046, + "Ġdropping": 12047, + "script": 12048, + "Ġdressed": 12049, + "Make": 12050, + "Ġjustify": 12051, + "ĠLtd": 12052, + "Ġprompted": 12053, + "Ġscrut": 12054, + "Ġspeeds": 12055, + "ĠGiants": 12056, + "omer": 12057, + "ĠEditor": 12058, + "Ġdescribing": 12059, + "ĠLie": 12060, + "mented": 12061, + "Ġnowhere": 12062, + "ocaly": 12063, + "Ġinstruction": 12064, + "fortable": 12065, + "Ġentities": 12066, + "Ġcm": 12067, + "ĠNatural": 12068, + "Ġinquiry": 12069, + "Ġpressed": 12070, + "izont": 12071, + "forced": 12072, + "Ġraises": 12073, + "ĠNetflix": 12074, + "ĠSide": 12075, + "Ġouter": 12076, + "Ġamongst": 12077, + "ims": 12078, + "owski": 12079, + "Ġclimb": 12080, + "never": 12081, + "Ġcombine": 12082, + "ding": 12083, + "Ġcompr": 12084, + "Ġsignificance": 12085, + "Ġremembered": 12086, + "ĠNevada": 12087, + "ĠTel": 12088, + "ĠScar": 12089, + "ĠWarriors": 12090, + "ĠJane": 12091, + "Ġcoup": 12092, + "bas": 12093, + "Ġterminal": 12094, + ",-": 12095, + "OH": 12096, + "Ġtension": 12097, + "Ġwings": 12098, + "ĠMyster": 12099, + "����": 12100, + "ĠUnlike": 12101, + "valid": 12102, + "vironments": 12103, + "ĠAli": 12104, + "Ġnaked": 12105, + "books": 12106, + "ĠMun": 12107, + "ĠGulf": 12108, + "Ġdensity": 12109, + "Ġdimin": 12110, + "Ġdesperate": 12111, + "Ġpresidency": 12112, + "Ġ1986": 12113, + "hy": 12114, + "IND": 12115, + "Ġunlock": 12116, + "imens": 12117, + "Ġhandled": 12118, + "ĠEb": 12119, + "Ġdisappeared": 12120, + "Ġgenre": 12121, + "Ġ1988": 12122, + "Ġdetermination": 12123, + "Stream": 12124, + "iko": 12125, + "apters": 12126, + "Ġacknowledge": 12127, + "Jan": 12128, + "Ġcapitalism": 12129, + "Pat": 12130, + "Ġ2020": 12131, + "Ġpainful": 12132, + "Ġcurve": 12133, + "Ġbombs": 12134, + "storm": 12135, + "ĠMetal": 12136, + "encer": 12137, + "ĠFig": 12138, + "ĠAaron": 12139, + "anches": 12140, + "Ġinspiration": 12141, + "Ġexhaust": 12142, + "tains": 12143, + "ashi": 12144, + "Ġdescript": 12145, + "Ġritual": 12146, + "ĠChelsea": 12147, + "Ġpromotion": 12148, + "ĠHung": 12149, + "ĠWard": 12150, + "iva": 12151, + "ĠET": 12152, + "Ġtoss": 12153, + "allow": 12154, + "ĠFrancis": 12155, + "Dep": 12156, + "Ġhappiness": 12157, + "ĠGlass": 12158, + "Ġbeta": 12159, + "Ġstrengthen": 12160, + "NE": 12161, + "oa": 12162, + 
"Ġbuttons": 12163, + "ĠMurray": 12164, + "Ġkicked": 12165, + "Quest": 12166, + "ĠTalk": 12167, + "ĠSeveral": 12168, + "ĠZero": 12169, + "Ġdrone": 12170, + "ulk": 12171, + "Ġcam": 12172, + "ĠMobile": 12173, + "Ġpreventing": 12174, + "Ġretro": 12175, + "ĠAx": 12176, + "Ġcruel": 12177, + "Ġfloat": 12178, + ".),": 12179, + "Ġfiling": 12180, + "ĠGrant": 12181, + "ĠBor": 12182, + "Ġrib": 12183, + "Ġchampionship": 12184, + "ĠMerc": 12185, + "Ġstyles": 12186, + "Ġcake": 12187, + "Ġbuilds": 12188, + "ĠSelf": 12189, + "iox": 12190, + "Ġepic": 12191, + "oyd": 12192, + "Bel": 12193, + "ĠStew": 12194, + ".(": 12195, + "ahu": 12196, + "ĠBeyond": 12197, + "Ġouts": 12198, + "Ġsolo": 12199, + "ĠTree": 12200, + "Ġpreserve": 12201, + "Ġtub": 12202, + "ARE": 12203, + "roc": 12204, + "ĠImpro": 12205, + "ĠWright": 12206, + "Ġbund": 12207, + "Ġtraged": 12208, + "Ġoccasional": 12209, + "bian": 12210, + "Second": 12211, + "rons": 12212, + "Ġinteractions": 12213, + "formed": 12214, + "sing": 12215, + "Ġowns": 12216, + "Ġhockey": 12217, + "General": 12218, + "Ġlogical": 12219, + "Ġexpend": 12220, + "Ġescal": 12221, + "ĠGriff": 12222, + "ĠCrown": 12223, + "ĠReserve": 12224, + "Ġstopping": 12225, + "Ġexcuse": 12226, + "second": 12227, + "Ġoperated": 12228, + "Ġreaches": 12229, + "ĠMalays": 12230, + "Ġpollution": 12231, + "ĠBrooklyn": 12232, + "Ġdelete": 12233, + "Ġhash": 12234, + "Block": 12235, + "aha": 12236, + "âĢ³": 12237, + "Ġshorter": 12238, + "piece": 12239, + ">>>": 13163, + "ĠMormon": 13164, + "tor": 13165, + "Ġparticles": 13166, + "ĠBart": 13167, + "ryption": 13168, + "Ġadmin": 13169, + "Ġsquee": 13170, + "VIDIA": 13171, + "Ġcreator": 13172, + "iameter": 13173, + "icular": 13174, + "NBC": 13175, + "Ġgrabbed": 13176, + "Ġnodd": 13177, + "Ġrated": 13178, + "Ġrotation": 13179, + "Ġgrasp": 13180, + "Ġexcessive": 13181, + "ĠEC": 13182, + "ĠWhit": 13183, + "Ġinventory": 13184, + "aults": 13185, + "ĠFB": 13186, + "Ġecosystem": 13187, + "Ġbillions": 13188, + "Ġventure": 13189, + "named": 13190, + "Ġdefender": 13191, + "oute": 13192, + "Instead": 13193, + "irable": 13194, + "War": 13195, + "Ġassumption": 13196, + "Ġbite": 13197, + "Ġearthqu": 13198, + "tail": 13199, + "space": 13200, + "Ġgifts": 13201, + "boys": 13202, + "Ġinevitable": 13203, + "Ġstructural": 13204, + "Ġbeneficial": 13205, + "Ġcompelling": 13206, + "hole": 13207, + "ervation": 13208, + "Ġcoat": 13209, + "oj": 13210, + "incarn": 13211, + "ĠYears": 13212, + "Ġdetermining": 13213, + "Ġrhetoric": 13214, + "Ġboundaries": 13215, + "Ġwhites": 13216, + "Ant": 13217, + "addy": 13218, + ")-": 13219, + "raham": 13220, + "etermin": 13221, + "Ġharvest": 13222, + "ĠConc": 13223, + "Ġlaptop": 13224, + "ĠMatch": 13225, + "Ġenjoying": 13226, + "cca": 13227, + "ollar": 13228, + "Ġtrips": 13229, + "Ġaddiction": 13230, + "ĠSak": 13231, + "Ġpowered": 13232, + "Ġcous": 13233, + "ĠRussians": 13234, + "iere": 13235, + "Ġretrie": 13236, + "quality": 13237, + "Ġdiffer": 13238, + "Ġkingdom": 13239, + "ĠLaur": 13240, + "ĠCapitol": 13241, + "Ġconclusions": 13242, + "ĠAltern": 13243, + "ĠNav": 13244, + "Ġtransparent": 13245, + "BER": 13246, + "Group": 13247, + "ĠComplete": 13248, + "Ġinfer": 13249, + "Ġintrig": 13250, + "Ġinsane": 13251, + "RO": 13252, + "ophob": 13253, + "isen": 13254, + "qual": 13255, + "Michael": 13256, + "Ġmuseum": 13257, + "ĠPope": 13258, + "Ġreset": 13259, + "rative": 13260, + "five": 13261, + "Ġaggreg": 13262, + "ittees": 13263, + "ository": 13264, + "Ġcarb": 13265, + "ĠRecord": 13266, + "Ġdecides": 13267, + "ĠFix": 13268, + "Ġexceptions": 13269, + 
"ĠCommissioner": 13270, + "uns": 13271, + "ĠEnvironmental": 13272, + "Ġlegendary": 13273, + "istence": 13274, + "Ġtunnel": 13275, + "km": 13276, + "Ġinsult": 13277, + "Ġtroll": 13278, + "Ġshake": 13279, + "Ġdetention": 13280, + "ques": 13281, + "ĠChrome": 13282, + "ĠFiles": 13283, + "Ġsubt": 13284, + "Ġprospects": 13285, + "Ġprol": 13286, + "render": 13287, + "proof": 13288, + "Ġperformances": 13289, + "Str": 13290, + "Ġhref": 13291, + "ername": 13292, + "Ġachievement": 13293, + "Ġfut": 13294, + "Full": 13295, + "ĠLeban": 13296, + "google": 13297, + "ãĥĪ": 13298, + "ampa": 13299, + "Maybe": 13300, + "Ġprojected": 13301, + "ĠEmb": 13302, + "Ġcolleg": 13303, + "Ġawards": 13304, + "ĠâĶ": 13305, + "Gold": 13306, + "ĠBlake": 13307, + "ĠRaj": 13308, + "ifting": 13309, + "Ġpending": 13310, + "Ġinstinct": 13311, + "Ġdevelopments": 13312, + "Connect": 13313, + "ĠMand": 13314, + "ĠWITH": 13315, + "ĠPhilippines": 13316, + "profile": 13317, + "Ġaltogether": 13318, + "ĠBund": 13319, + "ĠTD": 13320, + "oooo": 13321, + "amped": 13322, + "iph": 13323, + "Ġsteam": 13324, + "Ġoldest": 13325, + "Ġdetection": 13326, + "ulpt": 13327, + "Ġç": 13328, + "ĠWayne": 13329, + "2006": 13330, + "fa": 13331, + "Ġcircles": 13332, + "ĠFu": 13333, + "Ġdonors": 13334, + "appropriate": 13335, + "ĠDakota": 13336, + "jamin": 13337, + "Ġmotivated": 13338, + "Ġpurchases": 13339, + "ĠLouisiana": 13340, + "ĠSpl": 13341, + "Ġglobe": 13342, + "Ġ105": 13343, + "zip": 13344, + "call": 13345, + "Ġdepartments": 13346, + "Ġsustainable": 13347, + "105": 13348, + "ĠOP": 13349, + "ifiers": 13350, + "Ġprevented": 13351, + "Ġincomp": 13352, + "ĠCommander": 13353, + "Ġdominated": 13354, + "Ġ»": 13355, + "Ġinvested": 13356, + "Ġcomplexity": 13357, + "Ġincl": 13358, + "Ġensuring": 13359, + "Ġrealm": 13360, + "ync": 13361, + "ĠIndependent": 13362, + "rained": 13363, + "ĠJen": 13364, + "ĠFlight": 13365, + "Ġathe": 13366, + "Ġspeculation": 13367, + "ĠTE": 13368, + "ocate": 13369, + "tic": 13370, + "Ġplaint": 13371, + "herry": 13372, + "Ġtoy": 13373, + "Ġ111": 13374, + "Ġplates": 13375, + "status": 13376, + "ĠIsa": 13377, + "Ġdevoted": 13378, + "Cop": 13379, + "ĠES": 13380, + "255": 13381, + "urrency": 13382, + "Main": 13383, + "Ġslaves": 13384, + "Ġpepper": 13385, + "Ġquotes": 13386, + "Ġceiling": 13387, + "ĠFish": 13388, + "Ġtransformation": 13389, + "Ġfraction": 13390, + "Ġadvantages": 13391, + "Ġtoile": 13392, + "Ġstunning": 13393, + "Ġmoist": 13394, + "breaking": 13395, + "si": 13396, + "ĠLocation": 13397, + "ĠMedium": 13398, + "Ġtexts": 13399, + "Ġugly": 13400, + "Ġbio": 13401, + ".âĢĶ": 13402, + "ĠBased": 13403, + "Ġtrains": 13404, + "ĠWing": 13405, + "ĠAncient": 13406, + "ĠRecords": 13407, + "ĠHope": 13408, + "Special": 13409, + "adesh": 13410, + "obi": 13411, + "[/": 13412, + "Ġtemporarily": 13413, + "Ver": 13414, + "hu": 13415, + "oser": 13416, + "Ġovernight": 13417, + "Ġmamm": 13418, + "ĠTreasury": 13419, + "ĠVenezuel": 13420, + "ĠMega": 13421, + "Ġtar": 13422, + "Ġexpects": 13423, + "black": 13424, + "orph": 13425, + "\\\\\\\\": 13426, + "Ġacceptance": 13427, + "Ġradar": 13428, + "sis": 13429, + "Ġjunior": 13430, + "Ġframes": 13431, + "Ġobservation": 13432, + "acies": 13433, + "Power": 13434, + "ĠAdvanced": 13435, + "Mag": 13436, + "ologically": 13437, + "ĠMechan": 13438, + "Ġsentences": 13439, + "Ġanalysts": 13440, + "aughters": 13441, + "forcement": 13442, + "Ġvague": 13443, + "Ġclause": 13444, + "Ġdirectors": 13445, + "Ġevaluate": 13446, + "Ġcabinet": 13447, + "Matt": 13448, + "ĠClassic": 13449, + "Ang": 13450, + "Ġcler": 13451, + 
"ĠBuck": 13452, + "Ġresearcher": 13453, + "Ġ160": 13454, + "Ġpoorly": 13455, + "Ġexperiencing": 13456, + "ĠPed": 13457, + "ĠManhattan": 13458, + "Ġfreed": 13459, + "Ġthemes": 13460, + "advant": 13461, + "Ġnin": 13462, + "Ġpraise": 13463, + "104": 13464, + "ĠLibya": 13465, + "best": 13466, + "Ġtrusted": 13467, + "Ġcease": 13468, + "Ġdign": 13469, + "Direct": 13470, + "Ġbombing": 13471, + "Ġmigration": 13472, + "ĠSciences": 13473, + "Ġmunicipal": 13474, + "ĠAverage": 13475, + "Ġglory": 13476, + "Ġrevealing": 13477, + "Ġarena": 13478, + "Ġuncertainty": 13479, + "Ġbattlefield": 13480, + "iao": 13481, + "God": 13482, + "Ġcinem": 13483, + "rape": 13484, + "elle": 13485, + "apons": 13486, + "Ġlisting": 13487, + "Ġwaited": 13488, + "Ġspotted": 13489, + "keley": 13490, + "ĠAudio": 13491, + "eor": 13492, + "arding": 13493, + "idding": 13494, + "igma": 13495, + "ĠNeg": 13496, + "Ġlone": 13497, + "Ġ----": 13498, + "exe": 13499, + "deg": 13500, + "Ġtransf": 13501, + "Ġwash": 13502, + "Ġslavery": 13503, + "Ġexploring": 13504, + "ĠWW": 13505, + "atson": 13506, + "Ġencl": 13507, + "lies": 13508, + "ĠCreek": 13509, + "Ġwooden": 13510, + "Manager": 13511, + "ĠBrand": 13512, + "ummy": 13513, + "ĠArthur": 13514, + "Ġbureaucr": 13515, + "Ġblend": 13516, + "arians": 13517, + "Further": 13518, + "Ġsupposedly": 13519, + "Ġwinds": 13520, + "Ġ1979": 13521, + "Ġgravity": 13522, + "Ġanalyses": 13523, + "ĠTravel": 13524, + "ĠVeter": 13525, + "Ġdumb": 13526, + "Ġalternate": 13527, + "gal": 13528, + "Ġconsumed": 13529, + "Ġeffectiveness": 13530, + ".''": 13531, + "Ġpaths": 13532, + "onda": 13533, + "LA": 13534, + "ĠStrong": 13535, + "Ġenables": 13536, + "Ġescaped": 13537, + "Ġ\"\"": 13538, + "Ġ112": 13539, + "Ġ1983": 13540, + "Ġsmiled": 13541, + "Ġtendency": 13542, + "Fire": 13543, + "Ġpars": 13544, + "ĠRoc": 13545, + "Ġlake": 13546, + "Ġfitness": 13547, + "ĠAth": 13548, + "ĠHorn": 13549, + "Ġhier": 13550, + "Ġimpose": 13551, + "mother": 13552, + "Ġpension": 13553, + "icut": 13554, + "borne": 13555, + "iciary": 13556, + "._": 13557, + "ĠSU": 13558, + "Ġpolar": 13559, + "isy": 13560, + "engu": 13561, + "itialized": 13562, + "ATA": 13563, + "write": 13564, + "Ġexercises": 13565, + "ĠDiamond": 13566, + "otypes": 13567, + "Ġharmful": 13568, + "onz": 13569, + "Ġprinting": 13570, + "story": 13571, + "Ġexpertise": 13572, + "ĠGer": 13573, + "Ġtragedy": 13574, + "ĠFly": 13575, + "Ġdivid": 13576, + "ampire": 13577, + "stock": 13578, + "Mem": 13579, + "Ġreign": 13580, + "Ġunve": 13581, + "Ġamend": 13582, + "ĠProphet": 13583, + "Ġmutual": 13584, + "ĠFac": 13585, + "Ġreplacing": 13586, + "Har": 13587, + "ĠCircuit": 13588, + "Ġthroat": 13589, + "ĠShot": 13590, + "Ġbatteries": 13591, + "Ġtoll": 13592, + "Ġaddressing": 13593, + "ĠMedicaid": 13594, + "Ġpupp": 13595, + "ĠNar": 13596, + "olk": 13597, + "Ġequity": 13598, + "MR": 13599, + "ĠHispan": 13600, + "ĠLarge": 13601, + "mid": 13602, + "Dev": 13603, + "Ġexped": 13604, + "Ġdemo": 13605, + "ĠMarshall": 13606, + "ergus": 13607, + "Ġfiber": 13608, + "Ġdivorce": 13609, + "ĠCreate": 13610, + "Ġslower": 13611, + "ĠParker": 13612, + "ĠStudent": 13613, + "ĠTraining": 13614, + "Return": 13615, + "ĠTru": 13616, + "Ġcub": 13617, + "ĠReached": 13618, + "Ġpanic": 13619, + "Ġquarters": 13620, + "Ġrect": 13621, + "Ġtreating": 13622, + "Ġrats": 13623, + "ĠChristianity": 13624, + "oler": 13625, + "Ġsacred": 13626, + "Ġdeclare": 13627, + "ulative": 13628, + "eting": 13629, + "Ġdelivering": 13630, + "estone": 13631, + "Ġtel": 13632, + "ĠLarry": 13633, + "Ġmeta": 13634, + "accept": 13635, + "artz": 
13636, + "ĠRoger": 13637, + "handed": 13638, + "Ġheader": 13639, + "Ġtrapped": 13640, + "ĠCentury": 13641, + "Ġknocked": 13642, + "ĠOxford": 13643, + "Ġsurvivors": 13644, + "bot": 13645, + "Ġdemonstration": 13646, + "Ġdirt": 13647, + "Ġassists": 13648, + "OME": 13649, + "ĠDraft": 13650, + "ortunate": 13651, + "folio": 13652, + "pered": 13653, + "usters": 13654, + "gt": 13655, + "ĠLock": 13656, + "Ġjudicial": 13657, + "verted": 13658, + "Ġsecured": 13659, + "outing": 13660, + "ĠBooks": 13661, + "Ġhosting": 13662, + "Ġlifted": 13663, + "length": 13664, + "Ġjer": 13665, + "Ġwheels": 13666, + "ĠRange": 13667, + "umbnails": 13668, + "Ġdiagnosis": 13669, + "tech": 13670, + "ĠStewart": 13671, + "ĠPract": 13672, + "Ġnationwide": 13673, + "Ġdear": 13674, + "Ġobligations": 13675, + "Ġgrows": 13676, + "Ġmandatory": 13677, + "Ġsuspicious": 13678, + "!'": 13679, + "Apr": 13680, + "Great": 13681, + "Ġmortgage": 13682, + "Ġprosecutor": 13683, + "Ġeditorial": 13684, + "ĠKr": 13685, + "Ġprocessed": 13686, + "ungle": 13687, + "Ġflexibility": 13688, + "Earlier": 13689, + "ĠCart": 13690, + "ĠSug": 13691, + "Ġfocuses": 13692, + "Ġstartup": 13693, + "Ġbreach": 13694, + "ĠTob": 13695, + "cycle": 13696, + "ãĢĮ": 13697, + "rose": 13698, + "Ġbizarre": 13699, + "ãĢį": 13700, + "Ġvegetables": 13701, + "$$": 13702, + "Ġretreat": 13703, + "oshi": 13704, + "ĠShop": 13705, + "ĠGround": 13706, + "ĠStop": 13707, + "ĠHawaii": 13708, + "ĠAy": 13709, + "Perhaps": 13710, + "ĠBeaut": 13711, + "uffer": 13712, + "enna": 13713, + "Ġproductivity": 13714, + "Fixed": 13715, + "control": 13716, + "Ġabsent": 13717, + "ĠCampaign": 13718, + "Green": 13719, + "Ġidentifying": 13720, + "Ġregret": 13721, + "Ġpromoted": 13722, + "ĠSeven": 13723, + "Ġeru": 13724, + "neath": 13725, + "aughed": 13726, + "ĠPin": 13727, + "ĠLiving": 13728, + "Cost": 13729, + "omatic": 13730, + "mega": 13731, + "ĠNig": 13732, + "ocy": 13733, + "Ġinbox": 13734, + "Ġempire": 13735, + "Ġhorizont": 13736, + "Ġbranches": 13737, + "Ġmetaph": 13738, + "Active": 13739, + "edi": 13740, + "ĠFilm": 13741, + "ĠSomething": 13742, + "Ġmods": 13743, + "incial": 13744, + "ĠOriginal": 13745, + "Gen": 13746, + "Ġspirits": 13747, + "Ġearning": 13748, + "Hist": 13749, + "Ġriders": 13750, + "Ġsacrific": 13751, + "MT": 13752, + "ĠVA": 13753, + "ĠSalt": 13754, + "Ġoccupation": 13755, + "ĠMi": 13756, + "Ġdisg": 13757, + "lict": 13758, + "Ġnit": 13759, + "Ġnodes": 13760, + "eem": 13761, + "ĠPier": 13762, + "Ġhatred": 13763, + "psy": 13764, + "ãĥī": 13765, + "Ġtheater": 13766, + "Ġsophisticated": 13767, + "Ġdefended": 13768, + "Ġbesides": 13769, + "Ġthoroughly": 13770, + "ĠMedicare": 13771, + "Ġblamed": 13772, + "arently": 13773, + "Ġcrying": 13774, + "FOR": 13775, + "priv": 13776, + "Ġsinging": 13777, + "ĠIl": 13778, + "Ġcute": 13779, + "oided": 13780, + "olitical": 13781, + "ĠNeuro": 13782, + "å¤": 13783, + "Ġdonation": 13784, + "ĠEagles": 13785, + "ĠGive": 13786, + "Tom": 13787, + "Ġsubstantially": 13788, + "ĠLicense": 13789, + "ĠJa": 13790, + "Ġgrey": 13791, + "ĠAnimal": 13792, + "ĠER": 13793, + "ĠUnd": 13794, + "Ġkeen": 13795, + "Ġconclude": 13796, + "ĠMississippi": 13797, + "Engine": 13798, + "ĠStudios": 13799, + "Press": 13800, + "overs": 13801, + "llers": 13802, + "Ġ350": 13803, + "ĠRangers": 13804, + "Ġrou": 13805, + "erto": 13806, + "Ep": 13807, + "issa": 13808, + "ivan": 13809, + "Ġseal": 13810, + "ĠRegist": 13811, + "display": 13812, + "Ġweaken": 13813, + "uum": 13814, + "ĠCommons": 13815, + "ĠSay": 13816, + "Ġcultures": 13817, + "Ġlaughed": 13818, + "Ġslip": 13819, + 
"Ġtreatments": 13820, + "izable": 13821, + "mart": 13822, + "ĠRice": 13823, + "Ġbeast": 13824, + "Ġobesity": 13825, + "ĠLaure": 13826, + "iga": 13827, + "Which": 13828, + "holder": 13829, + "Ġelderly": 13830, + "Ġpays": 13831, + "Ġcomplained": 13832, + "Ġcrop": 13833, + "Ġproc": 13834, + "Ġexplosive": 13835, + "ĠFan": 13836, + "ĠArsenal": 13837, + "Author": 13838, + "eful": 13839, + "Ġmeals": 13840, + "Ġ(-": 13841, + "idays": 13842, + "Ġimagination": 13843, + "Ġannually": 13844, + "Ġms": 13845, + "asures": 13846, + "Head": 13847, + "ikh": 13848, + "matic": 13849, + "Ġboyfriend": 13850, + "ĠComputer": 13851, + "Ġbump": 13852, + "Ġsurge": 13853, + "ĠCraig": 13854, + "ĠKirk": 13855, + "Del": 13856, + "mediate": 13857, + "Ġscenarios": 13858, + "ĠMut": 13859, + "ĠStream": 13860, + "Ġcompetitors": 13861, + "ÙĦ": 13862, + "ĠStanford": 13863, + "ĠResources": 13864, + "azed": 13865, + "bage": 13866, + "Ġorganis": 13867, + "ĠRelease": 13868, + "Ġseparately": 13869, + "Ġhabits": 13870, + "Ġmeasurements": 13871, + "ĠClose": 13872, + "Ġaccompany": 13873, + "Ġgly": 13874, + "Ġtang": 13875, + "ĠRou": 13876, + "Ġplugin": 13877, + "Ġconvey": 13878, + "ĠChallenge": 13879, + "oots": 13880, + "jan": 13881, + "Ġcurs": 13882, + "ĠRelations": 13883, + "keeper": 13884, + "Ġapproaching": 13885, + "ping": 13886, + "Speaking": 13887, + "Ġarrangement": 13888, + "ĠVI": 13889, + "arettes": 13890, + "Ġaffecting": 13891, + "Ġpermits": 13892, + "because": 13893, + "Ġuseless": 13894, + "ĠHus": 13895, + "!!!!": 13896, + "Ġdestroying": 13897, + "Unfortunately": 13898, + "Ġfascinating": 13899, + "Sem": 13900, + "Ġelectoral": 13901, + "Ġtransparency": 13902, + "ĠChaos": 13903, + "Ġvolunteer": 13904, + "Ġstatistical": 13905, + "Ġactivated": 13906, + "rox": 13907, + "Web": 13908, + "HE": 13909, + "ĠHampshire": 13910, + "isive": 13911, + "Map": 13912, + "Ġtrash": 13913, + "ĠLawrence": 13914, + "stick": 13915, + "Cr": 13916, + "Ġrings": 13917, + "EXT": 13918, + "Ġoperational": 13919, + "opes": 13920, + "Does": 13921, + "ĠEvans": 13922, + "Ġwitnessed": 13923, + "Port": 13924, + "Ġlaunching": 13925, + "econom": 13926, + "wear": 13927, + "ĠParticip": 13928, + "umm": 13929, + "cules": 13930, + "ĠRAM": 13931, + "ĠTun": 13932, + "Ġassured": 13933, + "Ġbinary": 13934, + "Ġbetray": 13935, + "Ġexploration": 13936, + "ĠFel": 13937, + "Ġadmission": 13938, + "itated": 13939, + "Sy": 13940, + "Ġavoided": 13941, + "ĠSimulator": 13942, + "Ġcelebrated": 13943, + "ĠElectric": 13944, + "¥ŀ": 13945, + "Ġcluster": 13946, + "itzerland": 13947, + "health": 13948, + "Line": 13949, + "ĠNash": 13950, + "aton": 13951, + "Ġspare": 13952, + "Ġenterprise": 13953, + "ĠDIS": 13954, + "cludes": 13955, + "Ġflights": 13956, + "Ġregards": 13957, + "ĠÃĹ": 13958, + "half": 13959, + "Ġtrucks": 13960, + "Ġcontacts": 13961, + "Ġuncons": 13962, + "ĠClimate": 13963, + "Ġimmense": 13964, + "NEW": 13965, + "occ": 13966, + "ective": 13967, + "Ġembod": 13968, + "Ġpatrol": 13969, + "Ġbeside": 13970, + "Ġviable": 13971, + "Ġcreep": 13972, + "Ġtriggered": 13973, + "verning": 13974, + "Ġcomparable": 13975, + "ql": 13976, + "Ġgaining": 13977, + "asses": 13978, + "Ġ();": 13979, + "ĠGrey": 13980, + "ĠMLS": 13981, + "sized": 13982, + "Ġprosper": 13983, + "\"?": 13984, + "Ġpolling": 13985, + "Ġshar": 13986, + "ĠRC": 13987, + "Ġfirearm": 13988, + "orient": 13989, + "Ġfence": 13990, + "Ġvariations": 13991, + "giving": 13992, + "ĠPi": 13993, + "ospel": 13994, + "Ġpledge": 13995, + "Ġcure": 13996, + "Ġspy": 13997, + "Ġviolated": 13998, + "Ġrushed": 13999, + "Ġstroke": 14000, + "ĠBlog": 
14001, + "sels": 14002, + "ĠEc": 14003, + ",''": 14004, + "Ġpale": 14005, + "ĠCollins": 14006, + "terror": 14007, + "ĠCanadians": 14008, + "Ġtune": 14009, + "Ġlaboratory": 14010, + "Ġnons": 14011, + "tarian": 14012, + "Ġdisability": 14013, + "ĠGam": 14014, + "Ġsinger": 14015, + "alg": 14016, + "ĠSenior": 14017, + "Ġtraded": 14018, + "ĠWarrior": 14019, + "Ġinfring": 14020, + "ĠFranklin": 14021, + "Ġstrain": 14022, + "ĠSwedish": 14023, + "Ġseventh": 14024, + "ĠBenn": 14025, + "ĠTell": 14026, + "Ġsyndrome": 14027, + "Ġwondered": 14028, + "iden": 14029, + "++++": 14030, + "igo": 14031, + "Ġpurple": 14032, + "Ġjournalism": 14033, + "Ġrebel": 14034, + "Ġfu": 14035, + "blog": 14036, + "Ġinvite": 14037, + "rencies": 14038, + "ĠContact": 14039, + "Israel": 14040, + "ĠContent": 14041, + "Ġcheer": 14042, + "Ġbedroom": 14043, + "ĠEngineering": 14044, + "ĠQueens": 14045, + "Ġdwell": 14046, + "ĠPlayStation": 14047, + "ĠDim": 14048, + "ĠColon": 14049, + "lr": 14050, + "Ġoperates": 14051, + "Ġmotivation": 14052, + "USA": 14053, + "astered": 14054, + "Core": 14055, + "ĠTruth": 14056, + "olo": 14057, + "OSE": 14058, + "ĠMemory": 14059, + "Ġpredec": 14060, + "Ġanarch": 14061, + "Ġ1920": 14062, + "ĠYam": 14063, + "è": 14064, + "bid": 14065, + "Ġgrateful": 14066, + "Ġexcitement": 14067, + "Ġtreasure": 14068, + "Ġlongest": 14069, + "ctive": 14070, + "Ġdeserves": 14071, + "Ġreserves": 14072, + "Ġcops": 14073, + "ĠOttawa": 14074, + "ĠEgyptian": 14075, + "anked": 14076, + "Ġartif": 14077, + "Ġhypothesis": 14078, + ":/": 14079, + "Ġpurchasing": 14080, + "Ġlovely": 14081, + "HP": 14082, + "Ġdivide": 14083, + "Ġstrictly": 14084, + "Ġquestioning": 14085, + "Ġtaxpayers": 14086, + "ĠJoy": 14087, + "Ġrolls": 14088, + "ĠHeavy": 14089, + "Ġports": 14090, + "Ġmagnetic": 14091, + "Ġinflamm": 14092, + "Ġbrush": 14093, + "tics": 14094, + "âĪĴ": 14095, + "Ġbottles": 14096, + "ppy": 14097, + "Ġpadd": 14098, + "ãĤ¯": 14099, + "million": 14100, + "Ġdevastating": 14101, + "Ġcompiled": 14102, + "Ġmedication": 14103, + "Ġtwelve": 14104, + "ĠPerry": 14105, + "Space": 14106, + "imb": 14107, + "your": 14108, + "Ġleaked": 14109, + "ĠTar": 14110, + "Ġunity": 14111, + "Ġinfected": 14112, + "Ġtraveled": 14113, + "IDE": 14114, + "ĠMcDonald": 14115, + "txt": 14116, + "ĠPrinc": 14117, + "Ġinterven": 14118, + "ĠTaiwan": 14119, + "ĠPow": 14120, + "Ġbearing": 14121, + "ĠThread": 14122, + "Ġzones": 14123, + "izards": 14124, + "unks": 14125, + "Chapter": 14126, + "llor": 14127, + "Ġ·": 14128, + "Ġwounds": 14129, + "Ġdiscretion": 14130, + "Ġsucceeded": 14131, + "iking": 14132, + "Ġiconic": 14133, + "Call": 14134, + "Ġscreening": 14135, + "ĠMis": 14136, + "icts": 14137, + "Ġministers": 14138, + "Ġseparation": 14139, + "Player": 14140, + "Ġbip": 14141, + "Ġbeloved": 14142, + "Ġcounting": 14143, + "ĠEye": 14144, + "around": 14145, + "inging": 14146, + "Ġtablet": 14147, + "Ġoffence": 14148, + "inance": 14149, + "have": 14150, + "ĠInfo": 14151, + "ĠNinja": 14152, + "Ġprotective": 14153, + "ĠCass": 14154, + "Mac": 14155, + "ĠQuality": 14156, + "North": 14157, + "Ġic": 14158, + "ĠCuba": 14159, + "ĠChronicle": 14160, + "ĠProperty": 14161, + "Ġfastest": 14162, + "otos": 14163, + "ĠGerm": 14164, + "OWN": 14165, + "Ġboom": 14166, + "ĠStanley": 14167, + "erguson": 14168, + "Ġclever": 14169, + "Ġenters": 14170, + "mode": 14171, + "terior": 14172, + "ĠSens": 14173, + "Ġlinear": 14174, + "ARK": 14175, + "Ġcomparing": 14176, + "Ġpurely": 14177, + "Ġsafer": 14178, + "ĠPotter": 14179, + "Ġcups": 14180, + "RT": 14181, + "Ġgluc": 14182, + "Ġattributed": 14183, + 
"Ġdupl": 14184, + "ĠPap": 14185, + "Ġprecious": 14186, + "Ġpa": 14187, + "ictionary": 14188, + "ĠTig": 14189, + "ĠToo": 14190, + "olutions": 14191, + "stan": 14192, + "Ġrobots": 14193, + "Ġlobb": 14194, + "Ġstatute": 14195, + "Ġprevention": 14196, + "western": 14197, + "160": 14198, + "ĠActive": 14199, + "ĠMaria": 14200, + "hal": 14201, + "None": 14202, + "ellar": 14203, + "ĠKB": 14204, + "ĠPartners": 14205, + "ĠSingle": 14206, + "ĠFollowing": 14207, + "ango": 14208, + "acious": 14209, + "Ġthou": 14210, + "Ġkg": 14211, + "Ġinfluential": 14212, + "ĠFriends": 14213, + "Sur": 14214, + "ainted": 14215, + "Ġforums": 14216, + "Ġstarter": 14217, + "Ġcitizenship": 14218, + "ĠElection": 14219, + "onge": 14220, + "otation": 14221, + "osph": 14222, + ";;;;": 14223, + "utical": 14224, + "pur": 14225, + "eren": 14226, + "Ġaccusations": 14227, + "bitious": 14228, + "abbit": 14229, + "ĠOrd": 14230, + "Posted": 14231, + "irk": 14232, + "Ġsensitivity": 14233, + "iche": 14234, + "ĠAmy": 14235, + "ĠFab": 14236, + "Ġsummit": 14237, + "Ġpedest": 14238, + "Ġrubber": 14239, + "Ġagricultural": 14240, + "Ġcancel": 14241, + "AE": 14242, + "Ġinaug": 14243, + "Ġcontam": 14244, + "Ġfirmly": 14245, + "iw": 14246, + "stage": 14247, + "ĠKan": 14248, + "Ġtier": 14249, + "Ġinvention": 14250, + "Ġtranslated": 14251, + "ĠRules": 14252, + "Box": 14253, + "Twitter": 14254, + "IDS": 14255, + "Ġpizza": 14256, + "Ġdebug": 14257, + "ĠDrop": 14258, + "vs": 14259, + "Ġhorses": 14260, + "big": 14261, + "Ġboring": 14262, + "Ġhood": 14263, + "ĠMcCain": 14264, + "atched": 14265, + "ĠBros": 14266, + "Ġskip": 14267, + "Ġessay": 14268, + "stat": 14269, + "ĠLegends": 14270, + "Ġammunition": 14271, + "auc": 14272, + "Ġshooter": 14273, + "Ġunh": 14274, + "Ġsupplied": 14275, + "Ġgeneric": 14276, + "ĠSK": 14277, + "iban": 14278, + "yrics": 14279, + "Ġ255": 14280, + "Ġclimbing": 14281, + "Former": 14282, + "Ġflip": 14283, + "Ġjumping": 14284, + "Ġfrustration": 14285, + "ĠTerry": 14286, + "Ġneighborhoods": 14287, + "Ġmedian": 14288, + "bean": 14289, + "Ġbrains": 14290, + "Following": 14291, + "Ġshaped": 14292, + "Ġdraws": 14293, + "Ġaltered": 14294, + "Jack": 14295, + "Ġrecipes": 14296, + "Ġskilled": 14297, + "wealth": 14298, + "achi": 14299, + "election": 14300, + "Ġbehaviors": 14301, + "deals": 14302, + "ĠUntil": 14303, + "Fe": 14304, + "Ġdeclaration": 14305, + "marks": 14306, + "ĠBetween": 14307, + "celona": 14308, + "Ġreson": 14309, + "Ġbubble": 14310, + "Among": 14311, + "Ġimperial": 14312, + "GS": 14313, + "Ġfeminist": 14314, + "2005": 14315, + "ĠKyle": 14316, + "Ġaccounting": 14317, + "ĠTele": 14318, + "ĠTyr": 14319, + "Ġconnecting": 14320, + "Ġrehab": 14321, + "ĠPred": 14322, + "sim": 14323, + "Ġmeantime": 14324, + "Ġphysician": 14325, + "MW": 14326, + "ĠCampbell": 14327, + "ĠBrandon": 14328, + "Ġcontributing": 14329, + "ĠRule": 14330, + "ĠWeight": 14331, + "ĠNap": 14332, + "Ġinteractive": 14333, + "Ġvag": 14334, + "Ġhelmet": 14335, + "ĠComb": 14336, + "four": 14337, + "Ġshipped": 14338, + "Ġcompleting": 14339, + "ĠPD": 14340, + "PDATE": 14341, + "Ġspreading": 14342, + "Ġscary": 14343, + "erving": 14344, + "ĠGas": 14345, + "Ġfrank": 14346, + "school": 14347, + "Ġromantic": 14348, + "Ġstabil": 14349, + "Rob": 14350, + "Ġaccurately": 14351, + "Ġacute": 14352, + "ĠHann": 14353, + "Ġsymbols": 14354, + "Ġcivilization": 14355, + "ĠAW": 14356, + "Ġlightning": 14357, + "Ġconsiders": 14358, + "Ġvenue": 14359, + "Ġ×": 14360, + "Ġoven": 14361, + "ĠSF": 14362, + "his": 14363, + "Ġnu": 14364, + "ĠLearn": 14365, + "Ġpeoples": 14366, + "Ġstd": 14367, + 
"Ġslee": 14368, + "Ġslic": 14369, + "ĠStatistics": 14370, + "Ġcorners": 14371, + "ĠBaker": 14372, + "Ġ:)": 14373, + "mentation": 14374, + "olver": 14375, + "Ġlaughing": 14376, + "ĠTodd": 14377, + "onde": 14378, + "ĠHills": 14379, + "Ġnuts": 14380, + "ĠWoman": 14381, + "plane": 14382, + "Ġliver": 14383, + "ĠInside": 14384, + "Sorry": 14385, + "Ġagrees": 14386, + "Ġfundament": 14387, + "ĠFisher": 14388, + "Ġauction": 14389, + "Ġthreads": 14390, + "glas": 14391, + "ĠBasic": 14392, + "ĠNat": 14393, + "Ġlacking": 14394, + "Ġcelebration": 14395, + "ju": 14396, + "Ġsilly": 14397, + "Euro": 14398, + "Ġtatt": 14399, + "ighty": 14400, + "controlled": 14401, + "Test": 14402, + "ĠSingh": 14403, + "Ġrage": 14404, + "Ġrhyth": 14405, + "offic": 14406, + "ĠPhantom": 14407, + "Ġheadlines": 14408, + "Ġresponding": 14409, + "ĠMorning": 14410, + "Ġvitamin": 14411, + "Ġboots": 14412, + "ĠSite": 14413, + "alin": 14414, + "pi": 14415, + "Ġviral": 14416, + "ĠUC": 14417, + "DER": 14418, + "ĠSex": 14419, + "Ġstocks": 14420, + "current": 14421, + "Ġchurches": 14422, + "ĠRare": 14423, + "ĠMurphy": 14424, + "Ġdenial": 14425, + "ĠGaming": 14426, + "Ġtoug": 14427, + "Ġnick": 14428, + "Ġmakers": 14429, + "ĠRonald": 14430, + "Ġgenerous": 14431, + "ĠDoc": 14432, + "ĠMorris": 14433, + "Ġtransformed": 14434, + "ĠNormal": 14435, + "Ġ104": 14436, + "ĠKickstarter": 14437, + "ĠUpon": 14438, + "Online": 14439, + "ĠIRS": 14440, + "Ġwrap": 14441, + "Ġloving": 14442, + "Ġarrives": 14443, + "ĠDue": 14444, + "Ġheter": 14445, + "ĠMade": 14446, + "Ġrental": 14447, + "Ġbelongs": 14448, + "Ġattorneys": 14449, + "Ġcrops": 14450, + "Ġmatched": 14451, + "ulum": 14452, + "oline": 14453, + "109": 14454, + "Ġdispar": 14455, + "Ġbuyers": 14456, + "ĠCambridge": 14457, + "Ġethics": 14458, + "roups": 14459, + "Ġjustified": 14460, + "Ġmarginal": 14461, + "Ġrespected": 14462, + "winning": 14463, + "Ġnodded": 14464, + "ĠSerge": 14465, + "ĠFormer": 14466, + "Craft": 14467, + "################": 14468, + "ĠWarner": 14469, + "Ġdash": 14470, + "ete": 14471, + "Ġentert": 14472, + "ĠEscape": 14473, + "outheast": 14474, + "Ġknees": 14475, + "ĠBomb": 14476, + "Ġrug": 14477, + "Pass": 14478, + "Ġattitudes": 14479, + "government": 14480, + "ĠPrior": 14481, + "Ġqualities": 14482, + "Ġnotification": 14483, + "ĠPhone": 14484, + "lie": 14485, + "Ġanticipated": 14486, + "ĠCombat": 14487, + "ĠBarry": 14488, + "Ġ1982": 14489, + "Users": 14490, + "oner": 14491, + "Ġcomputing": 14492, + "ĠConnecticut": 14493, + "Ġlesser": 14494, + "Ġpeers": 14495, + "ĠCu": 14496, + "Ġtechnically": 14497, + "Ġsubmission": 14498, + "ĠUniversal": 14499, + "Ġmanually": 14500, + "ourge": 14501, + "Ġrespondents": 14502, + "ĠBTC": 14503, + "ĠHost": 14504, + "Ġfare": 14505, + "ĠBird": 14506, + "Ġreceipt": 14507, + "also": 14508, + "Ġjack": 14509, + "Ġagriculture": 14510, + "Ġskull": 14511, + "Ġ!=": 14512, + "Ġpassive": 14513, + "ĠCI": 14514, + "Ġsocieties": 14515, + "Ġreminded": 14516, + "Ġinterference": 14517, + "Buy": 14518, + "Ġâľ": 14519, + "gon": 14520, + "Ġscrutiny": 14521, + "ĠWitch": 14522, + "Ġconducting": 14523, + "Ġãĥ": 14524, + "Ġexchanges": 14525, + "ĠMitchell": 14526, + "Ġinhabit": 14527, + "Ġtwist": 14528, + "BD": 14529, + "Ġwherever": 14530, + "groupon": 14531, + "Ġjokes": 14532, + "ĠBenjamin": 14533, + "ĠRandom": 14534, + "frame": 14535, + "ĠLions": 14536, + "Ġhighlighted": 14537, + "ĠArkansas": 14538, + "Ent": 14539, + "Ġpile": 14540, + "Ġprelim": 14541, + "gs": 14542, + "minded": 14543, + "Ġfelony": 14544, + "ĠGA": 14545, + "ĠLuck": 14546, + "Ġpractically": 14547, + "ĠBos": 
14548, + "Ġactress": 14549, + "Dam": 14550, + "ĠBou": 14551, + "Ġvisa": 14552, + "Ġembedded": 14553, + "Ġhybrid": 14554, + "Ġearliest": 14555, + "Ġsooner": 14556, + "social": 14557, + "ĠHA": 14558, + "Ġsteep": 14559, + "Ġdisadvant": 14560, + "Ġexploit": 14561, + "ĠEgg": 14562, + "ĠUltra": 14563, + "Ġnecessity": 14564, + "Local": 14565, + "iege": 14566, + "Ġdated": 14567, + "Ġmasses": 14568, + "Ġsubscription": 14569, + "pless": 14570, + "Ġanonym": 14571, + "Ġpresumably": 14572, + "Blue": 14573, + "Their": 14574, + "asketball": 14575, + "ĠPhilip": 14576, + "Ġcomed": 14577, + "loaded": 14578, + "rane": 14579, + "Ġreflection": 14580, + "China": 14581, + "Ġextends": 14582, + "Ġforming": 14583, + "Ġunders": 14584, + "2001": 14585, + "Ġgrat": 14586, + "Ġconcentrations": 14587, + "Ġinsulin": 14588, + "Ġsecular": 14589, + "Ġwhilst": 14590, + "Ġwinners": 14591, + "Advertisements": 14592, + "Ġdeliberately": 14593, + "ĠWorking": 14594, + "Ġsink": 14595, + "etics": 14596, + "dale": 14597, + "Ġmandate": 14598, + "Ġgram": 14599, + "Ġvacation": 14600, + "Ġwarnings": 14601, + "ripp": 14602, + "ĠTHAT": 14603, + "Ġcommentary": 14604, + "Ġintu": 14605, + "Ġaest": 14606, + "Ġreasoning": 14607, + "Ġbreakdown": 14608, + "ĠZombie": 14609, + "Ġ-->": 14610, + "ĠPolitical": 14611, + "cott": 14612, + "Ġthrust": 14613, + "Ġtechnological": 14614, + "Ġdeciding": 14615, + "Ġtrafficking": 14616, + "Long": 14617, + "Welcome": 14618, + "prising": 14619, + "ĠCommunications": 14620, + "Ġendors": 14621, + "Ġswift": 14622, + "Ġmetabol": 14623, + "coins": 14624, + "resa": 14625, + "ĠHTTP": 14626, + "Ġenroll": 14627, + "ĠHappy": 14628, + "usr": 14629, + "intage": 14630, + "Ġ[\"": 14631, + "uably": 14632, + "ĠMaterial": 14633, + "Ġrepeal": 14634, + "Sept": 14635, + "kh": 14636, + "ĠModi": 14637, + "Ġunderneath": 14638, + "ĠIL": 14639, + "shore": 14640, + "Ġdiagnosed": 14641, + "aceutical": 14642, + "Ġshower": 14643, + "aux": 14644, + "ĠSwitch": 14645, + "ĠStrength": 14646, + "Ġjihad": 14647, + "national": 14648, + "Ġtrauma": 14649, + "ussy": 14650, + "oni": 14651, + "Ġconsolid": 14652, + "Ġcalories": 14653, + "ĠFlynn": 14654, + "agged": 14655, + "168": 14656, + "ĠPink": 14657, + "Ġfulfill": 14658, + "Ġchains": 14659, + "Ġnotably": 14660, + "ĠAV": 14661, + "Life": 14662, + "ĠChuck": 14663, + "mus": 14664, + "ĠUrban": 14665, + "ĠHend": 14666, + "Ġdeposit": 14667, + "ĠSad": 14668, + "Ġaffair": 14669, + "ORK": 14670, + "ieval": 14671, + "ĠFDA": 14672, + "Ġtrop": 14673, + "ĠOverall": 14674, + "Ġvirtue": 14675, + "Ġsatisfaction": 14676, + "aund": 14677, + "Ġlun": 14678, + "ĠSwitzerland": 14679, + "ĠOperation": 14680, + "process": 14681, + "Ġshook": 14682, + "Ġcounties": 14683, + "leased": 14684, + "ĠCharlotte": 14685, + "112": 14686, + "Ġtranscript": 14687, + "Ġredd": 14688, + "push": 14689, + "ĠHey": 14690, + "ĠAnalysis": 14691, + "[\"": 14692, + "Ġalternatives": 14693, + "ardless": 14694, + "Ġeleph": 14695, + "Ġprejud": 14696, + "ĠLeaf": 14697, + "Having": 14698, + "ĠHub": 14699, + "Ġexpressions": 14700, + "ĠVolume": 14701, + "Ġshocking": 14702, + "ĠReds": 14703, + "Ġreadily": 14704, + "Ġplanets": 14705, + "adata": 14706, + "Ġcollapsed": 14707, + "ĠMadrid": 14708, + "Ġirrit": 14709, + "ipper": 14710, + "ĠEnc": 14711, + "ĠWire": 14712, + "Ġbuzz": 14713, + "ĠGP": 14714, + "asha": 14715, + "Ġaccidentally": 14716, + "uru": 14717, + "Ġfrustrated": 14718, + "ĠSA": 14719, + "Ġhungry": 14720, + "ĠHuff": 14721, + "Ġlabels": 14722, + "anto": 14723, + "ĠEP": 14724, + "Ġbarriers": 14725, + ")|": 14726, + "ĠBerkeley": 14727, + "ĠJets": 14728, + 
"Ġpairs": 14729, + "ĠLan": 14730, + "James": 14731, + "ĠBear": 14732, + "Ġhumor": 14733, + "ĠLiberty": 14734, + "Ġmagnitude": 14735, + "Ġaging": 14736, + "ĠMason": 14737, + "Ġfriendship": 14738, + "umbling": 14739, + "Ġemerge": 14740, + "Ġnewspapers": 14741, + "Ġambitious": 14742, + "ĠRichards": 14743, + "aternal": 14744, + "Ġ1981": 14745, + "Ġcookies": 14746, + "Ġsculpt": 14747, + "Ġpursuit": 14748, + "Location": 14749, + "Ġscripts": 14750, + "pc": 14751, + "Ġarrangements": 14752, + "Ġdiameter": 14753, + "Ġloses": 14754, + "amation": 14755, + "Ġliqu": 14756, + "ĠJake": 14757, + "arette": 14758, + "Ġunderstands": 14759, + "ĠZen": 14760, + "vm": 14761, + "Ġapprove": 14762, + "Ġwip": 14763, + "Ġultra": 14764, + "Ġintend": 14765, + "ĠDI": 14766, + "ascular": 14767, + "Ġstays": 14768, + "ĠKor": 14769, + "ĠKl": 14770, + "Ġinvesting": 14771, + "La": 14772, + "Ġbelieving": 14773, + "bad": 14774, + "mouth": 14775, + "Ġtaxpayer": 14776, + "ãĥĥ": 14777, + "ĠQuebec": 14778, + "Ġlap": 14779, + "ĠSwiss": 14780, + "drop": 14781, + "Ġdrain": 14782, + "iri": 14783, + "etc": 14784, + "ften": 14785, + "ĠNex": 14786, + "Ġstraw": 14787, + "Ġscreaming": 14788, + "Ġcounted": 14789, + "Ġdamaging": 14790, + "Ġambassador": 14791, + "century": 14792, + "Ġprox": 14793, + "Ġarrests": 14794, + "uv": 14795, + "ilateral": 14796, + "ĠCharg": 14797, + "Ġprescribed": 14798, + "Ġindependently": 14799, + "Ġfierce": 14800, + "ĠBaby": 14801, + "Ġbrave": 14802, + "Ġsuits": 14803, + "=>": 14804, + "Ġbaseline": 14805, + "ĠRate": 14806, + "Ġislands": 14807, + "Ġ((": 14808, + "green": 14809, + "ixels": 14810, + "Ġnamely": 14811, + "ĠVillage": 14812, + "than": 14813, + "amy": 14814, + "Version": 14815, + "gmail": 14816, + "entials": 14817, + "ĠSud": 14818, + "ĠMelbourne": 14819, + "Ġarriving": 14820, + "Ġquantum": 14821, + "eff": 14822, + "ropolitan": 14823, + "Tri": 14824, + "Ġfuneral": 14825, + "ĠIR": 14826, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 14827, + "ĠCob": 14828, + "itably": 14829, + "Ġturb": 14830, + "Ġcombo": 14831, + "Review": 14832, + "Ġdeployment": 14833, + "uity": 14834, + "ĠBott": 14835, + "Ġinvisible": 14836, + "Ġrendering": 14837, + "Ġunlocked": 14838, + "Ġaqu": 14839, + "ĠVladimir": 14840, + "Ġpad": 14841, + "ĠBrain": 14842, + "ĠLegacy": 14843, + "dragon": 14844, + "ĠKurdish": 14845, + "Ġsounded": 14846, + "Ġdetained": 14847, + "ĠDM": 14848, + "gary": 14849, + "Ġdaughters": 14850, + "Ġdisturbing": 14851, + "uka": 14852, + "ĠParad": 14853, + "Ġtast": 14854, + "Ġunfortunate": 14855, + "Ġul": 14856, + "emin": 14857, + "Ġattendance": 14858, + "trl": 14859, + "Ġparks": 14860, + "ĠMemorial": 14861, + "ĠAlice": 14862, + "othy": 14863, + "guard": 14864, + "ĠDise": 14865, + "ĠShan": 14866, + "ĠForum": 14867, + "Rich": 14868, + "Ġshifted": 14869, + "uez": 14870, + "Ġlighter": 14871, + "ĠMagn": 14872, + "Ġcod": 14873, + "Sch": 14874, + "hammad": 14875, + "Pub": 14876, + "350": 14877, + "ĠPokemon": 14878, + "Ġprototype": 14879, + "Ġunre": 14880, + "Base": 14881, + "ĠStudents": 14882, + "ĠReply": 14883, + "ĠCommunist": 14884, + "Ġgau": 14885, + "ĠTyler": 14886, + "IZ": 14887, + "Ġparticipated": 14888, + "Ġsuprem": 14889, + "ĠDetails": 14890, + "Ġvessels": 14891, + "rod": 14892, + "Ġtribe": 14893, + "keep": 14894, + "Ġassumptions": 14895, + "Ġpound": 14896, + "Ġcrude": 14897, + "ĠAvailable": 14898, + "Ġswimming": 14899, + "Ġinclusion": 14900, + "Ġadvances": 14901, + "culation": 14902, + "Ġconservation": 14903, + "Ġoverd": 14904, + "ĠBuffalo": 14905, + "Article": 14906, + "edge": 14907, + "Ġawa": 14908, + "ĠMadison": 14909, + 
"Ġsidew": 14910, + "Ġcatast": 14911, + "ĠKrist": 14912, + "ucle": 14913, + "ĠHighway": 14914, + "ĠTerror": 14915, + "Ġactivation": 14916, + "Ġunconscious": 14917, + "ĠSatan": 14918, + "ĠSusan": 14919, + "illery": 14920, + "Ġarranged": 14921, + "iop": 14922, + "Ġrumors": 14923, + "urring": 14924, + "think": 14925, + "ĠKeith": 14926, + "ĠKind": 14927, + "Ġavoiding": 14928, + "byn": 14929, + "nut": 14930, + "ĠSpeaker": 14931, + "rus": 14932, + "names": 14933, + "Ġguilt": 14934, + "ĠOlympics": 14935, + "Ġsail": 14936, + "ĠMes": 14937, + "levant": 14938, + "ĠColumbus": 14939, + "aft": 14940, + "City": 14941, + "South": 14942, + "ĠHarvey": 14943, + "ĠPun": 14944, + "Several": 14945, + "Ġmentally": 14946, + "Ġimpress": 14947, + "mount": 14948, + "ĠUbuntu": 14949, + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ": 14950, + "ĠSuperman": 14951, + "ĠMPs": 14952, + "Ġintentions": 14953, + "ĠRacing": 14954, + "Ġlikelihood": 14955, + "Ġ240": 14956, + "Total": 14957, + "Ġtoys": 14958, + "ĠWatson": 14959, + "Ġurge": 14960, + "Lear": 14961, + "ĠPaper": 14962, + "Ġoccurring": 14963, + "ĠBeng": 14964, + "ĠCert": 14965, + "Ġstones": 14966, + "Tim": 14967, + "ĠTwin": 14968, + "zb": 14969, + "ĠDynam": 14970, + "Ġpolitician": 14971, + "kens": 14972, + "ĠEnterprise": 14973, + "UTERS": 14974, + "Ġabol": 14975, + "Ġrefresh": 14976, + "Ġarbitrary": 14977, + "pection": 14978, + "Ġtroubles": 14979, + "Ġ});": 14980, + "tv": 14981, + "Ġpilots": 14982, + "Ġdistribute": 14983, + "Ġaudit": 14984, + "Ġpause": 14985, + "original": 14986, + "Ġrivals": 14987, + "£": 14988, + "Fig": 14989, + "TL": 14990, + "abil": 14991, + "rying": 14992, + "Lin": 14993, + "ioned": 14994, + "lon": 14995, + "Ġfancy": 14996, + "Ġcrashed": 14997, + "Ġtract": 14998, + "Ġshed": 14999, + "Ġconsume": 15000, + "Based": 15001, + "download": 15002, + "init": 15003, + "Ġvoltage": 15004, + "Introdu": 15005, + "Ġcondemned": 15006, + "ĠFinance": 15007, + "respect": 15008, + "Ġexcluded": 15009, + "Ġestablishing": 15010, + "heric": 15011, + "Ġheritage": 15012, + "Ġspectacular": 15013, + "Ġunst": 15014, + "ĠSnowden": 15015, + "ĠLane": 15016, + "San": 15017, + "Ġprotections": 15018, + "struction": 15019, + "incinn": 15020, + "Ġmacro": 15021, + "Custom": 15022, + "iosity": 15023, + "Ġesp": 15024, + "Ġfunctioning": 15025, + "Ġmush": 15026, + "Ġpuzzle": 15027, + "Ġethical": 15028, + "Mal": 15029, + "Ġgoverning": 15030, + "ĠFerguson": 15031, + "Ġrestored": 15032, + "Ġstressed": 15033, + "ĠCounter": 15034, + "ĠKas": 15035, + "clip": 15036, + "ANS": 15037, + "Ġseiz": 15038, + "UK": 15039, + "byss": 15040, + "oldown": 15041, + "api": 15042, + "Ġpermanently": 15043, + "ounters": 15044, + "West": 15045, + "Through": 15046, + "Light": 15047, + "atoes": 15048, + "Ġneat": 15049, + "Ġcord": 15050, + "urer": 15051, + "Ġseverely": 15052, + "ĠAven": 15053, + "Ġinterrog": 15054, + "Ġtriple": 15055, + "Given": 15056, + "Number": 15057, + "Ġarise": 15058, + "Ġsher": 15059, + "plant": 15060, + "Ġflower": 15061, + "ĠCou": 15062, + "Ġate": 15063, + "Ġnewer": 15064, + "bul": 15065, + "Ġmeanwhile": 15066, + "ĠLair": 15067, + "Ġadjustment": 15068, + "ĠCopyright": 15069, + "Ġdivers": 15070, + "iological": 15071, + "Ġgamers": 15072, + "oat": 15073, + "Ġhistorically": 15074, + "Ġanalog": 15075, + "Ġlongtime": 15076, + "Ġprescription": 15077, + "ĠMist": 15078, + "ĠHyper": 15079, + "ĠMaine": 15080, + "ĠDeity": 15081, + "Ġmultipl": 15082, + "ĠReincarn": 15083, + "ĠHyd": 15084, + "ĠPic": 15085, + "Sil": 15086, + "rants": 15087, + "ĠCris": 15088, + ".;": 15089, + "({": 15090, + "ependence": 15091, + "Ġrecy": 15092, 
+ "ateur": 15093, + "Ġquad": 15094, + "Ġglob": 15095, + "Ġconced": 15096, + "team": 15097, + "Ġcapitalist": 15098, + "ĠLot": 15099, + "Ġroyal": 15100, + "ĠCyber": 15101, + "Ġblacks": 15102, + "metic": 15103, + "riv": 15104, + "ĠDanny": 15105, + "Ġspo": 15106, + "ĠRO": 15107, + "Ġanimated": 15108, + "rypted": 15109, + "ĠDeputy": 15110, + "Ġrendered": 15111, + "FE": 15112, + "Ġstreak": 15113, + "Ġclouds": 15114, + "ĠDoug": 15115, + "~~~~~~~~": 15116, + "Ġdiscour": 15117, + "ĠVeh": 15118, + "Ġpsychology": 15119, + "ĠJourney": 15120, + "Ġcrystal": 15121, + "ĠFrost": 15122, + "Ġsuspicion": 15123, + "Ġrelate": 15124, + "orus": 15125, + "ĠCrypt": 15126, + "ĠNVIDIA": 15127, + "comed": 15128, + "uting": 15129, + "incinnati": 15130, + "Ġvulnerability": 15131, + "ostic": 15132, + "Ġisolation": 15133, + "Ġcooling": 15134, + "ĠCoalition": 15135, + "Ġ119": 15136, + "Four": 15137, + "ĠDeal": 15138, + "Ġâī": 15139, + "semble": 15140, + "rament": 15141, + "ĠBarcelona": 15142, + "Ġ102": 15143, + "Ġcocaine": 15144, + "ocalypse": 15145, + "Feb": 15146, + "ogenic": 15147, + "Ġmutation": 15148, + "Ġcryptoc": 15149, + "ĠKel": 15150, + "ĠGit": 15151, + "ais": 15152, + "Ġsisters": 15153, + "ANK": 15154, + "Ġactivate": 15155, + "Ter": 15156, + "Ġdread": 15157, + "ylon": 15158, + "Ġpropri": 15159, + "Aust": 15160, + "ĠDefault": 15161, + "Ġoutdoor": 15162, + "Ġsheer": 15163, + "ceive": 15164, + "Ġgently": 15165, + "о": 15166, + "Program": 15167, + "ĠâĨĴ": 15168, + "Ġvegan": 15169, + "ĠCrus": 15170, + "Ġresponsibilities": 15171, + "ĠHR": 15172, + "OLD": 15173, + "Ġprevents": 15174, + "Ġstiff": 15175, + "ĠWere": 15176, + "Ġathletic": 15177, + "ĠScore": 15178, + "Ġ):": 15179, + "Ġcolumns": 15180, + "ĠLoc": 15181, + "available": 15182, + "ĠFram": 15183, + "ĠSessions": 15184, + "Ġcompanion": 15185, + "Ġpacks": 15186, + "140": 15187, + "ĠKnights": 15188, + "Ġfart": 15189, + "Ġstreams": 15190, + "Ġshore": 15191, + "Ġappeals": 15192, + "ĠPerformance": 15193, + "haul": 15194, + "ĠStra": 15195, + "ĠNag": 15196, + "103": 15197, + "ĠTransportation": 15198, + "BB": 15199, + "Ev": 15200, + "zan": 15201, + "Public": 15202, + "Ġtwin": 15203, + "ulsion": 15204, + "Mult": 15205, + "Ġelectro": 15206, + "Ġstatue": 15207, + "ationally": 15208, + "ĠNort": 15209, + "Ġinspection": 15210, + "/*": 15211, + "igue": 15212, + "Ġcompassion": 15213, + "ĠTales": 15214, + "ĠStein": 15215, + "ĠScreen": 15216, + "ĠBug": 15217, + "ĠLion": 15218, + "girl": 15219, + "Ġwithdrawal": 15220, + "Ġobjectives": 15221, + "Ġbloody": 15222, + "Ġpreliminary": 15223, + "Ġjacket": 15224, + "Ġdimensions": 15225, + "ĠCool": 15226, + "ĠOccup": 15227, + "Ġwreck": 15228, + "Ġdoubled": 15229, + "anking": 15230, + "Ġ1975": 15231, + "Ġglasses": 15232, + "ĠWang": 15233, + "prov": 15234, + "Path": 15235, + "connected": 15236, + "ĠMulti": 15237, + "ĠNorway": 15238, + "agonist": 15239, + "Ġfeared": 15240, + "Ġtouching": 15241, + "Ġarguably": 15242, + "¯¯¯¯¯¯¯¯": 15243, + "ĠNCAA": 15244, + "chem": 15245, + "Ġspat": 15246, + "ĠWWE": 15247, + "ĠCel": 15248, + "igger": 15249, + "Ġattacker": 15250, + "ĠJoin": 15251, + "object": 15252, + "etta": 15253, + "Ġeliminated": 15254, + "det": 15255, + "Ġdestruct": 15256, + "ĠLucas": 15257, + "ctuary": 15258, + "180": 15259, + "ĠBrady": 15260, + "ĠBlues": 15261, + "Bay": 15262, + "aukee": 15263, + "Ġtimeline": 15264, + "Ġdelegates": 15265, + "written": 15266, + "ufficient": 15267, + "Ġshapes": 15268, + "Copyright": 15269, + "ouble": 15270, + "service": 15271, + "Ġpione": 15272, + "Ġcolleges": 15273, + "Ġrows": 15274, + "Ġspite": 15275, + 
"Ġassessed": 15276, + "360": 15277, + "Ġlease": 15278, + "Ġconfidential": 15279, + "cker": 15280, + "ĠManning": 15281, + "ĠVoice": 15282, + "Ġsealed": 15283, + "Ġcalculate": 15284, + "NO": 15285, + "ĠAssistant": 15286, + "Ġteenager": 15287, + "ulent": 15288, + "atherine": 15289, + "Ġmock": 15290, + "Ġdiamond": 15291, + "Ġfest": 15292, + "Ġswitched": 15293, + "Ġresume": 15294, + "ĠPuerto": 15295, + "Ġlanes": 15296, + "iration": 15297, + "ĠSimilarly": 15298, + "Ġrod": 15299, + "ĠSel": 15300, + "ĠPalace": 15301, + "ĠLimited": 15302, + "eous": 15303, + "Ġvariant": 15304, + "Ġward": 15305, + "Ġ))": 15306, + "Show": 15307, + "OOK": 15308, + "Alex": 15309, + "ĠNep": 15310, + "bris": 15311, + "ĠWikipedia": 15312, + "Ġexceptional": 15313, + "Ġmanages": 15314, + "ĠDraw": 15315, + "Again": 15316, + "Ġcopper": 15317, + "utt": 15318, + "Ġexports": 15319, + "Ġportfolio": 15320, + "Ġelevated": 15321, + "Rated": 15322, + "ĠOtherwise": 15323, + "ĠTact": 15324, + "ĠShel": 15325, + "ĠTX": 15326, + "\"âĢĶ": 15327, + "Ġresur": 15328, + "ĠWa": 15329, + "venant": 15330, + "Ġmonetary": 15331, + "people": 15332, + "Email": 15333, + "Ġfifty": 15334, + "ĠSweet": 15335, + "ĠMalaysia": 15336, + "Ġconfusing": 15337, + "ĠRio": 15338, + "uda": 15339, + "utenant": 15340, + "\");": 15341, + "Ġpraised": 15342, + "Ġvolumes": 15343, + "turn": 15344, + "Ġmature": 15345, + "Ġnonprofit": 15346, + "Ġpassionate": 15347, + "ĠPrivate": 15348, + "Ġ103": 15349, + "Ġdescend": 15350, + "ç¥ŀ": 15351, + "uffy": 15352, + "headed": 15353, + "Whether": 15354, + "rien": 15355, + "zech": 15356, + "beit": 15357, + "Ġchrom": 15358, + "ĠMcM": 15359, + "Ġdancing": 15360, + "Ġeleg": 15361, + "ĠNoticed": 15362, + "115": 15363, + "Ġadvocacy": 15364, + "ENTS": 15365, + "ambling": 15366, + "ĠMinor": 15367, + "ĠFinn": 15368, + "Ġpriorities": 15369, + "Ġthereof": 15370, + "ĠStage": 15371, + "ĠRogers": 15372, + "Ġsubstitute": 15373, + "ĠJar": 15374, + "ĠJefferson": 15375, + "Ġlightly": 15376, + "102": 15377, + "ĠLisa": 15378, + "uits": 15379, + "ysical": 15380, + "Ġshifts": 15381, + "Ġdrones": 15382, + "Ġworkplace": 15383, + "Ġresid": 15384, + "ensed": 15385, + "ahn": 15386, + "Ġpreferences": 15387, + "server": 15388, + "Ġdebates": 15389, + "doc": 15390, + "ĠGods": 15391, + "Ġhelicopter": 15392, + "Ġhonour": 15393, + "Ġconsiderably": 15394, + "eded": 15395, + "ĠFemale": 15396, + "ĠAnne": 15397, + "Ġreun": 15398, + "ĠFace": 15399, + "ĠHallow": 15400, + "ĠBudget": 15401, + "Ġcondemn": 15402, + "Ġtender": 15403, + "Prof": 15404, + "ocratic": 15405, + "ĠTurner": 15406, + "ĠAgric": 15407, + "Ġ1976": 15408, + "Ġapt": 15409, + "disc": 15410, + "ĠFighter": 15411, + "ĠAur": 15412, + "Ġgarbage": 15413, + "input": 15414, + "ĠKarl": 15415, + "ĠOliver": 15416, + "ĠLanguage": 15417, + "kn": 15418, + "Non": 15419, + "ĠClar": 15420, + "Ġtraditions": 15421, + "Ġadvertisement": 15422, + "ĠSor": 15423, + "Ġarchive": 15424, + "Ġvillages": 15425, + "750": 15426, + "Ġimplementing": 15427, + "waukee": 15428, + "Ġdietary": 15429, + "Ġswitching": 15430, + "Republic": 15431, + "Ġvelocity": 15432, + "Ġcit": 15433, + "ĠAwards": 15434, + "Ġfinancing": 15435, + "Ġlasted": 15436, + ")]": 15437, + "Ġreminder": 15438, + "Person": 15439, + "Ġprecision": 15440, + "Ġdesigners": 15441, + "ĠFried": 15442, + "ĠBorder": 15443, + "Ġtragic": 15444, + "Ġwield": 15445, + "Ġinitiatives": 15446, + "ĠTank": 15447, + "wer": 15448, + "Ġjoins": 15449, + "Ro": 15450, + "inery": 15451, + "Ġarrow": 15452, + "Ġgenerating": 15453, + "founder": 15454, + "Ġsearches": 15455, + "Ġrandomly": 15456, + "Access": 
15457, + "Ġbatch": 15458, + "Ġposed": 15459, + "lat": 15460, + "Ġpursuing": 15461, + "asa": 15462, + "Ġtestified": 15463, + "forming": 15464, + "ĠShar": 15465, + "wiki": 15466, + "ĠEither": 15467, + "Sometimes": 15468, + "Ġsenators": 15469, + "ĠJohnny": 15470, + "ĠTaliban": 15471, + "ĠGPS": 15472, + "\":\"/": 15473, + "ãģ®å": 15474, + "Ġanalyzed": 15475, + "ĠRubio": 15476, + "ĠMovement": 15477, + "opard": 15478, + "iii": 15479, + "Stand": 15480, + "fight": 15481, + "Ġignoring": 15482, + "iang": 15483, + "ĠGN": 15484, + "soever": 15485, + "ĠSTAT": 15486, + "Ġrefusing": 15487, + "Ġsweat": 15488, + "Ġbay": 15489, + "PORT": 15490, + "irmed": 15491, + "aky": 15492, + "Ġdispro": 15493, + "Ġlabeled": 15494, + "Ġ108": 15495, + "Hello": 15496, + "Ġpleasant": 15497, + "aba": 15498, + "Ġtriumph": 15499, + "Ġaboard": 15500, + "Ġincom": 15501, + "ĠCrow": 15502, + "lett": 15503, + "Ġfolk": 15504, + "Ġchase": 15505, + "``": 15506, + "ĠBrus": 15507, + "Ġteens": 15508, + "cue": 15509, + "Ġterrain": 15510, + "hyd": 15511, + "ilight": 15512, + "ORY": 15513, + "Support": 15514, + "ews": 15515, + "lli": 15516, + "raints": 15517, + "ĠCand": 15518, + "Ġabused": 15519, + "achment": 15520, + "larg": 15521, + "Bas": 15522, + "ĠCancer": 15523, + "Ġ1978": 15524, + "Ġsupporter": 15525, + "access": 15526, + "ĠTermin": 15527, + "ĠTampa": 15528, + "ĠANY": 15529, + "Ġnewest": 15530, + "ĠCriminal": 15531, + "edu": 15532, + "Ġ1930": 15533, + "Ġadmits": 15534, + "Ġende": 15535, + "Ġfailures": 15536, + "urate": 15537, + "fulness": 15538, + "cycl": 15539, + "ĠSubject": 15540, + "Ġinfinite": 15541, + "three": 15542, + "WA": 15543, + "pit": 15544, + "ĠInstall": 15545, + "Rad": 15546, + "iliation": 15547, + "GM": 15548, + "Ġcontinent": 15549, + "Ġaccommodate": 15550, + "ĠClay": 15551, + "Ġpup": 15552, + "ĠFunction": 15553, + "Ġhammer": 15554, + "ĠAlberta": 15555, + "Ġrevised": 15556, + "Ġminorities": 15557, + "Ġmeasurement": 15558, + "Connell": 15559, + "Ġdisable": 15560, + "ĠMix": 15561, + "Incre": 15562, + "Ġfork": 15563, + "ĠRosen": 15564, + "Ġimplies": 15565, + "umblr": 15566, + "ANG": 15567, + "Ġproteins": 15568, + "Ġaggression": 15569, + "Ġfacilitate": 15570, + "SN": 15571, + "Ġillegally": 15572, + "uer": 15573, + "Ġacadem": 15574, + "Ġpuzz": 15575, + "ĠShift": 15576, + "pay": 15577, + "ollo": 15578, + "Ġaudiences": 15579, + "Build": 15580, + "Ġnoble": 15581, + "Ġsyntax": 15582, + "âĺħ": 15583, + "Ġbeam": 15584, + "ĠBed": 15585, + "ĠAld": 15586, + "Ġorigins": 15587, + "video": 15588, + "Ġ1977": 15589, + "ĠAssault": 15590, + "Ġgarage": 15591, + "Team": 15592, + "Ġverdict": 15593, + "Ġdwar": 15594, + "ĠVirtual": 15595, + "event": 15596, + "Keep": 15597, + "Ġsentiment": 15598, + "Ġwildlife": 15599, + "shirt": 15600, + "Ġburg": 15601, + "Ġrecommendation": 15602, + "represent": 15603, + "Ġgallery": 15604, + "owners": 15605, + "Ġscholar": 15606, + "Ġconvenience": 15607, + "ĠSwift": 15608, + "Ġconvinc": 15609, + "Cap": 15610, + "Ġwarfare": 15611, + "ĠVisual": 15612, + "Ġconstitute": 15613, + "Ġabort": 15614, + "ĠWeather": 15615, + "ĠLooking": 15616, + "ĠHem": 15617, + "Ġmartial": 15618, + "Ġincoming": 15619, + "etition": 15620, + "Ġtolerance": 15621, + "ĠCreated": 15622, + "Ġflows": 15623, + "ĠElder": 15624, + "Ġsouls": 15625, + "Ġfoul": 15626, + "ĠPain": 15627, + "ĠCAN": 15628, + "Ġ220": 15629, + "bc": 15630, + "hend": 15631, + "Ġgenius": 15632, + "Real": 15633, + "ĠWr": 15634, + "ometer": 15635, + "pad": 15636, + "Ġlimiting": 15637, + "ĠSi": 15638, + "ĠLore": 15639, + "ĠAdventures": 15640, + "Ġvaried": 15641, + "Disc": 15642, + 
"fin": 15643, + "ĠPersonal": 15644, + "Chris": 15645, + "Ġinvented": 15646, + "Ġdive": 15647, + "ĠRise": 15648, + "Ġoz": 15649, + "ĠComics": 15650, + "Ġexpose": 15651, + "ĠReb": 15652, + "letters": 15653, + "site": 15654, + "imated": 15655, + "Ġhacking": 15656, + "Ġeducated": 15657, + "ĠNobody": 15658, + "Ġdepri": 15659, + "Ġincentive": 15660, + "ãĤ·": 15661, + "Ġoversight": 15662, + "Ġtribes": 15663, + "ĠBelgium": 15664, + "Ġlicensing": 15665, + "ourt": 15666, + "Product": 15667, + "ahl": 15668, + "ĠGem": 15669, + "Ġspecialist": 15670, + "Ġcra": 15671, + "anners": 15672, + "ĠCorbyn": 15673, + "Ġ1973": 15674, + "READ": 15675, + "Ġsummar": 15676, + "Ġoverlook": 15677, + "ĠApplication": 15678, + "Ġinappropriate": 15679, + "Ġdownloaded": 15680, + "Que": 15681, + "ĠBears": 15682, + "Ġthumb": 15683, + "ĠCharacter": 15684, + "ĠReincarnated": 15685, + "ĠSid": 15686, + "Ġdemonstrates": 15687, + "sky": 15688, + "ĠBloomberg": 15689, + "ĠArray": 15690, + "ĠResults": 15691, + "ĠFourth": 15692, + "ĠEDT": 15693, + "ĠOscar": 15694, + "cend": 15695, + "Ġ106": 15696, + "ĠNULL": 15697, + "ĠHERE": 15698, + "match": 15699, + "ĠBrun": 15700, + "Ġglucose": 15701, + "ieg": 15702, + "egu": 15703, + "Ġcertified": 15704, + "Ġrelie": 15705, + "Ġhumanitarian": 15706, + "Ġprayers": 15707, + "King": 15708, + "Ġnan": 15709, + "hou": 15710, + "108": 15711, + "ulu": 15712, + "Ġrenewable": 15713, + "Ġdistinguish": 15714, + "Ġdense": 15715, + "ĠVent": 15716, + "ĠPackage": 15717, + "ĠBoss": 15718, + "Ġeditors": 15719, + "Ġmigr": 15720, + "Tra": 15721, + "ĠPeters": 15722, + "ĠArctic": 15723, + "2004": 15724, + "ĠCape": 15725, + "Ġlocally": 15726, + "Ġlasting": 15727, + "Ġhandy": 15728, + ".).": 15729, + "Pan": 15730, + "ĠRES": 15731, + "Index": 15732, + "Ġtensions": 15733, + "Ġformerly": 15734, + "Ġideological": 15735, + "Ġsensors": 15736, + "Ġdealers": 15737, + "Ġdefines": 15738, + "Sk": 15739, + "Ġproceeds": 15740, + "Ġproxy": 15741, + "azines": 15742, + "ĠBash": 15743, + "ĠPad": 15744, + "ĠCraft": 15745, + "ealous": 15746, + "Ġsheets": 15747, + "ometry": 15748, + "June": 15749, + "clock": 15750, + "TT": 15751, + "ĠTheatre": 15752, + "ĠBuzz": 15753, + "Ġchapters": 15754, + "Ġmillenn": 15755, + "Ġdough": 15756, + "ĠCongressional": 15757, + "Ġimagined": 15758, + "avior": 15759, + "Ġclinic": 15760, + "Ġ1945": 15761, + "Ġholder": 15762, + "root": 15763, + "olester": 15764, + "Ġrestart": 15765, + "BN": 15766, + "ĠHamas": 15767, + "ĠJob": 15768, + "Ġorb": 15769, + "Ġram": 15770, + "Ġdisclose": 15771, + "Ġtranslate": 15772, + "Ġimmigrant": 15773, + "Ġannoying": 15774, + "Ġtreaty": 15775, + "anium": 15776, + "ĠTea": 15777, + "ĠLegion": 15778, + "Ġcrowds": 15779, + "ĠBec": 15780, + "ĠAer": 15781, + "ohyd": 15782, + "Bro": 15783, + "Looking": 15784, + "Ġlbs": 15785, + "Ġaggress": 15786, + "Ġseam": 15787, + "Ġintercept": 15788, + "ĠMI": 15789, + "mercial": 15790, + "activ": 15791, + "ĠCit": 15792, + "Ġdimension": 15793, + "Ġconsistency": 15794, + "Ġrushing": 15795, + "ĠDouglas": 15796, + "Ġtrim": 15797, + "Install": 15798, + "icker": 15799, + "Ġshy": 15800, + "106": 15801, + "Ġmentions": 15802, + "pelled": 15803, + "ĠTak": 15804, + "cost": 15805, + "Ġclassroom": 15806, + "Ġfortune": 15807, + "driven": 15808, + "Ġunle": 15809, + "ĠWheel": 15810, + "Ġinvestor": 15811, + "ĠMasters": 15812, + "kit": 15813, + "Ġassociations": 15814, + "ĠEvolution": 15815, + "oping": 15816, + "uscript": 15817, + "Ġprovincial": 15818, + "ĠWalter": 15819, + "avi": 15820, + "SO": 15821, + "Ġunlimited": 15822, + "English": 15823, + "ĠCards": 15824, + "ĠEbola": 
15825, + "nered": 15826, + "Ġrevenge": 15827, + "Ġoutright": 15828, + "umper": 15829, + "Ġfitting": 15830, + "ĠSolid": 15831, + "Ġformally": 15832, + "Ġproblematic": 15833, + "Ġhazard": 15834, + "Ġencryption": 15835, + "Ġstraightforward": 15836, + "ĠAK": 15837, + "Ġpse": 15838, + "ĠOrb": 15839, + "ĠChamber": 15840, + "ĠMak": 15841, + "Contents": 15842, + "Ġloyalty": 15843, + "Ġlyrics": 15844, + "ĠSym": 15845, + "Ġwelcomed": 15846, + "Ġcooked": 15847, + "Ġmonop": 15848, + "Ġnurse": 15849, + "Ġmisleading": 15850, + "Ġeternal": 15851, + "Ġshifting": 15852, + "Ġ+=": 15853, + "Vis": 15854, + "Ġinstitutional": 15855, + "illary": 15856, + "Ġpant": 15857, + "VERT": 15858, + "ĠACC": 15859, + "ĠEnh": 15860, + "Ġincon": 15861, + "ĠREUTERS": 15862, + "Ġdonated": 15863, + "âĢ¦âĢ¦âĢ¦âĢ¦": 15864, + "Intern": 15865, + "Ġexhibit": 15866, + "Ġtire": 15867, + "ĠRic": 15868, + "ĠChampion": 15869, + "ĠMuhammad": 15870, + "NING": 15871, + "ĠSoccer": 15872, + "Ġmobility": 15873, + "Ġvarying": 15874, + "ĠMovie": 15875, + "Ġlord": 15876, + "oak": 15877, + "Field": 15878, + "Ġvector": 15879, + "usions": 15880, + "Ġscrap": 15881, + "Ġenabling": 15882, + "make": 15883, + "Tor": 15884, + ".*": 15885, + "||": 15886, + "ĠWebsite": 15887, + "ĠNPC": 15888, + "Ġsocialist": 15889, + "ĠBilly": 15890, + "ĠAdditional": 15891, + "Ġcargo": 15892, + "Ġfarms": 15893, + "ĠSoon": 15894, + "ĠPrize": 15895, + "Ġmidnight": 15896, + "Ġ900": 15897, + "seen": 15898, + "ĠSpot": 15899, + "Ġsheep": 15900, + "Ġsponsored": 15901, + "ĠHi": 15902, + "ĠJump": 15903, + "Ġ1967": 15904, + "Microsoft": 15905, + "ĠAgent": 15906, + "Ġcharts": 15907, + "dir": 15908, + "Ġadjacent": 15909, + "Ġtricks": 15910, + "Ġmanga": 15911, + "Ġexagger": 15912, + "/>": 15913, + "football": 15914, + "ĠFCC": 15915, + "GC": 15916, + "ĠTier": 15917, + "andra": 15918, + "OUND": 15919, + "%),": 15920, + "Ġfruits": 15921, + "VC": 15922, + "ĠAA": 15923, + "Rober": 15924, + "Ġmidst": 15925, + "âĹ": 15926, + "anka": 15927, + "Ġlegislature": 15928, + "ĠNeil": 15929, + "Ġtourists": 15930, + "\"\"": 15931, + "ĠWarning": 15932, + "ĠNevertheless": 15933, + "ĠOfficial": 15934, + "ĠWhatever": 15935, + "Ġmold": 15936, + "Ġdrafted": 15937, + "Ġsubstances": 15938, + "Ġbreed": 15939, + "Ġtags": 15940, + "ĠTask": 15941, + "Ġverb": 15942, + "Ġmanufactured": 15943, + "comments": 15944, + "ĠPolish": 15945, + "Prov": 15946, + "Ġdetermines": 15947, + "Obama": 15948, + "kers": 15949, + "Ġutterly": 15950, + "Ġsect": 15951, + "sche": 15952, + "ĠGates": 15953, + "ĠChap": 15954, + "Ġaluminum": 15955, + "Ġzombie": 15956, + "ĠTouch": 15957, + "ĠUP": 15958, + "Ġsatisfy": 15959, + "Ġpredomin": 15960, + "ascript": 15961, + "Ġelaborate": 15962, + "Ġ1968": 15963, + "Ġmeasuring": 15964, + "ĠVari": 15965, + "anyahu": 15966, + "Ġsir": 15967, + "ulates": 15968, + "idges": 15969, + "ickets": 15970, + "ĠSpencer": 15971, + "TM": 15972, + "oubted": 15973, + "Ġprey": 15974, + "Ġinstalling": 15975, + "ĠCab": 15976, + "reed": 15977, + "reated": 15978, + "Supp": 15979, + "Ġwrist": 15980, + "ĠKerry": 15981, + "107": 15982, + "ĠKle": 15983, + "ĠRachel": 15984, + "Ġcotton": 15985, + "ĠARE": 15986, + "ĠEle": 15987, + "Control": 15988, + "Ġloads": 15989, + "ĠDod": 15990, + "anas": 15991, + "bone": 15992, + "Ġclassical": 15993, + "ĠRegional": 15994, + "ĠInteg": 15995, + "VM": 15996, + "Ġdesires": 15997, + "Ġautism": 15998, + "supported": 15999, + "ĠMessage": 16000, + "Ġcompact": 16001, + "writer": 16002, + "Ġ109": 16003, + "ĠHurricane": 16004, + "cision": 16005, + "Ġcycles": 16006, + "Ġdrill": 16007, + "Ġcolleague": 16008, 
+ "Ġmaker": 16009, + "German": 16010, + "Ġmistaken": 16011, + "Sun": 16012, + "ĠGay": 16013, + "Ġwhatsoever": 16014, + "Ġsells": 16015, + "ĠAirl": 16016, + "liv": 16017, + "ĠOption": 16018, + "Ġsolved": 16019, + "Ġsectors": 16020, + "Ġhorizontal": 16021, + "Ġequation": 16022, + "ĠSkill": 16023, + "ĠBio": 16024, + "gement": 16025, + "ĠSnap": 16026, + "ĠLegal": 16027, + "Ġtrademark": 16028, + "Ġmakeup": 16029, + "Ġassembled": 16030, + "Ġsaves": 16031, + "ĠHalloween": 16032, + "ĠVermont": 16033, + "ĠFROM": 16034, + "Ġfarming": 16035, + "ĠPodcast": 16036, + "acceptable": 16037, + "ĠHigher": 16038, + "Ġasleep": 16039, + "ullivan": 16040, + "Ġreferen": 16041, + "ĠLev": 16042, + "Ġbullets": 16043, + "oko": 16044, + "HC": 16045, + "Ġstairs": 16046, + "Ġmaintains": 16047, + "ĠLower": 16048, + "ĠVi": 16049, + "Ġmarine": 16050, + "Ġacres": 16051, + "Ġcoordinator": 16052, + "ĠJoh": 16053, + "Ġcounterparts": 16054, + "ĠBrothers": 16055, + "Ġindict": 16056, + "bra": 16057, + "Ġchunk": 16058, + "Ġcents": 16059, + "Home": 16060, + "ĠMonth": 16061, + "Ġaccordingly": 16062, + "ifles": 16063, + "ĠGermans": 16064, + "ĠSyn": 16065, + "Hub": 16066, + "Ġeyeb": 16067, + "âĶĢâĶĢâĶĢâĶĢ": 16068, + "Ġranges": 16069, + "ĠHolland": 16070, + "ĠRobot": 16071, + "fc": 16072, + "Mike": 16073, + "Ġplasma": 16074, + "Ġswap": 16075, + "Ġathlete": 16076, + "ĠRams": 16077, + ",'\"": 16078, + "Ġinfections": 16079, + "Ġcorrid": 16080, + "Ġvib": 16081, + "Ġpatches": 16082, + "Ġtraditionally": 16083, + "Ġrevelation": 16084, + "Ġsweep": 16085, + "Ġglance": 16086, + "Ġinex": 16087, + "2003": 16088, + "ĠRaw": 16089, + "working": 16090, + "osures": 16091, + "ĠDat": 16092, + "ĠLynch": 16093, + "Ġleverage": 16094, + "ĠReid": 16095, + "Ġcorrelation": 16096, + "iances": 16097, + "avascript": 16098, + "Ġrepository": 16099, + "retty": 16100, + "Ġ1972": 16101, + "240": 16102, + "Ġoun": 16103, + "pol": 16104, + "ĠReed": 16105, + "Ġtactical": 16106, + "isite": 16107, + "Apple": 16108, + "ĠQuinn": 16109, + "Ġraped": 16110, + "illo": 16111, + "Europe": 16112, + "Ġalgorithms": 16113, + "ĠRodrig": 16114, + "iu": 16115, + "Ġillum": 16116, + "Ġfame": 16117, + "Ġintroducing": 16118, + "Ġdelays": 16119, + "ĠRaiders": 16120, + "Ġwhistle": 16121, + "Ġnovels": 16122, + "ĠReally": 16123, + "Ġderiv": 16124, + "Ġpublications": 16125, + "ĠNeither": 16126, + "ĠCommerce": 16127, + "Ġaston": 16128, + "language": 16129, + "Notes": 16130, + "ĠRoth": 16131, + "ĠFear": 16132, + "Ġmate": 16133, + "Ġparade": 16134, + "ĠQB": 16135, + "Ġmaneu": 16136, + "ĠCincinnati": 16137, + "mitting": 16138, + "Ġwaist": 16139, + "ĠRew": 16140, + "Ġdiscont": 16141, + "а": 16142, + "Ġstaring": 16143, + "Ġalias": 16144, + "Ġsecurities": 16145, + "Ġtoilet": 16146, + "ĠJedi": 16147, + "Ġunlaw": 16148, + "vised": 16149, + "////////": 16150, + "](": 16151, + "ĠWeiss": 16152, + "Ġprest": 16153, + "ĠCompan": 16154, + "Ġmemo": 16155, + "ĠGrace": 16156, + "July": 16157, + "ĠElite": 16158, + "center": 16159, + "ĠStay": 16160, + "Ġgalaxy": 16161, + "Ġtooth": 16162, + "ĠSettings": 16163, + "Ġsubjected": 16164, + "ãĤ¦": 16165, + "Ġlineback": 16166, + "Ġretailers": 16167, + "ĠWant": 16168, + "Ġdangers": 16169, + "Air": 16170, + "Ġvoluntary": 16171, + "eway": 16172, + "Ġinterpreted": 16173, + "otine": 16174, + "ç": 16175, + "Ġpel": 16176, + "Service": 16177, + "ĠEventually": 16178, + "Ġcareers": 16179, + "Ġthreaten": 16180, + "Ġmemor": 16181, + "ĠBradley": 16182, + "ancies": 16183, + "sn": 16184, + "ĠUnknown": 16185, + "National": 16186, + "Ġshadows": 16187, + "ailand": 16188, + "ĠDash": 16189, + 
"Everyone": 16190, + "izzard": 16191, + "March": 16192, + "=(": 16193, + "Ġpulls": 16194, + "Ġstranger": 16195, + "Ġbackwards": 16196, + "ĠBernard": 16197, + "imensional": 16198, + "Ġchron": 16199, + "Ġtheoretical": 16200, + "ktop": 16201, + "Ġware": 16202, + "ĠInvestig": 16203, + "ĠIniti": 16204, + "ĠOperations": 16205, + "oven": 16206, + "ocide": 16207, + "*/": 16208, + "Ġflames": 16209, + "ĠCash": 16210, + "shit": 16211, + "Ġcab": 16212, + "ĠAnaly": 16213, + "ĠSeah": 16214, + "Ġdefining": 16215, + "Ġordering": 16216, + "Ġimmun": 16217, + "Ġpersistent": 16218, + "ACH": 16219, + "Russian": 16220, + "mans": 16221, + "Ġhind": 16222, + "Ġphotography": 16223, + "©": 16224, + "Ġhug": 16225, + "Ġ107": 16226, + "ĠHence": 16227, + "iots": 16228, + "udeau": 16229, + "Ġsubsidies": 16230, + "Ġroutinely": 16231, + "ĠDevice": 16232, + "itic": 16233, + "Ġdisgust": 16234, + "lander": 16235, + "Ġ1940": 16236, + "Ġassignment": 16237, + "ĠBesides": 16238, + "wick": 16239, + "ĠDust": 16240, + "usc": 16241, + "structed": 16242, + "111": 16243, + "develop": 16244, + "Ġfond": 16245, + "Ġintersection": 16246, + "Ġdignity": 16247, + "Ġcommissioner": 16248, + "Without": 16249, + "reach": 16250, + "Ġcartoon": 16251, + "Ġscales": 16252, + "ãĥŃ": 16253, + "FIG": 16254, + "Ġsurveys": 16255, + "ĠIndonesia": 16256, + "Ġartwork": 16257, + "Ġunch": 16258, + "Ġcycling": 16259, + "unct": 16260, + "auer": 16261, + "orate": 16262, + "ĠObviously": 16263, + "Ġcharacterized": 16264, + "feld": 16265, + "Ġaffirm": 16266, + "Ġinnings": 16267, + "Ġé": 16268, + "Ġaliens": 16269, + "Ġcloth": 16270, + "etooth": 16271, + "ĠCertain": 16272, + "§": 16273, + "Ġdigest": 16274, + "know": 16275, + "ĠXL": 16276, + "Ġpredictions": 16277, + "Ġdin": 16278, + "WAR": 16279, + "Ġaftermath": 16280, + "Example": 16281, + "ĠSuccess": 16282, + "ĠThr": 16283, + "IGN": 16284, + "Ġminer": 16285, + "Bus": 16286, + "Ġclarity": 16287, + "heimer": 16288, + "ĠOUT": 16289, + "ĠSend": 16290, + "ĠCircle": 16291, + "ĠDiet": 16292, + "Ġpronounced": 16293, + "Ġcreators": 16294, + "Ġearthquake": 16295, + "attery": 16296, + "geons": 16297, + "Ġod": 16298, + "Ġlaying": 16299, + "orp": 16300, + "Ult": 16301, + "project": 16302, + "Ġundermin": 16303, + "Ġsequel": 16304, + "Sam": 16305, + "ĠDarkness": 16306, + "Ġreception": 16307, + "bull": 16308, + "YS": 16309, + "ĠVir": 16310, + "Ġsequences": 16311, + "ĠCoin": 16312, + "Ġoutfit": 16313, + "ĠWait": 16314, + "119": 16315, + "Ġdelivers": 16316, + "......": 16317, + "Ġblown": 16318, + "ĠEsc": 16319, + "ĠMath": 16320, + "perm": 16321, + "ĠUl": 16322, + "Ġglim": 16323, + "Ġfacial": 16324, + "Ġgreenhouse": 16325, + "Ġtokens": 16326, + "/-": 16327, + "ĠAnnual": 16328, + "ĠONE": 16329, + "Ġteenage": 16330, + "ĠPhysical": 16331, + "ĠLang": 16332, + "ĠCelt": 16333, + "Ġsued": 16334, + "ividually": 16335, + "Ġpatience": 16336, + "chair": 16337, + "regular": 16338, + "Ġaug": 16339, + "inv": 16340, + "except": 16341, + "ĠLil": 16342, + "Ġnest": 16343, + "fd": 16344, + "sum": 16345, + "ĠChase": 16346, + "Russia": 16347, + "ĠJennifer": 16348, + "Ġoffseason": 16349, + "Overall": 16350, + "Fore": 16351, + "Ġriot": 16352, + "Aud": 16353, + "former": 16354, + "Ġdefenders": 16355, + "ĠCT": 16356, + "iotic": 16357, + "ribly": 16358, + "Ġautomated": 16359, + "Ġpenis": 16360, + "Ġinsist": 16361, + "Ġdiagram": 16362, + "ĠSQL": 16363, + "ĠGarc": 16364, + "Ġwitch": 16365, + "client": 16366, + "ierra": 16367, + "ambers": 16368, + "Ġrecount": 16369, + "far": 16370, + "Very": 16371, + "osterone": 16372, + "Ġappreciated": 16373, + "ĠPerfect": 16374, 
+ "Section": 16375, + "Ġdoses": 16376, + "ocaust": 16377, + "Ġcostly": 16378, + "Ġgrams": 16379, + "ĠShi": 16380, + "Ġwrestling": 16381, + "Ġ1971": 16382, + "Ġtrophy": 16383, + "Ġnerve": 16384, + "ĠKaz": 16385, + "ĠExperience": 16386, + "Ġpledged": 16387, + "Ġplayback": 16388, + "Ġcreativity": 16389, + "bye": 16390, + "Ġattackers": 16391, + "Ġholders": 16392, + "ĠCoach": 16393, + "ĠPhD": 16394, + "Ġtransfers": 16395, + "Ġcolored": 16396, + "ĠHindu": 16397, + "Ġdrown": 16398, + "Ġlistened": 16399, + "ĠWA": 16400, + "iasm": 16401, + "PO": 16402, + "Ġappealing": 16403, + "Ġdisclosed": 16404, + "ĠChicken": 16405, + "agging": 16406, + "Ġpleaded": 16407, + "Ġnavigation": 16408, + "ĠReturns": 16409, + "Ġ[[": 16410, + "ROR": 16411, + "EA": 16412, + "Ġphotographer": 16413, + "ĠRider": 16414, + "ippers": 16415, + "Ġslice": 16416, + "Ġerect": 16417, + "Ġhed": 16418, + "issance": 16419, + "ĠVikings": 16420, + "urious": 16421, + "Ġappet": 16422, + "oubtedly": 16423, + "Child": 16424, + "Ġauthentic": 16425, + "oos": 16426, + "ĠMaking": 16427, + "Ġannouncing": 16428, + "Ġbod": 16429, + "Ġmeter": 16430, + "ĠNine": 16431, + "ĠRogue": 16432, + "Ġworkforce": 16433, + "Ġrenewed": 16434, + "Ġorganisations": 16435, + "acs": 16436, + "PLE": 16437, + "Short": 16438, + "Ġcompounds": 16439, + "ĠVisit": 16440, + "Ġenvelop": 16441, + "earth": 16442, + "Ġsupportive": 16443, + "ggle": 16444, + "ĠBrussels": 16445, + "ĠGuild": 16446, + "Create": 16447, + "REL": 16448, + "Ġaveraged": 16449, + "Ġ1969": 16450, + "riages": 16451, + "Ġlengthy": 16452, + "Ġforgot": 16453, + "Okay": 16454, + "ĠErd": 16455, + "Ġdealer": 16456, + "Ġrecession": 16457, + "DD": 16458, + "Ġdesperately": 16459, + "Ġhunger": 16460, + "Ġsticks": 16461, + "Ġmph": 16462, + "ĠFaith": 16463, + "Ġintentionally": 16464, + "Ġdemol": 16465, + "ueller": 16466, + "ĠSale": 16467, + "Ġdebris": 16468, + "spring": 16469, + "Ġleap": 16470, + ">>>>": 16471, + "Ġcontainers": 16472, + "selling": 16473, + "ranean": 16474, + "attering": 16475, + "Ġcommented": 16476, + "ĠCM": 16477, + "onut": 16478, + "Ġwoods": 16479, + "especially": 16480, + "Ġorganize": 16481, + "ivic": 16482, + "ĠWoods": 16483, + "anga": 16484, + "squ": 16485, + "Ġmaj": 16486, + "amon": 16487, + "Ġaxis": 16488, + "Ġ1974": 16489, + "ĠDenmark": 16490, + "Ġwarrior": 16491, + "ĠPand": 16492, + "Ġoutlined": 16493, + "ĠBO": 16494, + "insula": 16495, + "zilla": 16496, + "ebook": 16497, + "Ġdare": 16498, + "Ġsearched": 16499, + "Ġnavigate": 16500, + "Sn": 16501, + "writing": 16502, + "Ġunited": 16503, + "Japan": 16504, + "ĠHebrew": 16505, + "Ġflame": 16506, + "Ġrelies": 16507, + "Ġcatching": 16508, + "ĠSho": 16509, + "Ġimprisonment": 16510, + "Ġpockets": 16511, + "Ġclosure": 16512, + "ĠFam": 16513, + "tim": 16514, + "adequ": 16515, + "Activity": 16516, + "Ġrecruiting": 16517, + "ĠWATCH": 16518, + "ĠArgentina": 16519, + "dest": 16520, + "Ġapologize": 16521, + "oro": 16522, + "Ġlacks": 16523, + "Ġtuned": 16524, + "ĠGriffin": 16525, + "Ġinfamous": 16526, + "Ġcelebrity": 16527, + "sson": 16528, + "Ġ----------------------------------------------------------------": 16529, + "ĠIsis": 16530, + "ĠDisplay": 16531, + "Ġcredibility": 16532, + "Ġeconomies": 16533, + "Ġheadline": 16534, + "ĠCowboys": 16535, + "Ġindef": 16536, + "Ġlately": 16537, + "Ġincentives": 16538, + "button": 16539, + "ĠMob": 16540, + "Aut": 16541, + "Ġresigned": 16542, + "ĠOm": 16543, + "camp": 16544, + "Ġprofiles": 16545, + "Ġschemes": 16546, + "olphins": 16547, + "ayed": 16548, + "Clinton": 16549, + "enh": 16550, + "ĠYahoo": 16551, + "Ġabst": 16552, 
+ "Ġank": 16553, + "suits": 16554, + "Ġwished": 16555, + "ĠMarco": 16556, + "udden": 16557, + "Ġsphere": 16558, + "ĠBishop": 16559, + "Ġincorporated": 16560, + "ĠPlant": 16561, + "114": 16562, + "Ġhated": 16563, + "pic": 16564, + "Ġdonate": 16565, + "Ġlined": 16566, + "Ġbeans": 16567, + "Ġstealing": 16568, + "Ġcostume": 16569, + "Ġsheriff": 16570, + "Ġforty": 16571, + "Ġintact": 16572, + "Ġadapted": 16573, + "Ġtravelling": 16574, + "bart": 16575, + "Ġnicely": 16576, + "Ġdried": 16577, + "Ġscal": 16578, + "osity": 16579, + "NOTE": 16580, + "ĠBh": 16581, + "ĠBroncos": 16582, + "ĠIgn": 16583, + "Ġintimate": 16584, + "Ġchemistry": 16585, + "Ġoptimal": 16586, + "Deb": 16587, + "ĠGeneration": 16588, + "Ġ],": 16589, + "ichi": 16590, + "ĠWii": 16591, + "ĠYOUR": 16592, + "ventions": 16593, + "Write": 16594, + "Ġpopul": 16595, + "unning": 16596, + "ĠWor": 16597, + "Vol": 16598, + "Ġqueen": 16599, + "heads": 16600, + "KK": 16601, + "Ġanalyze": 16602, + "opic": 16603, + "earchers": 16604, + "Ġdot": 16605, + "legraph": 16606, + "astically": 16607, + "Ġupgrades": 16608, + "Ġcares": 16609, + "Ġextending": 16610, + "Ġfreeze": 16611, + "Ġinability": 16612, + "Ġorgans": 16613, + "Ġpretend": 16614, + "Ġoutlet": 16615, + "113": 16616, + "olan": 16617, + "ĠMall": 16618, + "uling": 16619, + "talk": 16620, + "Ġexpressing": 16621, + "ĠAlways": 16622, + "ĠBegin": 16623, + "files": 16624, + "Ġlicenses": 16625, + "%%": 16626, + "ĠMitt": 16627, + "Ġfilters": 16628, + "ĠMilwaukee": 16629, + "GN": 16630, + "Ġunfold": 16631, + "Mo": 16632, + "Ġnutrition": 16633, + "ppo": 16634, + "Bo": 16635, + "Ġfounding": 16636, + "Ġundermine": 16637, + "Ġeasiest": 16638, + "ĠCzech": 16639, + "ĠMack": 16640, + "Ġsexuality": 16641, + "ĠNixon": 16642, + "Win": 16643, + "ĠArn": 16644, + "ĠKin": 16645, + "ãĤ£": 16646, + "icer": 16647, + "Ġfortun": 16648, + "Ġsurfaces": 16649, + "aghd": 16650, + "Ġcarriers": 16651, + "ĠPART": 16652, + "ĠTib": 16653, + "Ġinterval": 16654, + "Ġfrustrating": 16655, + "ĠShip": 16656, + "ĠArmed": 16657, + "ffe": 16658, + "Ġboats": 16659, + "ĠAbraham": 16660, + "inis": 16661, + "Ġsuited": 16662, + "thread": 16663, + "iov": 16664, + "abul": 16665, + "ĠVenezuela": 16666, + "Ġtom": 16667, + "super": 16668, + "Ġcastle": 16669, + "although": 16670, + "ioxide": 16671, + "eches": 16672, + "Ġevolutionary": 16673, + "Ġnegotiate": 16674, + "Ġconfronted": 16675, + "Remember": 16676, + "Ġ170": 16677, + "Such": 16678, + "Ġ911": 16679, + "mult": 16680, + "ĠAbyss": 16681, + "urry": 16682, + "kees": 16683, + "spec": 16684, + "ĠBarbara": 16685, + "Ġbelonging": 16686, + "Ġvillain": 16687, + "istani": 16688, + "Ġaccountable": 16689, + "Ġportions": 16690, + "ĠDecl": 16691, + "Ur": 16692, + "ĠKate": 16693, + "gre": 16694, + "Ġmagazines": 16695, + "UCK": 16696, + "Ġregulate": 16697, + "omon": 16698, + "ĠAlmost": 16699, + "Ġoverview": 16700, + "Ġscram": 16701, + "Ġloot": 16702, + "ĠFitz": 16703, + "Ġcharacteristic": 16704, + "ĠSnake": 16705, + "say": 16706, + "ĠRico": 16707, + "Ġtrait": 16708, + "ĠJoined": 16709, + "aucus": 16710, + "Ġadaptation": 16711, + "ĠAirlines": 16712, + "Ġarchae": 16713, + "ĠIde": 16714, + "Ġbikes": 16715, + "Ġliterary": 16716, + "Ġinfluences": 16717, + "ĠUsed": 16718, + "Creat": 16719, + "Ġplea": 16720, + "ĠDefence": 16721, + "ĠAssass": 16722, + "Ġpond": 16723, + "ULT": 16724, + ")\"": 16725, + "Ġevaluated": 16726, + "Ġobtaining": 16727, + "Ġdemographic": 16728, + "Ġvigil": 16729, + "aley": 16730, + "Ġspouse": 16731, + "ĠSeahawks": 16732, + "respons": 16733, + "ĠBelt": 16734, + "umatic": 16735, + "Ġrises": 
16736, + "runner": 16737, + "ĠMichelle": 16738, + "Ġpotent": 16739, + "race": 16740, + "ĠPAC": 16741, + "Find": 16742, + "olesterol": 16743, + "ISS": 16744, + "ĠIntroduced": 16745, + "resses": 16746, + "ignment": 16747, + "Os": 16748, + "ĠTu": 16749, + "ĠDex": 16750, + "icides": 16751, + "Ġsparked": 16752, + "ĠLaura": 16753, + "ĠBryant": 16754, + "Ġsmiling": 16755, + "ĠNexus": 16756, + "Ġdefendants": 16757, + "ĠCatal": 16758, + "Ġdishes": 16759, + "shaped": 16760, + "Ġprolong": 16761, + "mt": 16762, + "($": 16763, + "ãĢĤ": 16764, + "Ġcalculations": 16765, + "ĠSame": 16766, + "Ġpiv": 16767, + "HH": 16768, + "Ġcancelled": 16769, + "Ġgrin": 16770, + "Ġterritories": 16771, + "istically": 16772, + "Come": 16773, + "ĠParent": 16774, + "Project": 16775, + "Ġneglig": 16776, + "ĠPrivacy": 16777, + "Ġammo": 16778, + "LECT": 16779, + "olutely": 16780, + "ĠEpic": 16781, + "Ġmisunder": 16782, + "wal": 16783, + "April": 16784, + "mos": 16785, + "pathy": 16786, + "ĠCarson": 16787, + "Ġalbums": 16788, + "ĠEasy": 16789, + "Ġpistol": 16790, + "<<": 16791, + "Ġ\\(": 16792, + "target": 16793, + "help": 16794, + "Ġinterpre": 16795, + "conscious": 16796, + "ĠHousing": 16797, + "ĠJoint": 16798, + "127": 16799, + "Ġbeers": 16800, + "science": 16801, + "ĠFirefox": 16802, + "effective": 16803, + "ĠCabin": 16804, + "ĠOkay": 16805, + "ĠApplic": 16806, + "Ġspacecraft": 16807, + "ĠSR": 16808, + "vet": 16809, + "ĠStrange": 16810, + "SB": 16811, + "Ġcorps": 16812, + "iberal": 16813, + "efficient": 16814, + "Ġprevalence": 16815, + "Ġeconomists": 16816, + "118": 16817, + "Thread": 16818, + "ordable": 16819, + "ODE": 16820, + "ĠCant": 16821, + "=-=-": 16822, + "ifiable": 16823, + "ĠAround": 16824, + "Ġpole": 16825, + "Ġwillingness": 16826, + "CLA": 16827, + "ĠKid": 16828, + "Ġcomplement": 16829, + "Ġscattered": 16830, + "Ġinmates": 16831, + "Ġbleeding": 16832, + "every": 16833, + "Ġqueue": 16834, + "ĠTrain": 16835, + "Ġhij": 16836, + "Ġmelee": 16837, + "pleted": 16838, + "Ġdigit": 16839, + "Ġgem": 16840, + "official": 16841, + "Ġlifting": 16842, + "е": 16843, + "Requ": 16844, + "itutes": 16845, + "Ġpackaging": 16846, + "ĠWorkers": 16847, + "hran": 16848, + "ĠLebanon": 16849, + "olesc": 16850, + "Ġpunished": 16851, + "ĠJuan": 16852, + "Ġjam": 16853, + "ĠDocument": 16854, + "Ġmapping": 16855, + "icates": 16856, + "Ġinevitably": 16857, + "Ġvanilla": 16858, + "ĠTon": 16859, + "Ġwatches": 16860, + "Ġleagues": 16861, + "Ġinitiated": 16862, + "degree": 16863, + "portion": 16864, + "Ġrecalls": 16865, + "Ġruin": 16866, + "Ġmelt": 16867, + "IAN": 16868, + "Ġhem": 16869, + "Exp": 16870, + "Ġbaking": 16871, + "ĠColomb": 16872, + "atible": 16873, + "Ġradius": 16874, + "plug": 16875, + "ĠIF": 16876, + "etically": 16877, + "Ġfict": 16878, + "HER": 16879, + "ĠTap": 16880, + "atinum": 16881, + "Ġink": 16882, + "Ġcoh": 16883, + "ĠWizard": 16884, + "both": 16885, + "tex": 16886, + "Ġspends": 16887, + "ĠCurrently": 16888, + "ĠPit": 16889, + "Ġneurons": 16890, + "ignt": 16891, + "Ġrall": 16892, + "Ġbuses": 16893, + "building": 16894, + "Ġadjustments": 16895, + "Ġcried": 16896, + "iblical": 16897, + "atted": 16898, + "ĠZion": 16899, + "ĠMatter": 16900, + "Ġmeditation": 16901, + "ĠDennis": 16902, + "Ġours": 16903, + "ĠTab": 16904, + "Ġrankings": 16905, + "ortal": 16906, + "Ġadvers": 16907, + "Ġsurrender": 16908, + "ĠGob": 16909, + "cium": 16910, + "omas": 16911, + "imeter": 16912, + "Ġmultiplayer": 16913, + "Ġheroin": 16914, + "Ġoptimistic": 16915, + "Ġindicator": 16916, + "ĠBrig": 16917, + "Ġgrocery": 16918, + "Ġapplicant": 16919, + "ĠRocket": 
16920, + "vid": 16921, + "Exception": 16922, + "pent": 16923, + "Ġorganizing": 16924, + "Ġencounters": 16925, + "ĠTOD": 16926, + "Ġjewel": 16927, + "Save": 16928, + "ĠChristie": 16929, + "Ġheating": 16930, + "Ġlazy": 16931, + "ĠCP": 16932, + "Ġcousin": 16933, + "Config": 16934, + "Ġregener": 16935, + "Ġnearest": 16936, + "Ġachieving": 16937, + "ENS": 16938, + "throw": 16939, + "ĠRichmond": 16940, + "antle": 16941, + "2002": 16942, + "Ġanten": 16943, + "bird": 16944, + "133": 16945, + "Ġnarc": 16946, + "raint": 16947, + "unny": 16948, + "ĠHispanic": 16949, + "ournaments": 16950, + "Ġprophe": 16951, + "ĠThailand": 16952, + "ĠTi": 16953, + "Ġinjection": 16954, + "Ġinherit": 16955, + "ravis": 16956, + "Ġmedi": 16957, + "Ġwhoever": 16958, + "ĠDEBUG": 16959, + "GP": 16960, + "ĠHud": 16961, + "Card": 16962, + "prom": 16963, + "Ġpor": 16964, + "Ġoverhead": 16965, + "Law": 16966, + "Ġviolate": 16967, + "Ġheated": 16968, + "Ġdescriptions": 16969, + "Ġachievements": 16970, + "ĠBeer": 16971, + "ĠQuant": 16972, + "Was": 16973, + "Ġeighth": 16974, + "ĠIv": 16975, + "Ġspecialized": 16976, + "UPDATE": 16977, + "ĠDelta": 16978, + "Pop": 16979, + "Jul": 16980, + "ĠAsk": 16981, + "ophy": 16982, + "Ġnewsletters": 16983, + "ĠTool": 16984, + "Ġgard": 16985, + "ĠConfeder": 16986, + "ĠGMT": 16987, + "ĠAbbott": 16988, + "Ġimmunity": 16989, + "ĠVM": 16990, + "Islam": 16991, + "Ġimplicit": 16992, + "wd": 16993, + "Ġ1944": 16994, + "ravity": 16995, + "ometric": 16996, + "Ġsurviving": 16997, + "urai": 16998, + "ĠPrison": 16999, + "Ġrust": 17000, + "ĠSketch": 17001, + "Ġbees": 17002, + "ĠTheory": 17003, + "Ġmerit": 17004, + "Tex": 17005, + "chat": 17006, + "Ġmim": 17007, + "Ġpaste": 17008, + "ĠKoch": 17009, + "Ġignorance": 17010, + "ĠShoot": 17011, + "Ġbasement": 17012, + "United": 17013, + "ĠAdvis": 17014, + "height": 17015, + "Ġfoster": 17016, + "Ġdetain": 17017, + "information": 17018, + "Ġneural": 17019, + "';": 17020, + "Ġproves": 17021, + "allery": 17022, + "Ġinvitation": 17023, + "umbers": 17024, + "Ġcattle": 17025, + "Ġbicycle": 17026, + "zi": 17027, + "Ġconsultant": 17028, + "Ġapology": 17029, + "ĠTiger": 17030, + "Ġ123": 17031, + "999": 17032, + "Ġindividually": 17033, + "rt": 17034, + "igion": 17035, + "ĠBrazilian": 17036, + "Ġdisturb": 17037, + "Ġentrepreneurs": 17038, + "Ġforests": 17039, + "cerpt": 17040, + "plates": 17041, + "pher": 17042, + "clipse": 17043, + "Ġtwitter": 17044, + "Ġacids": 17045, + "ographical": 17046, + "hum": 17047, + "ĠBald": 17048, + "ifully": 17049, + "Ġcompiler": 17050, + "ĠDA": 17051, + "Ġdonor": 17052, + "asi": 17053, + "Ġtribal": 17054, + "lash": 17055, + "ĠConfig": 17056, + "Ġapplicants": 17057, + "Ġsalaries": 17058, + "135": 17059, + "Putin": 17060, + "ĠFocus": 17061, + "irs": 17062, + "Ġmisconduct": 17063, + "ĠHaz": 17064, + "Ġeaten": 17065, + "Mobile": 17066, + "Muslim": 17067, + "ĠMarcus": 17068, + "viol": 17069, + "Ġfavorable": 17070, + "Ġstub": 17071, + "adin": 17072, + "ĠHob": 17073, + "Ġfaithful": 17074, + "Ġelectronics": 17075, + "Ġvacuum": 17076, + "wait": 17077, + "backed": 17078, + "economic": 17079, + "dist": 17080, + "Ġtenure": 17081, + "Ġsincere": 17082, + "ĠTogether": 17083, + "ĠWave": 17084, + "Ġprogression": 17085, + "Ġdenying": 17086, + "Ġdistress": 17087, + "braska": 17088, + "third": 17089, + "Ġmixing": 17090, + "Ġcolonial": 17091, + "Ġprivately": 17092, + "Ġunrest": 17093, + "aternity": 17094, + "Ġpremises": 17095, + "anti": 17096, + "gregation": 17097, + "Ġlicence": 17098, + "ĠHind": 17099, + "ĠSamuel": 17100, + "Ġconvincing": 17101, + "ĠAce": 17102, + 
"ĠRust": 17103, + "ĠNetanyahu": 17104, + "Ġhandles": 17105, + "ĠPatch": 17106, + "oriented": 17107, + "aho": 17108, + "ĠGonz": 17109, + "Ġhackers": 17110, + "claimer": 17111, + "Ġcustoms": 17112, + "ĠGran": 17113, + "fighters": 17114, + "Ġluc": 17115, + "Ġmanuscript": 17116, + "arenthood": 17117, + "Ġdevil": 17118, + "Ġwarriors": 17119, + "Ġoffenders": 17120, + "William": 17121, + "Ġholidays": 17122, + "Ġnightmare": 17123, + "Ġlever": 17124, + "ifferent": 17125, + "Stat": 17126, + "Ġexhibition": 17127, + "puted": 17128, + "ĠPure": 17129, + "Ġalpha": 17130, + "Ġenthusiasm": 17131, + "ĠRepresentatives": 17132, + "EAR": 17133, + "ĠTyp": 17134, + "Ġwheat": 17135, + "ĠAlf": 17136, + "Ġcorrection": 17137, + "Ġevangel": 17138, + "ATT": 17139, + "Miss": 17140, + "Ġsoup": 17141, + "Ġimplied": 17142, + "param": 17143, + "Ġsexy": 17144, + "ĠLux": 17145, + "Ġrepublic": 17146, + "patch": 17147, + "ablish": 17148, + "Ġicons": 17149, + "Ġfathers": 17150, + "ĠGET": 17151, + "ĠCarib": 17152, + "Ġregulated": 17153, + "ĠCohen": 17154, + "ĠBobby": 17155, + "Ġner": 17156, + "Ġbent": 17157, + "ventory": 17158, + "ĠAlong": 17159, + "ĠEST": 17160, + "ĠWallace": 17161, + "Ġmurders": 17162, + "rise": 17163, + "kell": 17164, + "ĠCommonwealth": 17165, + "Ġnasty": 17166, + "eta": 17167, + "ĠMIT": 17168, + "Ġadministered": 17169, + "Ġgenuinely": 17170, + "Editor": 17171, + "nick": 17172, + "Ġhydro": 17173, + "********************************": 17174, + "ĠBle": 17175, + "Ġfines": 17176, + "Ġgorge": 17177, + "ausible": 17178, + "rh": 17179, + "Ġapple": 17180, + "mentioned": 17181, + "Ġrope": 17182, + "otyp": 17183, + "HR": 17184, + "Ġdisappointing": 17185, + "Ġcage": 17186, + "nik": 17187, + "Ġdoubts": 17188, + "ĠFREE": 17189, + "prints": 17190, + "ĠMUST": 17191, + "Ġvendors": 17192, + "ĠInqu": 17193, + "Ġliberals": 17194, + "Ġcontractor": 17195, + "Ġupside": 17196, + "children": 17197, + "Ġtricky": 17198, + "Ġregulators": 17199, + "charged": 17200, + "liter": 17201, + "Ġ***": 17202, + "Ġrebell": 17203, + "lang": 17204, + "Ġlocals": 17205, + "Ġphysicians": 17206, + "Ġhey": 17207, + "arse": 17208, + "tm": 17209, + "ĠLex": 17210, + "Ġbehavioral": 17211, + "successful": 17212, + "FX": 17213, + "Ġbrick": 17214, + "ovic": 17215, + "Ġconform": 17216, + "Ġreviewing": 17217, + "Ġinsights": 17218, + "Ġbiology": 17219, + "ĠRemove": 17220, + "ĠExtra": 17221, + "Ġcommitting": 17222, + "induced": 17223, + "ignty": 17224, + "igm": 17225, + "Ġatomic": 17226, + "Common": 17227, + "ĠEM": 17228, + "ĠPere": 17229, + "ĠItems": 17230, + "eh": 17231, + "Ġpreserved": 17232, + "ĠHood": 17233, + "Ġprisoner": 17234, + "Ġbankruptcy": 17235, + "Ġgren": 17236, + "ushes": 17237, + "Ġexploitation": 17238, + "Ġsignatures": 17239, + "Ġfinan": 17240, + "],\"": 17241, + "ĠMR": 17242, + "Ġmeg": 17243, + "remlin": 17244, + "Ġmusicians": 17245, + "Ġselecting": 17246, + "Ġexamining": 17247, + "INK": 17248, + "lated": 17249, + "Hi": 17250, + "Ġartic": 17251, + "Ġpets": 17252, + "Ġimpair": 17253, + "ĠMAN": 17254, + "Ġtablets": 17255, + "include": 17256, + "Range": 17257, + "Ġcaut": 17258, + "Ġlogs": 17259, + "Ġmounting": 17260, + "Ġunaware": 17261, + "Ġdynamics": 17262, + "ĠPalestine": 17263, + "ĠQuarter": 17264, + "ĠPurple": 17265, + "Ġma": 17266, + "ĠImport": 17267, + "Ġcollections": 17268, + "ciation": 17269, + "Ġsuccessor": 17270, + "Ġclone": 17271, + "Ġaiming": 17272, + "Ġpossessed": 17273, + "Ġsticking": 17274, + "Ġshaking": 17275, + "Ġlocate": 17276, + "ĠHockey": 17277, + "Turn": 17278, + "170": 17279, + "Ġfifteen": 17280, + "ĠHarrison": 17281, + 
"Ġcontinuously": 17282, + "ĠTC": 17283, + "ĠValent": 17284, + "ĠRescue": 17285, + "Ġbypass": 17286, + "amount": 17287, + "Ġmast": 17288, + "Ġprotects": 17289, + "Ġartistic": 17290, + "Ġsometime": 17291, + "Ġshoe": 17292, + "Ġshouted": 17293, + "ificant": 17294, + "etitive": 17295, + "ĠRegister": 17296, + "ĠJin": 17297, + "Ġconcentrated": 17298, + "lington": 17299, + "onies": 17300, + "Ġgenerator": 17301, + "yrim": 17302, + "ĠArmen": 17303, + "Ġclearing": 17304, + "ido": 17305, + "ĠTW": 17306, + "alph": 17307, + "Ġladies": 17308, + "Hard": 17309, + "Ġdialog": 17310, + "Ġinputs": 17311, + "æľ": 17312, + "Ġposes": 17313, + "Ġslots": 17314, + "ĠPremium": 17315, + "Ġleaks": 17316, + "Ġbosses": 17317, + "Ġ113": 17318, + "course": 17319, + "Acc": 17320, + "ĠNewton": 17321, + "ĠAustria": 17322, + "ĠMage": 17323, + "Ġteaches": 17324, + "abad": 17325, + "Ġwears": 17326, + "Ġcyl": 17327, + "Ġcurse": 17328, + "ĠSales": 17329, + "ĠWings": 17330, + "Ġpsy": 17331, + "Ġgaps": 17332, + "ĠIceland": 17333, + "ĠPinterest": 17334, + "Ġlandlord": 17335, + "Ġdefinitions": 17336, + "ĠKer": 17337, + "Ġsufficiently": 17338, + "ĠPence": 17339, + "ĠArchitect": 17340, + "Ġsurpass": 17341, + "Ġ114": 17342, + "Ġsuperhero": 17343, + "ĠDisease": 17344, + "Ġpriests": 17345, + "ĠCulture": 17346, + "Ġdefinitive": 17347, + "Ġsecretly": 17348, + "ĠDance": 17349, + "install": 17350, + "chief": 17351, + "ĠJessica": 17352, + "Would": 17353, + "Updated": 17354, + "Ġlocker": 17355, + "ĠKay": 17356, + "Ġmemorial": 17357, + "è¦": 17358, + "fat": 17359, + "Ġdisgu": 17360, + "Ġflavors": 17361, + "ĠBaseball": 17362, + "ĠResistance": 17363, + "Ġkicks": 17364, + "Ġenv": 17365, + "Ġteenagers": 17366, + "Dark": 17367, + "ĠCAR": 17368, + "Ġhalt": 17369, + "ĠLG": 17370, + "ĠGabriel": 17371, + "Ġfever": 17372, + "Ġsatur": 17373, + "Ġmall": 17374, + "Ġaffiliate": 17375, + "ĠSleep": 17376, + "ĠSpecific": 17377, + "ĠVel": 17378, + "Ġjar": 17379, + "ĠSacred": 17380, + "ĠEdwards": 17381, + "ĠACL": 17382, + "Ġretained": 17383, + "ĠGiant": 17384, + "Ġlimitation": 17385, + "inces": 17386, + "Ġrefusal": 17387, + "ĠTale": 17388, + "ĠButler": 17389, + "Ġaccidents": 17390, + "ĠCSS": 17391, + "Ġimported": 17392, + "ĠCopy": 17393, + "α": 17394, + "ERT": 17395, + "zel": 17396, + "Ġdivisions": 17397, + "hots": 17398, + "ĠAlb": 17399, + "ĠDS": 17400, + "Loader": 17401, + "Washington": 17402, + "atisf": 17403, + "ĠCreative": 17404, + "\\.": 17405, + "ĠAutom": 17406, + "redict": 17407, + "Ġreceptor": 17408, + "ĠCarlos": 17409, + "Method": 17410, + "oka": 17411, + "Ġmalicious": 17412, + "Ġstepping": 17413, + ",[": 17414, + "ĠDad": 17415, + "Ġattraction": 17416, + "ĠEffects": 17417, + "ĠPirate": 17418, + "ĠCer": 17419, + "ĠIndustry": 17420, + "ĠRud": 17421, + "Ġcharter": 17422, + "Ġdining": 17423, + "Ġinsists": 17424, + "Ġconfigure": 17425, + "Ġ(#": 17426, + "ĠSimple": 17427, + "ĠScroll": 17428, + "UTC": 17429, + "175": 17430, + "ĠKon": 17431, + "Ġmarketplace": 17432, + "ĠãĤ": 17433, + "Ġrefres": 17434, + "Ġgates": 17435, + "erred": 17436, + "ĠPod": 17437, + "Ġbehave": 17438, + "Frank": 17439, + "node": 17440, + "Ġendorsed": 17441, + "hett": 17442, + "asive": 17443, + "ĠHomeland": 17444, + "Ġrides": 17445, + "ĠLeave": 17446, + "erness": 17447, + "Ġflooding": 17448, + "AFP": 17449, + "Ġrisen": 17450, + "Ġcontinually": 17451, + "Ġunanim": 17452, + "ĠContract": 17453, + "ĠPas": 17454, + "Ġguided": 17455, + "ĠChile": 17456, + "bd": 17457, + "Ġsucc": 17458, + "ptic": 17459, + "Ġcommittees": 17460, + "ĠLuther": 17461, + "ĠAnyone": 17462, + "Ġsab": 17463, + "124": 17464, 
+ "Ġpixel": 17465, + "ĠBak": 17466, + "ĠTag": 17467, + "ĠBennett": 17468, + "Enter": 17469, + "small": 17470, + "ĠPresidential": 17471, + "Ġpul": 17472, + "Ġcontrace": 17473, + "archive": 17474, + "Ġcoastal": 17475, + "ĠKids": 17476, + "192": 17477, + "âĢ²": 17478, + "icky": 17479, + "INGTON": 17480, + "Ġwolf": 17481, + "ĠStalin": 17482, + "Tur": 17483, + "idget": 17484, + "amas": 17485, + "ĠUnless": 17486, + "Ġsponsor": 17487, + "Ġmorph": 17488, + "ĠChoose": 17489, + "Ġrunner": 17490, + "Ġunbel": 17491, + "Ġmud": 17492, + "ĠMana": 17493, + "Ġdubbed": 17494, + "Ġgodd": 17495, + "urers": 17496, + "window": 17497, + "Ġrelied": 17498, + "Ġcelebrating": 17499, + "osc": 17500, + "Ġ135": 17501, + "Ġlobbying": 17502, + "Ġincomplete": 17503, + "Ġrestriction": 17504, + "Ġincap": 17505, + "itus": 17506, + "Ġexpectation": 17507, + "ĠApollo": 17508, + "Ġintens": 17509, + "Ġsync": 17510, + "GH": 17511, + "Ġmanipulation": 17512, + "BY": 17513, + "Ġspear": 17514, + "Ġbreasts": 17515, + "Ġvolcan": 17516, + "ilia": 17517, + "Material": 17518, + "Ġformats": 17519, + "ĠBast": 17520, + "Ġparliamentary": 17521, + "Ġsnake": 17522, + "Ġservants": 17523, + "ĠTrudeau": 17524, + "ĠGrim": 17525, + "ĠArabic": 17526, + "ĠSCP": 17527, + "ĠBoys": 17528, + "station": 17529, + "Ġprospective": 17530, + "orde": 17531, + "initialized": 17532, + "Ġbored": 17533, + "ABLE": 17534, + "Ġaccessed": 17535, + "Ġtaxi": 17536, + "ĠShell": 17537, + "aiden": 17538, + "ursed": 17539, + "inates": 17540, + "ĠInsurance": 17541, + "ĠPete": 17542, + "September": 17543, + "650": 17544, + "Ġadventures": 17545, + "ĠCover": 17546, + "Ġtribute": 17547, + "Ġsketch": 17548, + "Ġempower": 17549, + "ĠØ": 17550, + "ĠGlenn": 17551, + "ĠDaw": 17552, + "=\\\"": 17553, + "ĠPolitics": 17554, + "Ġguides": 17555, + "Ġdioxide": 17556, + "ĠGore": 17557, + "ĠBright": 17558, + "ĠSierra": 17559, + "Ġvalued": 17560, + "cond": 17561, + "Ġpointer": 17562, + "Select": 17563, + "Ġrisky": 17564, + "Ġabsorb": 17565, + "images": 17566, + "Ġrefuses": 17567, + "Ġbonuses": 17568, + "___": 17569, + "Ġhilar": 17570, + "ĠFeatures": 17571, + "220": 17572, + "ĠCollector": 17573, + "Foot": 17574, + "Ġ1964": 17575, + "culus": 17576, + "Ġdawn": 17577, + "Ġworkout": 17578, + "ĠLO": 17579, + "Ġphilosophical": 17580, + "ĠSandy": 17581, + "ĠYouth": 17582, + "Ġliable": 17583, + "Af": 17584, + "blue": 17585, + "Ġoverturn": 17586, + "lessness": 17587, + "ĠTribune": 17588, + "ĠIng": 17589, + "Ġfactories": 17590, + "Ġcatches": 17591, + "Ġprone": 17592, + "Ġmatrix": 17593, + "Ġlogin": 17594, + "Ġinacc": 17595, + "Ġexert": 17596, + "sys": 17597, + "Ġneedle": 17598, + "ĠQur": 17599, + "Ġnotified": 17600, + "oulder": 17601, + "tx": 17602, + "Ġreminds": 17603, + "Ġpublishers": 17604, + "Ġnort": 17605, + "Ġgit": 17606, + "Ġflies": 17607, + "ĠEmily": 17608, + "Ġflowing": 17609, + "ĠAlien": 17610, + "ĠStrateg": 17611, + "Ġhardest": 17612, + "Ġmodification": 17613, + "API": 17614, + "ĠMY": 17615, + "Ġcrashes": 17616, + "stairs": 17617, + "number": 17618, + "Ġurging": 17619, + "channel": 17620, + "ĠFalcon": 17621, + "Ġinhabitants": 17622, + "Ġterrifying": 17623, + "Ġutilize": 17624, + "Ġbanner": 17625, + "Ġcigarettes": 17626, + "Ġsenses": 17627, + "ĠHolmes": 17628, + "Ġpractition": 17629, + "ĠPhillips": 17630, + "otto": 17631, + "Ġcompile": 17632, + "Model": 17633, + "ĠKo": 17634, + "Ġ[]": 17635, + "Americans": 17636, + "ĠTerms": 17637, + "Ġmedications": 17638, + "ĠAna": 17639, + "Ġfundamentally": 17640, + "ĠNotice": 17641, + "Ġweaker": 17642, + "Ġ0000": 17643, + "Ġgarlic": 17644, + "Ġoutbreak": 
17645, + "Ġeconomist": 17646, + "ĠBirth": 17647, + "Ġobstacles": 17648, + "arcer": 17649, + "ĠOrthodox": 17650, + "Ġplacebo": 17651, + "ĠCrew": 17652, + "aspberry": 17653, + "ĠAngels": 17654, + "Ġdischarge": 17655, + "Ġdestructive": 17656, + "117": 17657, + "ĠRising": 17658, + "Ġdairy": 17659, + "late": 17660, + "Ġcollision": 17661, + "ĠTigers": 17662, + "eanor": 17663, + "ocumented": 17664, + "ĠInvalid": 17665, + "Ġdont": 17666, + "ĠLiter": 17667, + "ĠVa": 17668, + "Ġhydrogen": 17669, + "Ġvariants": 17670, + "ĠBrowns": 17671, + "Ġ1965": 17672, + "Ġindigenous": 17673, + "Ġtrades": 17674, + "Ġremainder": 17675, + "Ġswept": 17676, + "ĠImpact": 17677, + "Ġredist": 17678, + "Ġunint": 17679, + "graduate": 17680, + "ãĥķ": 17681, + "ĠWILL": 17682, + "ãģ®ç": 17683, + "ĠCritical": 17684, + "Ġfisher": 17685, + "Ġvicious": 17686, + "Ġreversed": 17687, + "Year": 17688, + "ĠSox": 17689, + "Ġshootings": 17690, + "Ġfilming": 17691, + "Ġtouchdowns": 17692, + "aires": 17693, + "mel": 17694, + "Ġgrandfather": 17695, + "Ġaffection": 17696, + "ingle": 17697, + "Ġoverly": 17698, + "Additional": 17699, + "Ġsupreme": 17700, + "ĠGrad": 17701, + "Ġsporting": 17702, + "Ġmercy": 17703, + "ĠBrooks": 17704, + "ounty": 17705, + "Ġperforms": 17706, + "Ġtightly": 17707, + "Ġdemons": 17708, + "Ġkillings": 17709, + "Ġfaction": 17710, + "ĠNova": 17711, + "auts": 17712, + "Ġundoubtedly": 17713, + "arin": 17714, + "Ġunderway": 17715, + "rak": 17716, + "Ġliv": 17717, + "ĠRegion": 17718, + "Ġbriefing": 17719, + "sers": 17720, + "cloud": 17721, + "ĠMik": 17722, + "usp": 17723, + "Ġprediction": 17724, + "azor": 17725, + "Ġportable": 17726, + "ĠGand": 17727, + "Ġpresenting": 17728, + "Ġ1080": 17729, + "»": 17730, + "ushi": 17731, + "ĠSpark": 17732, + "thereum": 17733, + "Ġjustification": 17734, + "ĠNy": 17735, + "Ġcontractors": 17736, + "mingham": 17737, + "ĠStyle": 17738, + "åħ": 17739, + "ĠChronicles": 17740, + "ĠPicture": 17741, + "Ġproving": 17742, + "Ġwives": 17743, + "sett": 17744, + "Ġmolecules": 17745, + "ĠFairy": 17746, + "Ġconsisting": 17747, + "Ġpier": 17748, + "alone": 17749, + "inition": 17750, + "Ġnucle": 17751, + "json": 17752, + "Ġgotta": 17753, + "Ġmobil": 17754, + "Ġverbal": 17755, + "arium": 17756, + "Ġmonument": 17757, + "ucked": 17758, + "Ġ256": 17759, + "Tech": 17760, + "minecraft": 17761, + "ĠTrack": 17762, + "Ġtile": 17763, + "Ġcompatibility": 17764, + "asis": 17765, + "Ġsadd": 17766, + "Ġinstructed": 17767, + "ĠMueller": 17768, + "Ġlethal": 17769, + "Ġhormone": 17770, + "Ġorche": 17771, + "else": 17772, + "Ġskelet": 17773, + "Ġentertaining": 17774, + "Ġminimize": 17775, + "again": 17776, + "Ġundergo": 17777, + "Ġconstraints": 17778, + "Ġcigarette": 17779, + "ĠIslamist": 17780, + "Ġtravels": 17781, + "ĠPanthers": 17782, + "lings": 17783, + "Care": 17784, + "Ġlawsuits": 17785, + "uras": 17786, + "Ġcryst": 17787, + "Ġlowered": 17788, + "Ġaerial": 17789, + "Ġcombinations": 17790, + "Ġhaun": 17791, + "Ġcha": 17792, + "Ġvine": 17793, + "Ġquantities": 17794, + "Ġlinking": 17795, + "bank": 17796, + "Ġsoy": 17797, + "Bill": 17798, + "ĠAngela": 17799, + "Ġrecipient": 17800, + "ĠProtest": 17801, + "Ġsocket": 17802, + "Ġsolidarity": 17803, + "ĠâĨ": 17804, + "mill": 17805, + "Ġvaries": 17806, + "ĠPakistani": 17807, + "Dragon": 17808, + "Ġune": 17809, + "Ġhorizon": 17810, + "³³³³³³³³": 17811, + "Ġprovinces": 17812, + "Ġfrankly": 17813, + "Ġenacted": 17814, + "notes": 17815, + "['": 17816, + "Ġ192": 17817, + "ocracy": 17818, + "Ġendorsement": 17819, + "Ġovertime": 17820, + "True": 17821, + "Lab": 17822, + "licted": 
17823, + "ĠDNC": 17824, + "Ġbeats": 17825, + "ĠJamie": 17826, + "152": 17827, + "ĠINT": 17828, + "Contact": 17829, + "Ġaccounted": 17830, + "hash": 17831, + "ĠPackers": 17832, + "pires": 17833, + "Ġlesbian": 17834, + "Ġamendments": 17835, + "Ġhopeful": 17836, + "ĠFinland": 17837, + "Ġspotlight": 17838, + "Ġconfigured": 17839, + "Ġtroubled": 17840, + "Ġgaze": 17841, + "ĠCalgary": 17842, + "Ġreliability": 17843, + "Ġinsurg": 17844, + "swer": 17845, + "buy": 17846, + "ĠSkin": 17847, + "Ġpixels": 17848, + "Ġhandgun": 17849, + "Ġparas": 17850, + "Ġcategor": 17851, + "ĠEL": 17852, + "ĠRex": 17853, + "Indeed": 17854, + "Ġkinda": 17855, + "Ġconjunction": 17856, + "ĠBryan": 17857, + "ĠManufact": 17858, + "yang": 17859, + "Plus": 17860, + "SQL": 17861, + "ishment": 17862, + "Ġdominate": 17863, + "Ġnail": 17864, + "Ġoath": 17865, + "Ġerupt": 17866, + "ĠFine": 17867, + "itbart": 17868, + "ĠChip": 17869, + "ĠAbd": 17870, + "ĠNam": 17871, + "Ġbuyer": 17872, + "Ġdissent": 17873, + "Leaks": 17874, + "Contin": 17875, + "Ġrider": 17876, + "ĠSomeone": 17877, + "Ġillusion": 17878, + "cin": 17879, + "ĠBoeing": 17880, + "Ġinadequ": 17881, + "ovation": 17882, + "iants": 17883, + "Ġrebuild": 17884, + "450": 17885, + "ĠDestiny": 17886, + "SW": 17887, + "ĠTill": 17888, + "Hit": 17889, + "iaz": 17890, + "ĠBangl": 17891, + "achers": 17892, + "ĠReform": 17893, + "Ġsegments": 17894, + "Ġsystematic": 17895, + "dc": 17896, + "ĠConservatives": 17897, + "Ġportal": 17898, + "hor": 17899, + "ĠDragonbound": 17900, + "Ġdragged": 17901, + "omo": 17902, + "Ġthee": 17903, + "advert": 17904, + "ĠReports": 17905, + "ĠEt": 17906, + "Ġbarrels": 17907, + "August": 17908, + "Ġcomparisons": 17909, + "Ġhex": 17910, + "Ġanthrop": 17911, + "\"[": 17912, + "borough": 17913, + "abi": 17914, + "Ġpictured": 17915, + "playing": 17916, + "ĠAddress": 17917, + "ĠMirror": 17918, + "Smith": 17919, + "Ġtires": 17920, + "ĠNPR": 17921, + "AAAA": 17922, + "Ġclassification": 17923, + "ĠThan": 17924, + "ĠHarm": 17925, + "ĠRA": 17926, + "Ġrejection": 17927, + "mination": 17928, + "Ġranged": 17929, + "ĠFalls": 17930, + "DI": 17931, + "Host": 17932, + "ãĤ´": 17933, + "ĠExample": 17934, + "listed": 17935, + "thirds": 17936, + "Ġsafegu": 17937, + "brand": 17938, + "Ġprobable": 17939, + "Canada": 17940, + "ITION": 17941, + "ĠQaeda": 17942, + "Ġchick": 17943, + "Ġimports": 17944, + "hit": 17945, + "loc": 17946, + "WW": 17947, + "Ġblew": 17948, + "Ġanytime": 17949, + "Ġwholes": 17950, + "iked": 17951, + "Ġcalculation": 17952, + "create": 17953, + "ĠOri": 17954, + "Ġupgraded": 17955, + "Ġappar": 17956, + "utory": 17957, + "ĠMol": 17958, + "Brit": 17959, + "ĠJong": 17960, + "INAL": 17961, + "ĠStarting": 17962, + "Ġdice": 17963, + "urtle": 17964, + "Ġrelying": 17965, + "closure": 17966, + "Ġprofitable": 17967, + "Ġslaughter": 17968, + "ĠManual": 17969, + "caster": 17970, + "Ġ\"$": 17971, + "Ġfeather": 17972, + "ĠSimply": 17973, + "ieves": 17974, + "Ġdeterior": 17975, + "ĠPCI": 17976, + "Ġstamp": 17977, + "Ġflaws": 17978, + "Ġshade": 17979, + "hammer": 17980, + "Ġpassport": 17981, + "Ġconting": 17982, + "amel": 17983, + "Ġobservers": 17984, + "Ġneglect": 17985, + "ĠRB": 17986, + "ĠBrotherhood": 17987, + "Ġskeptical": 17988, + "family": 17989, + "usk": 17990, + "Ġemotionally": 17991, + "âĻ": 17992, + "ĠBeta": 17993, + "asonable": 17994, + "idity": 17995, + "ĠMul": 17996, + "Ġkicking": 17997, + "ĠCarm": 17998, + "ollah": 17999, + "VERTIS": 18000, + "ĠAthen": 18001, + "Ġladder": 18002, + "ĠBullet": 18003, + "å£": 18004, + "0001": 18005, + "ĠWildlife": 18006, + "ĠMask": 
18007, + "ĠNan": 18008, + "Rev": 18009, + "Ġunacceptable": 18010, + "legal": 18011, + "Ġcrowded": 18012, + "agi": 18013, + "ĠCox": 18014, + "je": 18015, + "Ġmorality": 18016, + "Ġfuels": 18017, + "Ġcables": 18018, + "Ġmankind": 18019, + "ĠCaribbean": 18020, + "Ġanchor": 18021, + "Ġbyte": 18022, + "ĠOften": 18023, + "ĠOz": 18024, + "Ġcrafted": 18025, + "Ġhistorian": 18026, + "ĠWu": 18027, + "Ġtowers": 18028, + "ĠCitizens": 18029, + "Ġhelm": 18030, + "Ġcredentials": 18031, + "Ġsingular": 18032, + "ĠJesse": 18033, + "Ġtackles": 18034, + "Ġcontempt": 18035, + "Ġafore": 18036, + "ĠShadows": 18037, + "Ġnil": 18038, + "Ġurgent": 18039, + "apple": 18040, + "blood": 18041, + "Ġvon": 18042, + "Ġoffline": 18043, + "Ġbreathe": 18044, + "Ġjumps": 18045, + "Ġirrelevant": 18046, + "oxic": 18047, + "omal": 18048, + "important": 18049, + "Jim": 18050, + "Ġgloves": 18051, + "arming": 18052, + "depth": 18053, + "Ġtalents": 18054, + "ookie": 18055, + "ĠSB": 18056, + "Ġpalm": 18057, + "uffs": 18058, + "esta": 18059, + "IGH": 18060, + "Ġcanon": 18061, + "ĠVerizon": 18062, + "ĠPle": 18063, + "Ġcoupled": 18064, + "velt": 18065, + "Ġfundraising": 18066, + "ĠGetting": 18067, + "ĠDLC": 18068, + "Ġmathematical": 18069, + "ĠHS": 18070, + "ĠCardinals": 18071, + "telling": 18072, + "Ġsponsors": 18073, + "ĠÏ": 18074, + "ĠBulls": 18075, + "option": 18076, + "Ġpropose": 18077, + "Ġmemorable": 18078, + "Ġembraced": 18079, + "Ġdeclining": 18080, + "Health": 18081, + "eda": 18082, + "Ġ};": 18083, + "Ġspam": 18084, + "mile": 18085, + "Ġpitcher": 18086, + "ĠEight": 18087, + "Ġcaring": 18088, + "utic": 18089, + "role": 18090, + "Ġairline": 18091, + "ernandez": 18092, + "ĠAthlet": 18093, + "Ġcertification": 18094, + "uxe": 18095, + "riger": 18096, + "Ġempir": 18097, + "Ġsensation": 18098, + "Ġdism": 18099, + "Ġbolt": 18100, + "Ġevolve": 18101, + "House": 18102, + "Ġconsultation": 18103, + "ĠDuty": 18104, + "Ġtouches": 18105, + "ĠNathan": 18106, + "Ġfaint": 18107, + "had": 18108, + "\"(": 18109, + "ĠConsumer": 18110, + "ĠExtreme": 18111, + "Ġ127": 18112, + "ĠHerm": 18113, + "ĠSacrament": 18114, + "izoph": 18115, + "Ġanxious": 18116, + "ulously": 18117, + "Ġsocially": 18118, + "ĠUTC": 18119, + "Ġsolving": 18120, + "ĠLetter": 18121, + "History": 18122, + "educ": 18123, + "Price": 18124, + "));": 18125, + "Ġreload": 18126, + "amic": 18127, + "Ġpork": 18128, + "Ġdiscourse": 18129, + "Ġtournaments": 18130, + "airo": 18131, + "ĠKur": 18132, + "ĠCosta": 18133, + "Ġviolating": 18134, + "Ġinterfere": 18135, + "Ġrecreational": 18136, + "uffle": 18137, + "Ġspeeches": 18138, + "Ġneeding": 18139, + "Ġremembers": 18140, + "Ġcredited": 18141, + "nia": 18142, + "focused": 18143, + "amera": 18144, + "Ġbru": 18145, + "umbs": 18146, + "ĠCuban": 18147, + "Ġpreceding": 18148, + "Ġnonsense": 18149, + "acial": 18150, + "Ġsmartphones": 18151, + "ĠStories": 18152, + "Sports": 18153, + "ĠEmergency": 18154, + "ouncing": 18155, + "efined": 18156, + "Ġber": 18157, + "Ġconsulting": 18158, + "Ġmasters": 18159, + "heastern": 18160, + ".\"[": 18161, + "ĠRunning": 18162, + "Ġsuscept": 18163, + "ĠFeng": 18164, + "America": 18165, + "prises": 18166, + "stitial": 18167, + "ĠWeekly": 18168, + "ĠGreater": 18169, + "modules": 18170, + "ifter": 18171, + "Graphics": 18172, + "uler": 18173, + "Ġwholly": 18174, + "Ġsuppress": 18175, + "Ġconcealed": 18176, + "Ġhappily": 18177, + "Ġaccepts": 18178, + "ĠEnjoy": 18179, + "Ġrivers": 18180, + "ĠExcept": 18181, + "225": 18182, + "ĠNHS": 18183, + "ĠMcConnell": 18184, + "Ġpussy": 18185, + "ferred": 18186, + "utable": 18187, + 
"Ġattain": 18188, + "Ġ>=": 18189, + "Ġdeposits": 18190, + "rophic": 18191, + "Ġnotorious": 18192, + "ĠShaw": 18193, + "ilitation": 18194, + "Ġepidemic": 18195, + "allic": 18196, + "Ġsmallest": 18197, + "ovich": 18198, + "Ġaccessories": 18199, + "perties": 18200, + "Ġsurplus": 18201, + "ĠMech": 18202, + "Ġambig": 18203, + "ĠImmigration": 18204, + "Ġchim": 18205, + "eval": 18206, + "Ġpracticing": 18207, + "ĠMystery": 18208, + "Ġdomains": 18209, + "ĠSilicon": 18210, + "apps": 18211, + "Ġkilometers": 18212, + "ea": 18213, + "ĠSmash": 18214, + "Ġwarranty": 18215, + "Ġnost": 18216, + "sil": 18217, + "rev": 18218, + "Jon": 18219, + "ĠDublin": 18220, + "Ġtastes": 18221, + "Ġbout": 18222, + "great": 18223, + "error": 18224, + "Ġswitches": 18225, + "ĠBapt": 18226, + "DO": 18227, + "oki": 18228, + "Ġsourced": 18229, + "produ": 18230, + "Ġattachment": 18231, + "ĠIssue": 18232, + "ĠQuestion": 18233, + "Join": 18234, + "Ġfitted": 18235, + "Ġunlawful": 18236, + "^^": 18237, + "erek": 18238, + "Ġauthentication": 18239, + "Ġstole": 18240, + "Ġaccountability": 18241, + "label": 18242, + "Search": 18243, + "Ġalbeit": 18244, + "atican": 18245, + "funded": 18246, + "ĠAdding": 18247, + "ĠIQ": 18248, + "Ġsubmar": 18249, + "lit": 18250, + "aque": 18251, + "ĠLearning": 18252, + "Ġinteger": 18253, + "Master": 18254, + "ĠChrom": 18255, + "Ġpremier": 18256, + "Op": 18257, + "ĠLiu": 18258, + "Ġblessed": 18259, + "ĠGlobe": 18260, + "ĠResponse": 18261, + "Ġlegitim": 18262, + "ĠMerkel": 18263, + "Ġdisposal": 18264, + "´": 18265, + "Ġgauge": 18266, + "peat": 18267, + "Ġinduced": 18268, + "Ġquestionable": 18269, + "arthy": 18270, + "ĠVit": 18271, + "ĠFeed": 18272, + "Until": 18273, + "Ut": 18274, + "worthy": 18275, + "RY": 18276, + "ĠHerald": 18277, + "ĠHammer": 18278, + "Ġmedal": 18279, + "ĠRivers": 18280, + "ĠHack": 18281, + "Ġclarify": 18282, + "Ġtracked": 18283, + "Ġautonomous": 18284, + "Ġtenant": 18285, + "ĠQatar": 18286, + "erie": 18287, + "Ġgrim": 18288, + "ĠMonitor": 18289, + "Ġresistant": 18290, + "ĠSpec": 18291, + "ĠWells": 18292, + "NAS": 18293, + "148": 18294, + "Ġminers": 18295, + "iotics": 18296, + "Ġmisses": 18297, + "116": 18298, + "gian": 18299, + "git": 18300, + "ĠEyes": 18301, + "pres": 18302, + "Ġgraduated": 18303, + "Ġangel": 18304, + "Ġsynchron": 18305, + "Ġefficiently": 18306, + "Ġtransmitted": 18307, + "Harry": 18308, + "Ġglobally": 18309, + "ENCE": 18310, + "ĠMontana": 18311, + "raged": 18312, + "ĠPrevention": 18313, + "Ġpiss": 18314, + "ĠLl": 18315, + "Ġshelf": 18316, + "ĠBJP": 18317, + "ĠTestament": 18318, + "ĠLate": 18319, + "iker": 18320, + "ĠHapp": 18321, + "ĠJulian": 18322, + "hall": 18323, + "Ġspont": 18324, + "Ġshutdown": 18325, + "Ġinconsistent": 18326, + "Ġsubscribers": 18327, + "Ġskeleton": 18328, + "ĠNebraska": 18329, + "Ġinspire": 18330, + "ĠVoid": 18331, + "Feed": 18332, + "Ġangles": 18333, + "ĠSprings": 18334, + "Ġbenchmark": 18335, + "Ġvaccines": 18336, + "izophren": 18337, + "sexual": 18338, + "uffed": 18339, + "Ġshine": 18340, + "ĠKath": 18341, + "Ġgesture": 18342, + "inea": 18343, + "Ġrip": 18344, + "Ġoppression": 18345, + "Ġconscience": 18346, + "bt": 18347, + "ĠLum": 18348, + "Ġincidence": 18349, + "ĠFa": 18350, + "wr": 18351, + "Ġmineral": 18352, + "ĠSpurs": 18353, + "alky": 18354, + "Ġthunder": 18355, + "Ġopio": 18356, + "Being": 18357, + "ĠPalm": 18358, + "Ġwasted": 18359, + "Ġlb": 18360, + "iaries": 18361, + "ĠInitiative": 18362, + "Ġcurric": 18363, + "Ġmarker": 18364, + "ĠMcL": 18365, + "Ġextensions": 18366, + "ĠPv": 18367, + "ĠArms": 18368, + "Ġofferings": 18369, + 
"Ġdefenses": 18370, + "Ġvendor": 18371, + "Ġcontradict": 18372, + "ĠColin": 18373, + "Ġreddit": 18374, + "Ġperipher": 18375, + "122": 18376, + "Ġsins": 18377, + "Edit": 18378, + "ICT": 18379, + "Soft": 18380, + "ĠShah": 18381, + "Ġadministrator": 18382, + "ĠTrip": 18383, + "Ġpornography": 18384, + "Ġtuition": 18385, + "inence": 18386, + "ĠProgress": 18387, + "Ġcatalog": 18388, + "Ġsuite": 18389, + "Ġhike": 18390, + "Ġreproductive": 18391, + "engine": 18392, + "Ġdrought": 18393, + "ĠNoah": 18394, + "Ġ230": 18395, + "Ġdude": 18396, + "Ġrelaxed": 18397, + "Ġpartition": 18398, + "Ġparticipant": 18399, + "Ġtelesc": 18400, + "Ġfeas": 18401, + "ĠFF": 18402, + "owner": 18403, + "Ġsweeping": 18404, + "Ġlenses": 18405, + "Ġmatchup": 18406, + "ĠRepl": 18407, + "ournals": 18408, + "Ġcredible": 18409, + "Ġgrandmother": 18410, + "Ġthermal": 18411, + "Ġsubscribing": 18412, + "Ġidentities": 18413, + "colm": 18414, + "UCT": 18415, + "Ġreluctant": 18416, + "users": 18417, + "ĠCort": 18418, + "Ġassisted": 18419, + "OSS": 18420, + "ATIONS": 18421, + "ISH": 18422, + "Ġpharmaceutical": 18423, + "icable": 18424, + "adian": 18425, + "ĠSonic": 18426, + "ĠFury": 18427, + "ĠMong": 18428, + "AH": 18429, + "ĠPsychology": 18430, + "Ġphosph": 18431, + "Ġtreats": 18432, + "ŃĶ": 18433, + "Ġsteadily": 18434, + "ĠHello": 18435, + "Ġrelates": 18436, + "Ġclue": 18437, + "Expl": 18438, + "auth": 18439, + "Ġrevision": 18440, + "Ġeld": 18441, + "osion": 18442, + "Ġbron": 18443, + "144": 18444, + "rikes": 18445, + "Ġmines": 18446, + "Ġblanket": 18447, + "ĠFail": 18448, + "eled": 18449, + "ĠImagine": 18450, + "ĠPlanned": 18451, + "aic": 18452, + "Request": 18453, + "Mad": 18454, + "ĠHorse": 18455, + "ĠEagle": 18456, + "Ġcapac": 18457, + "157": 18458, + "Ġling": 18459, + "ĠNice": 18460, + "ĠParenthood": 18461, + "minster": 18462, + "ogs": 18463, + "ensitive": 18464, + "Nothing": 18465, + "Ġcarn": 18466, + "Fin": 18467, + "ĠPE": 18468, + "Ġrifles": 18469, + "ĠLP": 18470, + "Sand": 18471, + "ĠguiActive": 18472, + "Ġtourist": 18473, + "CNN": 18474, + "Ġunveiled": 18475, + "Ġpredecessor": 18476, + "}{": 18477, + "uber": 18478, + "Ġoffshore": 18479, + "Ġoptical": 18480, + "ĠRot": 18481, + "ĠPearl": 18482, + "eton": 18483, + "Ġstared": 18484, + "Ġfarther": 18485, + "atility": 18486, + "contin": 18487, + "ĠGy": 18488, + "ĠFoster": 18489, + "ĠCoc": 18490, + "rients": 18491, + "Ġdesigning": 18492, + "ĠEconomy": 18493, + "ONG": 18494, + "Women": 18495, + "ĠNancy": 18496, + "erver": 18497, + "Ġmascul": 18498, + "Ġcasualties": 18499, + "Ġ225": 18500, + "ĠSullivan": 18501, + "ĠChoice": 18502, + "Ġaster": 18503, + "ws": 18504, + "Ġhotels": 18505, + "Ġconsiderations": 18506, + "Ġcouch": 18507, + "ĠStrip": 18508, + "ĠGn": 18509, + "Ġmanipulate": 18510, + "lied": 18511, + "Ġsynthetic": 18512, + "Ġassaulted": 18513, + "Ġoffenses": 18514, + "ĠDrake": 18515, + "Ġimpe": 18516, + "October": 18517, + "ĠHeritage": 18518, + "hl": 18519, + "ĠBlair": 18520, + "Unlike": 18521, + "Ġgrief": 18522, + "Ġ450": 18523, + "Ġopted": 18524, + "Ġresignation": 18525, + "ilo": 18526, + "Ġverse": 18527, + "ĠTomb": 18528, + "Ġupt": 18529, + "Ġaired": 18530, + "ĠHook": 18531, + "ĠMLB": 18532, + "Ġassumes": 18533, + "outed": 18534, + "ĠVers": 18535, + "Ġinferior": 18536, + "Ġbundle": 18537, + "ĠDNS": 18538, + "ographer": 18539, + "Ġmultip": 18540, + "ĠSouls": 18541, + "Ġillustrated": 18542, + "Ġtactic": 18543, + "Ġdressing": 18544, + "Ġduo": 18545, + "Conf": 18546, + "Ġrelent": 18547, + "Ġcant": 18548, + "Ġscarce": 18549, + "Ġcandy": 18550, + "ĠCF": 18551, + "Ġaffiliated": 
18552, + "Ġsprint": 18553, + "ylan": 18554, + "ĠGarcia": 18555, + "Ġjunk": 18556, + "Print": 18557, + "exec": 18558, + "Crit": 18559, + "Ġportrait": 18560, + "iries": 18561, + "ĠOFF": 18562, + "Ġdisputes": 18563, + "WR": 18564, + "Love": 18565, + "ãģĦ": 18566, + "ĠReyn": 18567, + "Ġhipp": 18568, + "opath": 18569, + "Ġfloors": 18570, + "ĠFeel": 18571, + "Ġworries": 18572, + "Ġsettlements": 18573, + "ĠPos": 18574, + "Ġmosque": 18575, + "Ġfinals": 18576, + "Ġcrushed": 18577, + "ĠProbably": 18578, + "ĠBot": 18579, + "ĠMans": 18580, + "ĠPeriod": 18581, + "Ġsovereignty": 18582, + "Ġseller": 18583, + "Ġapost": 18584, + "Ġamateur": 18585, + "Ġdorm": 18586, + "Ġconsuming": 18587, + "Ġarmour": 18588, + "ĠRoose": 18589, + "Ġintensive": 18590, + "Ġeliminating": 18591, + "ĠSunni": 18592, + "ĠAleppo": 18593, + "jin": 18594, + "Ġadvise": 18595, + "pal": 18596, + "ĠHalo": 18597, + "Ġdescent": 18598, + "Ġsimpler": 18599, + "Ġbooth": 18600, + "STR": 18601, + "Later": 18602, + "ĠCave": 18603, + "===": 18604, + "Ġmol": 18605, + "Ġfist": 18606, + "Ġshotgun": 18607, + "supp": 18608, + "Ġrobbery": 18609, + "Effect": 18610, + "Ġobscure": 18611, + "ĠProfessional": 18612, + "Ġembassy": 18613, + "Ġmilitant": 18614, + "Ġincarcer": 18615, + "Ġgenerates": 18616, + "Ġlaunches": 18617, + "Ġadministrators": 18618, + "Ġshaft": 18619, + "Ġcircular": 18620, + "Ġfreshman": 18621, + "ĠWes": 18622, + "ĠJoel": 18623, + "ĠDrew": 18624, + "ĠDuncan": 18625, + "ĠApparently": 18626, + "sight": 18627, + "ĠInternal": 18628, + "ĠIndividual": 18629, + "ĠFE": 18630, + "Ġbore": 18631, + "ĠMt": 18632, + "Ġbroadly": 18633, + "ĠOptions": 18634, + "ountain": 18635, + "ipes": 18636, + "ĠVideos": 18637, + "204": 18638, + "Ġhills": 18639, + "Ġsimulation": 18640, + "Ġdisappointment": 18641, + "itan": 18642, + "ĠLaboratory": 18643, + "Ġupward": 18644, + "Ġboundary": 18645, + "Ġdarker": 18646, + "hart": 18647, + "Ġdominance": 18648, + "Cong": 18649, + "ĠOracle": 18650, + "ĠLords": 18651, + "Ġscholarship": 18652, + "ĠVincent": 18653, + "ede": 18654, + "ĠRah": 18655, + "Ġencourages": 18656, + "rov": 18657, + "Ġquo": 18658, + "Ġpremise": 18659, + "ĠCrisis": 18660, + "ĠHolocaust": 18661, + "Ġrhythm": 18662, + "Ġmetric": 18663, + "club": 18664, + "Ġtransported": 18665, + "Ġnod": 18666, + "ĠPist": 18667, + "Ġancestors": 18668, + "ĠFreder": 18669, + "thumbnails": 18670, + "ĠCE": 18671, + "OND": 18672, + "Phil": 18673, + "venge": 18674, + "ĠProducts": 18675, + "castle": 18676, + "Ġqualifying": 18677, + "ĠKaren": 18678, + "VERTISEMENT": 18679, + "Ġmighty": 18680, + "Ġexplanations": 18681, + "Ġfixing": 18682, + "Di": 18683, + "Ġdeclaring": 18684, + "Ġanonymity": 18685, + "Ġjuven": 18686, + "ĠNord": 18687, + "ĠDoom": 18688, + "ĠActually": 18689, + "Ok": 18690, + "phis": 18691, + "ĠDesert": 18692, + "Ġ116": 18693, + "IK": 18694, + "ĠFM": 18695, + "Ġincomes": 18696, + "VEL": 18697, + "okers": 18698, + "Ġpecul": 18699, + "Ġlightweight": 18700, + "gue": 18701, + "Ġaccent": 18702, + "Ġincrement": 18703, + "ĠChan": 18704, + "Ġcomplaining": 18705, + "ĠBaghd": 18706, + "Ġmidfielder": 18707, + "Ġoverhaul": 18708, + "Process": 18709, + "ĠHollow": 18710, + "ĠTitans": 18711, + "Small": 18712, + "manuel": 18713, + "ĠUnity": 18714, + "ĠEvents": 18715, + "Sty": 18716, + "Ġdisproportion": 18717, + "nesty": 18718, + "enes": 18719, + "ĠCod": 18720, + "Ġdemonstrations": 18721, + "ĠCrimson": 18722, + "ĠOH": 18723, + "Ġenrolled": 18724, + "Ġcel": 18725, + "ĠBrett": 18726, + "Ġaide": 18727, + "Ġheels": 18728, + "Ġbroadband": 18729, + "Ġmarking": 18730, + "Ġwizard": 18731, + "ĠNJ": 
18732, + "ĠChiefs": 18733, + "Ġingredient": 18734, + "Ġdug": 18735, + "ĠShut": 18736, + "urchase": 18737, + "endor": 18738, + "Ġfarmer": 18739, + "ĠGoldman": 18740, + "129": 18741, + "155": 18742, + "Order": 18743, + "Ġlion": 18744, + "iably": 18745, + "Ġstain": 18746, + "array": 18747, + "ilitary": 18748, + "ĠFAQ": 18749, + "Ġexploded": 18750, + "ĠMcCarthy": 18751, + "ĠTweet": 18752, + "ĠGreens": 18753, + "eking": 18754, + "ln": 18755, + "ensen": 18756, + "Ġmotorcycle": 18757, + "Ġparticle": 18758, + "Ġcholesterol": 18759, + "Bron": 18760, + "Ġstair": 18761, + "Ġoxid": 18762, + "Ġdesirable": 18763, + "ibles": 18764, + "Ġtheor": 18765, + "forcing": 18766, + "Ġpromotional": 18767, + "ovo": 18768, + "boot": 18769, + "ĠBonus": 18770, + "rawling": 18771, + "Ġshortage": 18772, + "ĠPsy": 18773, + "Ġrecruited": 18774, + "Ġinfants": 18775, + "Ġtestosterone": 18776, + "Ġdeduct": 18777, + "Ġdistinctive": 18778, + "Ġfirmware": 18779, + "built": 18780, + "145": 18781, + "Ġexplored": 18782, + "Ġfactions": 18783, + "Ġvide": 18784, + "Ġtattoo": 18785, + "Ġfinancially": 18786, + "Ġfatigue": 18787, + "Ġproceeding": 18788, + "constitutional": 18789, + "Ġmiser": 18790, + "Ġchairs": 18791, + "gging": 18792, + "ipple": 18793, + "Ġdent": 18794, + "Ġdisreg": 18795, + "çĶ": 18796, + "stant": 18797, + "llo": 18798, + "bps": 18799, + "akening": 18800, + "Ġabnormal": 18801, + "ĠERA": 18802, + "士": 18803, + "ĠHBO": 18804, + "ĠMAR": 18805, + "Ġconcess": 18806, + "Ġservant": 18807, + "Ġaspir": 18808, + "lav": 18809, + "ĠPanel": 18810, + "amo": 18811, + "Ġprecip": 18812, + "Ġrecordings": 18813, + "Ġproceeded": 18814, + "Ġcolony": 18815, + "ĠTang": 18816, + "ablo": 18817, + "Ġstripped": 18818, + "Left": 18819, + "too": 18820, + "Ġpotatoes": 18821, + "Ġfinest": 18822, + "%).": 18823, + "Ġcrap": 18824, + "ĠZach": 18825, + "abases": 18826, + "ĠGoth": 18827, + "Ġbillionaire": 18828, + "wolf": 18829, + "Ġsanction": 18830, + "SK": 18831, + "Ġlogged": 18832, + "Po": 18833, + "eyed": 18834, + "unal": 18835, + "Ġcricket": 18836, + "Ġarmies": 18837, + "Ġuncovered": 18838, + "Cloud": 18839, + "ón": 18840, + "Ġrebounds": 18841, + "Ġmes": 18842, + "Oper": 18843, + "Pac": 18844, + "Ġnationally": 18845, + "Ġinserted": 18846, + "pict": 18847, + "Ġgovernance": 18848, + "и": 18849, + "Ġprivileges": 18850, + "GET": 18851, + "Ġfavorites": 18852, + "imity": 18853, + "Ġlover": 18854, + "them": 18855, + "empl": 18856, + "Ġgorgeous": 18857, + "Ann": 18858, + "Ġslipped": 18859, + "Ġveto": 18860, + "Bob": 18861, + "Ġslim": 18862, + "ucc": 18863, + "ĠFame": 18864, + "uddenly": 18865, + "Ġdenies": 18866, + "ĠMaur": 18867, + "Ġdistances": 18868, + "Ġwanna": 18869, + "tar": 18870, + "ĠSER": 18871, + "ĠâĪ": 18872, + "Ġlemon": 18873, + "athetic": 18874, + "Ġliteral": 18875, + "Ġdistinguished": 18876, + "Ġanswering": 18877, + "GI": 18878, + "Ġreligions": 18879, + "ĠPhilos": 18880, + "ĠLay": 18881, + "Ġcompos": 18882, + "irements": 18883, + "ĠKos": 18884, + "inez": 18885, + "rolling": 18886, + "Ġyoungest": 18887, + "andise": 18888, + "ĠBorn": 18889, + "Ġaltar": 18890, + "amina": 18891, + "ĠBoot": 18892, + "voc": 18893, + "Ġdigging": 18894, + "Ġpressures": 18895, + "Ġlen": 18896, + "264": 18897, + "Ġassassination": 18898, + "ĠBirmingham": 18899, + "ĠMyth": 18900, + "Ġsovereign": 18901, + "ĠArtist": 18902, + "ĠPhotograph": 18903, + "Ġdepicted": 18904, + "Ġdispens": 18905, + "orthy": 18906, + "Ġambul": 18907, + "integ": 18908, + "ĠCele": 18909, + "ĠTibet": 18910, + "Ġhierarchy": 18911, + "Ġcu": 18912, + "Ġpreseason": 18913, + "ĠPeterson": 18914, + 
"Ġcolours": 18915, + "Ġworrying": 18916, + "Ġbackers": 18917, + "ĠPalmer": 18918, + "Ġμ": 18919, + "Ġcontributor": 18920, + "Ġhearings": 18921, + "Ġurine": 18922, + "ĠÙ": 18923, + "ourgeois": 18924, + "Similar": 18925, + "ĠZimmer": 18926, + "something": 18927, + "ĠUSC": 18928, + "Ġstrengths": 18929, + "ĠFI": 18930, + "Ġlogging": 18931, + "Asked": 18932, + "ĠThai": 18933, + "inqu": 18934, + "ĠWalt": 18935, + "Ġcrews": 18936, + "itism": 18937, + "301": 18938, + "Ġsharply": 18939, + "umed": 18940, + "Ġredirect": 18941, + "rators": 18942, + "Inf": 18943, + "ĠWeapons": 18944, + "Ġteasp": 18945, + "1999": 18946, + "Live": 18947, + "ĠEspecially": 18948, + "ĠSter": 18949, + "ĠVeterans": 18950, + "Ġintro": 18951, + "otherapy": 18952, + "Ġmalware": 18953, + "Ġbreeding": 18954, + "Ġmolecular": 18955, + "ĠRoute": 18956, + "ĠComment": 18957, + "ochem": 18958, + "Ġain": 18959, + "Season": 18960, + "Ġlinebacker": 18961, + "Ä«": 18962, + "ĠEconomics": 18963, + "esar": 18964, + "ĠLives": 18965, + "ĠEmma": 18966, + "Ġkin": 18967, + "ĠTerrit": 18968, + "Ġplanted": 18969, + "oton": 18970, + "ĠButter": 18971, + "ĠSpons": 18972, + "PER": 18973, + "Ġdungeon": 18974, + "Ġsymbolic": 18975, + "Ġfilmed": 18976, + "Ġdiets": 18977, + "Ġconcludes": 18978, + "Ġcertainty": 18979, + "ĠFormat": 18980, + "Ġstrangers": 18981, + "format": 18982, + "ĠPhase": 18983, + "Ġcopied": 18984, + "Ġmetres": 18985, + "lda": 18986, + "ĠUsers": 18987, + "Ġdeliberate": 18988, + "Ġwashed": 18989, + "ĠLance": 18990, + "imation": 18991, + "Ġimproper": 18992, + "ĠGenesis": 18993, + "ickr": 18994, + "ĠKush": 18995, + "Ġrealise": 18996, + "Ġembarrassing": 18997, + "alking": 18998, + "bucks": 18999, + "Ġverified": 19000, + "Ġoutline": 19001, + "years": 19002, + "ĠIncome": 19003, + "202": 19004, + "Ġzombies": 19005, + "Final": 19006, + "ĠMillenn": 19007, + "Ġmodifications": 19008, + "ĠVision": 19009, + "ĠMoses": 19010, + "verb": 19011, + "iterranean": 19012, + "ĠJet": 19013, + "Ġnaval": 19014, + "ĠAgg": 19015, + "Ġurl": 19016, + "Ġvictories": 19017, + "Ġnonetheless": 19018, + "Ġinjust": 19019, + "ĠFact": 19020, + "çļ": 19021, + "Ġinsufficient": 19022, + "review": 19023, + "facebook": 19024, + "Ġnegotiating": 19025, + "Ġguarantees": 19026, + "imen": 19027, + "utenberg": 19028, + "Ġgambling": 19029, + "Ġcongr": 19030, + "Loading": 19031, + "Ġnevertheless": 19032, + "Ġpresidents": 19033, + "ĠIndustrial": 19034, + "Ġ118": 19035, + "Ġpoured": 19036, + "ĠTory": 19037, + "Ġ175": 19038, + "Ġ:=": 19039, + "Scott": 19040, + "angered": 19041, + "Tok": 19042, + "Ġorganizers": 19043, + "Mat": 19044, + "ĠGrowth": 19045, + "Ġadul": 19046, + "Ġensures": 19047, + "Ġ117": 19048, + "é¾įå": 19049, + "Ġmassacre": 19050, + "Ġgrades": 19051, + "before": 19052, + "ADVERTISEMENT": 19053, + "ĠSlow": 19054, + "ĠMMA": 19055, + "âĢĶ\"": 19056, + "ĠVatican": 19057, + "Qaeda": 19058, + "Ġowe": 19059, + "6666": 19060, + "ĠSorry": 19061, + "ĠGrass": 19062, + "Ġbackgrounds": 19063, + "Ġexhausted": 19064, + "Ġclan": 19065, + "Ġcompromised": 19066, + "ĠElf": 19067, + "ĠIsaac": 19068, + "enson": 19069, + "Invest": 19070, + "IFA": 19071, + "Ġinterrupted": 19072, + "ãĥīãĥ©": 19073, + "Ġtwisted": 19074, + "ĠDragons": 19075, + "Mode": 19076, + "ĠKremlin": 19077, + "Ġfertil": 19078, + "heres": 19079, + "phan": 19080, + "ĠNode": 19081, + "fed": 19082, + "ĠOrc": 19083, + "Ġunwilling": 19084, + "Cent": 19085, + "Ġpriorit": 19086, + "Ġgraduates": 19087, + "Ġsubjective": 19088, + "Ġissuing": 19089, + "ĠLt": 19090, + "Ġviewer": 19091, + "Ġwoke": 19092, + "Thus": 19093, + "brook": 19094, + 
"Ġdepressed": 19095, + "Ġbracket": 19096, + "ĠGor": 19097, + "ĠFighting": 19098, + "Ġstriker": 19099, + "Report": 19100, + "ĠPortugal": 19101, + "Ġneo": 19102, + "wed": 19103, + "199": 19104, + "Ġfleeing": 19105, + "shadow": 19106, + "identified": 19107, + "USE": 19108, + "Steam": 19109, + "Ġstretched": 19110, + "Ġrevelations": 19111, + "arted": 19112, + "ĠDw": 19113, + "Ġalignment": 19114, + "eston": 19115, + "ĠJared": 19116, + "Sep": 19117, + "Ġblogs": 19118, + "update": 19119, + "gom": 19120, + "risk": 19121, + "Ġclash": 19122, + "ĠHour": 19123, + "Ġruntime": 19124, + "Ġunwanted": 19125, + "Ġscam": 19126, + "Ġrack": 19127, + "Ġenlight": 19128, + "onest": 19129, + "ĠFerr": 19130, + "Ġconvictions": 19131, + "Ġpiano": 19132, + "Ġcirculation": 19133, + "ĠWelcome": 19134, + "Ġbacklash": 19135, + "ĠWade": 19136, + "Ġreceivers": 19137, + "otive": 19138, + "Jeff": 19139, + "Ġnetworking": 19140, + "ĠPrep": 19141, + "ĠExplorer": 19142, + "Ġlecture": 19143, + "Ġuploaded": 19144, + "ĠMeat": 19145, + "BLE": 19146, + "ĠNazis": 19147, + "ĠSynd": 19148, + "stud": 19149, + "roots": 19150, + "rians": 19151, + "Ġportrayed": 19152, + "Ġ??": 19153, + "ĠBuddha": 19154, + "sun": 19155, + "Robert": 19156, + "ĠComplex": 19157, + "Ġoversee": 19158, + "Ġstealth": 19159, + "Title": 19160, + "ĠJobs": 19161, + "ĠKum": 19162, + "Ġappreciation": 19163, + "ĠMOD": 19164, + "Ġbasics": 19165, + "Ġclips": 19166, + "Ġnursing": 19167, + "Ġproposition": 19168, + "Ġrealised": 19169, + "ĠNYC": 19170, + "Ġallocated": 19171, + "rium": 19172, + "aran": 19173, + "ĠProduction": 19174, + "ĠVote": 19175, + "Ġsmugg": 19176, + "Ġhunter": 19177, + "azer": 19178, + "ĠChanges": 19179, + "Ġfluct": 19180, + "yon": 19181, + "Array": 19182, + "Ġkits": 19183, + "Water": 19184, + "Ġuncommon": 19185, + "Ġresting": 19186, + "ells": 19187, + "would": 19188, + "Ġpursued": 19189, + "Ġassertion": 19190, + "ometown": 19191, + "ĠMosul": 19192, + "ĠPlatform": 19193, + "iolet": 19194, + "Ġshareholders": 19195, + "Ġtrails": 19196, + "Pay": 19197, + "ĠEnforcement": 19198, + "types": 19199, + "ĠAnonymous": 19200, + "Ġsatisfying": 19201, + "ilogy": 19202, + "Ġ('": 19203, + "wave": 19204, + "city": 19205, + "Steve": 19206, + "Ġconfrontation": 19207, + "ĠEld": 19208, + "Capt": 19209, + "ahan": 19210, + "htm": 19211, + "ĠCtrl": 19212, + "ONS": 19213, + "230": 19214, + "ifa": 19215, + "holding": 19216, + "Ġdelicate": 19217, + "Ġjaw": 19218, + "ĠGoing": 19219, + "orum": 19220, + "Sal": 19221, + "Ġdull": 19222, + "ĠBeth": 19223, + "Ġprisons": 19224, + "Ġego": 19225, + "ĠElsa": 19226, + "avorite": 19227, + "ĠGang": 19228, + "ĠNuclear": 19229, + "Ġspider": 19230, + "atsu": 19231, + "Ġsampling": 19232, + "Ġabsorbed": 19233, + "ĠPharm": 19234, + "ieth": 19235, + "Ġbucket": 19236, + "ĠRecomm": 19237, + "OF": 19238, + "ĠFactory": 19239, + "ANCE": 19240, + "Ġbacter": 19241, + "Has": 19242, + "ĠObserv": 19243, + "121": 19244, + "Ġpremiere": 19245, + "Develop": 19246, + "Ġcurrencies": 19247, + "Cast": 19248, + "Ġaccompanying": 19249, + "ĠNashville": 19250, + "Ġfatty": 19251, + "ĠBrend": 19252, + "Ġlocks": 19253, + "Ġcentered": 19254, + "ĠUT": 19255, + "aughs": 19256, + "orie": 19257, + "ĠAffordable": 19258, + "vance": 19259, + "DL": 19260, + "emet": 19261, + "Ġthrone": 19262, + "ĠBluetooth": 19263, + "Ġnaming": 19264, + "ifts": 19265, + "ADE": 19266, + "Ġcorrected": 19267, + "Ġpromptly": 19268, + "ĠSTR": 19269, + "Ġgenome": 19270, + "Ġcope": 19271, + "Ġvalley": 19272, + "Ġrounded": 19273, + "ĠKend": 19274, + "alion": 19275, + "pers": 19276, + "Ġtourism": 19277, + "Ġstark": 
19278, + "vl": 19279, + "Ġblowing": 19280, + "ĠSchedule": 19281, + "std": 19282, + "Ġunhappy": 19283, + "Ġlitigation": 19284, + "cedes": 19285, + "Ġandroid": 19286, + "Ġintegral": 19287, + "erers": 19288, + "uded": 19289, + "tax": 19290, + "Ġreiter": 19291, + "ĠMotors": 19292, + "ociated": 19293, + "Ġwonders": 19294, + "ĠApost": 19295, + "ucking": 19296, + "ĠRoosevelt": 19297, + "fram": 19298, + "Ġyields": 19299, + "Ġconstitutes": 19300, + "awk": 19301, + "Interest": 19302, + "Ġinterim": 19303, + "Ġbreakthrough": 19304, + "ĠCher": 19305, + "Ġprosec": 19306, + "ĠDj": 19307, + "ĠMT": 19308, + "Resp": 19309, + "ĠPT": 19310, + "Ġsperm": 19311, + "edit": 19312, + "BT": 19313, + "Linux": 19314, + "country": 19315, + "league": 19316, + "Ġdick": 19317, + "Ġoct": 19318, + "Ġinserting": 19319, + "Ġscra": 19320, + "ĠBrewing": 19321, + "Ġ1966": 19322, + "Ġrunners": 19323, + "Ġplun": 19324, + "idy": 19325, + "ĠDian": 19326, + "Ġdysfunction": 19327, + "Ġexclusion": 19328, + "Ġdisgr": 19329, + "Ġincorporate": 19330, + "Ġreconc": 19331, + "Ġnominated": 19332, + "ĠArcher": 19333, + "draw": 19334, + "achelor": 19335, + "Ġwritings": 19336, + "Ġshallow": 19337, + "Ġhast": 19338, + "ĠBMW": 19339, + "ĠRS": 19340, + "Ġthigh": 19341, + "Ġ1963": 19342, + "Ġlamb": 19343, + "Ġfavored": 19344, + "agle": 19345, + "Ġcooler": 19346, + "ĠHours": 19347, + "ĠGU": 19348, + "ĠOrigin": 19349, + "Ġglimpse": 19350, + "--------------------": 19351, + "Lim": 19352, + "Ġcheek": 19353, + "Ġjealous": 19354, + "-'": 19355, + "Ġharness": 19356, + "ĠPoison": 19357, + "Ġdisabilities": 19358, + "neapolis": 19359, + "Ġoutlook": 19360, + "Ġnotify": 19361, + "ĠIndianapolis": 19362, + "Ġabrupt": 19363, + "nsic": 19364, + "Ġencrypted": 19365, + "Ġforfe": 19366, + "reath": 19367, + "Ġrabb": 19368, + "Ġfoundations": 19369, + "Ġcompliment": 19370, + "ĠInterview": 19371, + "ĠSwe": 19372, + "Ġadolesc": 19373, + "Ġmonitors": 19374, + "ĠSacramento": 19375, + "Ġtimely": 19376, + "Ġcontempl": 19377, + "Ġpositioned": 19378, + "Ġposters": 19379, + "phies": 19380, + "iovascular": 19381, + "void": 19382, + "ĠFifth": 19383, + "Ġinvestigative": 19384, + "OUN": 19385, + "Ġintegrate": 19386, + "ĠINC": 19387, + "isha": 19388, + "iblings": 19389, + "ĠRequest": 19390, + "ĠRodriguez": 19391, + "Ġslides": 19392, + "ĠDX": 19393, + "Ġfeminism": 19394, + "Ġdatas": 19395, + "Ġbend": 19396, + "irus": 19397, + "ĠNigeria": 19398, + "Fox": 19399, + "Change": 19400, + "Ġairplane": 19401, + "ĠLaden": 19402, + "Ġpublicity": 19403, + "ixty": 19404, + "Ġcommitments": 19405, + "Ġaggregate": 19406, + "Ġdisplaying": 19407, + "ĠArrow": 19408, + "Ġ122": 19409, + "Ġrespects": 19410, + "android": 19411, + "six": 19412, + "ĠSha": 19413, + "Ġrestoration": 19414, + ")\\": 19415, + "WS": 19416, + "oys": 19417, + "Ġillustrate": 19418, + "without": 19419, + "126": 19420, + "ĠâĶĤ": 19421, + "Ġpickup": 19422, + "nels": 19423, + "Ġ....": 19424, + "food": 19425, + "ĠFen": 19426, + ")?": 19427, + "Ġphenomena": 19428, + "Ġcompanions": 19429, + "ĠWrite": 19430, + "Ġspill": 19431, + "Ġbridges": 19432, + "ĠUpdated": 19433, + "ĠFo": 19434, + "Ġinsects": 19435, + "ASHINGTON": 19436, + "Ġscare": 19437, + "iltr": 19438, + "ĠZhang": 19439, + "Ġseverity": 19440, + "Ġindul": 19441, + "149": 19442, + "ĠCoffee": 19443, + "Ġnorms": 19444, + "Ġpulse": 19445, + "ĠFT": 19446, + "Ġhorrific": 19447, + "ĠDestroy": 19448, + "ĠJSON": 19449, + "Ġolive": 19450, + "Ġdiscusses": 19451, + "Rest": 19452, + "Elect": 19453, + "ĠWinn": 19454, + "ĠSurviv": 19455, + "ĠHait": 19456, + "Sure": 19457, + "oped": 19458, + 
"Ġrooted": 19459, + "ĠSke": 19460, + "ĠBronze": 19461, + "Ġlol": 19462, + "Default": 19463, + "Ġcommodity": 19464, + "redited": 19465, + "Ġlibertarian": 19466, + "Ġforbidden": 19467, + "Ġgran": 19468, + "à¨": 19469, + "Ġlag": 19470, + "enz": 19471, + "drive": 19472, + "Ġmathematics": 19473, + "Ġwires": 19474, + "Ġcritically": 19475, + "Ġcarbohyd": 19476, + "ĠChancellor": 19477, + "ĠEddie": 19478, + "Ġbanning": 19479, + "ĠFri": 19480, + "Ġcomplications": 19481, + "etric": 19482, + "ĠBangladesh": 19483, + "Ġbandwidth": 19484, + "Stop": 19485, + "ĠOriginally": 19486, + "Ġhalfway": 19487, + "ynasty": 19488, + "shine": 19489, + "Ġtales": 19490, + "rities": 19491, + "avier": 19492, + "Ġspinning": 19493, + "ĠWHO": 19494, + "Ġneighbourhood": 19495, + "bach": 19496, + "Ġcommerce": 19497, + "ĠSle": 19498, + "BU": 19499, + "Ġentrepreneur": 19500, + "Ġpeculiar": 19501, + "ĠComments": 19502, + "fre": 19503, + "320": 19504, + "ICS": 19505, + "Ġimagery": 19506, + "ĠCanon": 19507, + "ĠElectronic": 19508, + "short": 19509, + "((": 19510, + "Dig": 19511, + "Ġcommem": 19512, + "uced": 19513, + "Ġinclined": 19514, + "ĠSummon": 19515, + "Ġcliff": 19516, + "ĠMediterranean": 19517, + "Ġpoetry": 19518, + "Ġprosperity": 19519, + "ĠRece": 19520, + "Ġpills": 19521, + "member": 19522, + "Ġfinale": 19523, + "unc": 19524, + "ĠGig": 19525, + "ä½": 19526, + "Ġlod": 19527, + "Ġbackward": 19528, + "-+": 19529, + "ĠForward": 19530, + "Ġthri": 19531, + "sure": 19532, + "Ġsoap": 19533, + "ĠFX": 19534, + "RES": 19535, + "ĠSexual": 19536, + "oulos": 19537, + "Ġfoolish": 19538, + "Ġrighteous": 19539, + "Ġcoff": 19540, + "terrorism": 19541, + "ustain": 19542, + "oter": 19543, + "Ġabuses": 19544, + "next": 19545, + "Ġabusive": 19546, + "Ġthereafter": 19547, + "Ġprohibition": 19548, + "ĠSUP": 19549, + "Ġdip": 19550, + "Ġripped": 19551, + "Ġinherited": 19552, + "Ġbats": 19553, + "stru": 19554, + "GT": 19555, + "Ġflawed": 19556, + "phabet": 19557, + "Ġfog": 19558, + "doors": 19559, + "Ġimaging": 19560, + "Ġdigits": 19561, + "ĠHungary": 19562, + "Ġarrog": 19563, + "Ġteachings": 19564, + "Ġprotocols": 19565, + "ĠBanks": 19566, + "à¸": 19567, + "pound": 19568, + "ĠCurt": 19569, + ".\")": 19570, + "./": 19571, + "Ġexemption": 19572, + "endix": 19573, + "ĠMull": 19574, + "Ġimproves": 19575, + "ĠGamer": 19576, + "dimensional": 19577, + "Icon": 19578, + "ĠMargaret": 19579, + "Status": 19580, + "dates": 19581, + "Ġintends": 19582, + "Ġdepict": 19583, + "Ġparked": 19584, + "Joe": 19585, + "ĠMarines": 19586, + "chnology": 19587, + "!).": 19588, + "Ġjudged": 19589, + "Ġweights": 19590, + "Ray": 19591, + "Ġapartments": 19592, + "hester": 19593, + "Ġreinforce": 19594, + "Ġoffender": 19595, + "occup": 19596, + "Ġsore": 19597, + "ept": 19598, + "ĠPHP": 19599, + "ĠBrow": 19600, + "Ġauthorization": 19601, + "ĠRisk": 19602, + "ĠDelaware": 19603, + "ĠQU": 19604, + "Ġnotifications": 19605, + "Ġsunlight": 19606, + "Ġexclude": 19607, + "dat": 19608, + "Ġmesh": 19609, + "ĠSudan": 19610, + "Ġbelonged": 19611, + "Ġsubway": 19612, + "Ġnoon": 19613, + "ĠInterior": 19614, + "olics": 19615, + "ĠLakers": 19616, + "Ġcoding": 19617, + "Disclaimer": 19618, + "Calif": 19619, + "Old": 19620, + "Ġdisl": 19621, + "?????": 19622, + "Ġconfirms": 19623, + "Ġrecruitment": 19624, + "Ġhomicide": 19625, + "Consider": 19626, + "ĠJeffrey": 19627, + "fty": 19628, + "};": 19629, + "Ġobjection": 19630, + "doing": 19631, + "ĠLeo": 19632, + "Want": 19633, + "Ġglow": 19634, + "ĠClarke": 19635, + "ĠNorman": 19636, + "Ġverification": 19637, + "Ġpacket": 19638, + "ĠFormula": 19639, + 
"Ġplag": 19640, + "esville": 19641, + "Ġshouting": 19642, + "Ġov": 19643, + "ĠREC": 19644, + "ĠBub": 19645, + "Ġninth": 19646, + "Ġenerg": 19647, + "Ġvalidity": 19648, + "Ġups": 19649, + "jack": 19650, + "Ġneighboring": 19651, + "ĠNec": 19652, + "eworks": 19653, + "ĠHab": 19654, + "arez": 19655, + "Ġspine": 19656, + "Ġeventual": 19657, + "ĠLeaders": 19658, + "ĠCarn": 19659, + "Ġprobation": 19660, + "Ġromance": 19661, + "msg": 19662, + "ĠMechanical": 19663, + "ERY": 19664, + "Rock": 19665, + "Ġpartisan": 19666, + "Node": 19667, + "assets": 19668, + "minent": 19669, + "Ġforeigners": 19670, + "Ġtestify": 19671, + "ĠUsually": 19672, + "lords": 19673, + "ĠGren": 19674, + "ĠPowell": 19675, + "BIL": 19676, + "Ġsr": 19677, + "Ġaddict": 19678, + "Ġshells": 19679, + "Ġsigh": 19680, + "ĠYale": 19681, + "ternity": 19682, + "Ġ750": 19683, + "EU": 19684, + "ĠRifle": 19685, + "Ġpatron": 19686, + "ema": 19687, + "ĠBannon": 19688, + "anity": 19689, + "Ġtropical": 19690, + "ĠVII": 19691, + "cross": 19692, + "Everything": 19693, + "ĠISO": 19694, + "Ġhumble": 19695, + "assing": 19696, + "ĠFIG": 19697, + "Ġupdating": 19698, + "yson": 19699, + "Ġcalcium": 19700, + "Ġcompetent": 19701, + "Ġsteering": 19702, + "Prot": 19703, + "ĠSY": 19704, + "ĠFinals": 19705, + "ĠRug": 19706, + "159": 19707, + "137": 19708, + "ĠGolf": 19709, + "Ġ126": 19710, + "Ġaccommodation": 19711, + "ĠHughes": 19712, + "Ġaesthetic": 19713, + "artisan": 19714, + "ĠTwilight": 19715, + "Ġprince": 19716, + "ĠAgriculture": 19717, + "ĠDisco": 19718, + "Ġprecedent": 19719, + "Ġtyping": 19720, + "authorized": 19721, + "Option": 19722, + "ĠAub": 19723, + "lishes": 19724, + "acht": 19725, + "mag": 19726, + "Peter": 19727, + "ĠUFO": 19728, + "monton": 19729, + "ĠLith": 19730, + "Ġarom": 19731, + "Ġsecuring": 19732, + "Ġconfined": 19733, + "private": 19734, + "Ġswords": 19735, + "Ġmarkers": 19736, + "Ġmetabolic": 19737, + "select": 19738, + "ĠCurse": 19739, + "ĠOt": 19740, + "gressive": 19741, + "Ġincumb": 19742, + "ĠSaga": 19743, + "Ġpriced": 19744, + "Ġclearance": 19745, + "Content": 19746, + "Ġdrilling": 19747, + "Ġnotices": 19748, + "Ġbourgeois": 19749, + "Ġvest": 19750, + "Ġcookie": 19751, + "ĠGuardians": 19752, + "rys": 19753, + "inyl": 19754, + "Ġ124": 19755, + "Ġplausible": 19756, + "ongh": 19757, + "ĠOdin": 19758, + "Ġconception": 19759, + "ĠYuk": 19760, + "ĠBaghdad": 19761, + "ĠFlag": 19762, + "Austral": 19763, + "ĠIBM": 19764, + "Ġinternationally": 19765, + "ĠWikiLeaks": 19766, + "IED": 19767, + "Ġcyn": 19768, + "Ġchooses": 19769, + "ĠPill": 19770, + "Ġcombining": 19771, + "Ġradi": 19772, + "ĠMohammed": 19773, + "defense": 19774, + "atching": 19775, + "Subject": 19776, + "iciency": 19777, + "Frame": 19778, + "Ġ{\"": 19779, + "Ġchess": 19780, + "Ġtimer": 19781, + "190": 19782, + "Ġtin": 19783, + "Ġordinance": 19784, + "emetery": 19785, + "Ġaccusing": 19786, + "Ġnoticeable": 19787, + "Ġcentres": 19788, + "Ġlid": 19789, + "ĠMills": 19790, + "imgur": 19791, + "Ġzoom": 19792, + "ergic": 19793, + "Ġcompression": 19794, + "prim": 19795, + "find": 19796, + "Ġsurg": 19797, + "Ġpand": 19798, + "ĠKee": 19799, + "ĠChad": 19800, + "cellence": 19801, + "oyle": 19802, + "Ġsocialism": 19803, + "ĠTravis": 19804, + "ĠMHz": 19805, + "Ġguild": 19806, + "ALLY": 19807, + "ĠSubscribe": 19808, + "ĠRelated": 19809, + "Ġoccurrence": 19810, + "itching": 19811, + "Ġfictional": 19812, + "Ġcrush": 19813, + "ĠEA": 19814, + "cod": 19815, + "mix": 19816, + "ĠTriple": 19817, + "Ġretrieve": 19818, + "Ġstimulus": 19819, + "Ġpsychiat": 19820, + "ĠDoor": 19821, + 
"Ġhomosexuality": 19822, + "Ġelementary": 19823, + "Ġcellular": 19824, + "idian": 19825, + "ĠLaun": 19826, + "Ġintriguing": 19827, + "Ġfoam": 19828, + "ĠBass": 19829, + "idi": 19830, + "itsu": 19831, + "Ġassure": 19832, + "Ġcongrat": 19833, + "Ġbusinessman": 19834, + "ĠBoost": 19835, + "close": 19836, + "Ġlied": 19837, + "Ġsciences": 19838, + "ĠOmega": 19839, + "ĠGraphics": 19840, + "Ġ<=": 19841, + "spoken": 19842, + "Ġconnectivity": 19843, + "Saturday": 19844, + "ĠAvengers": 19845, + "Ġtoggle": 19846, + "Ġankle": 19847, + "Ġnationalist": 19848, + "model": 19849, + "ĠPool": 19850, + "ophobia": 19851, + "Var": 19852, + "ĠMons": 19853, + "atories": 19854, + "Ġaggressively": 19855, + "Clear": 19856, + "Forge": 19857, + "acters": 19858, + "Ġhedge": 19859, + "Ġpipes": 19860, + "Ġblunt": 19861, + "Ġsq": 19862, + "Ġremotely": 19863, + "Wed": 19864, + "asers": 19865, + "Ġrefriger": 19866, + "Ġtiles": 19867, + "Ġrescued": 19868, + "Ġcomprised": 19869, + "insky": 19870, + "Ġmanif": 19871, + "avanaugh": 19872, + "Ġprolifer": 19873, + "Ġaligned": 19874, + "xml": 19875, + "Ġtriv": 19876, + "Ġcoordination": 19877, + "ĠPER": 19878, + "ĠQuote": 19879, + "134": 19880, + "bf": 19881, + "ĠSaw": 19882, + "Ġtermination": 19883, + "Ġ190": 19884, + "Ġadditions": 19885, + "Ġtrio": 19886, + "Ġprojections": 19887, + "Ġpositively": 19888, + "Ġinclusive": 19889, + "Ġmembr": 19890, + "1990": 19891, + "older": 19892, + "Ġpracticed": 19893, + "inkle": 19894, + "Arch": 19895, + "Ġstarters": 19896, + "arius": 19897, + "Ġintermediate": 19898, + "ĠBenef": 19899, + "ĠKiller": 19900, + "Ġinterventions": 19901, + "ĠKil": 19902, + "ĠFlying": 19903, + "Inv": 19904, + "Ġpremature": 19905, + "Ġpsychiatric": 19906, + "Ġindie": 19907, + "Ġcollar": 19908, + "ĠRainbow": 19909, + "afi": 19910, + "Ġdisruption": 19911, + "ĠFOX": 19912, + "casting": 19913, + "Ġmisdem": 19914, + "cro": 19915, + "Ġwipe": 19916, + "ardon": 19917, + "Ġbast": 19918, + "ĠTommy": 19919, + "ĠRepresentative": 19920, + "Ġbelly": 19921, + "ĠPO": 19922, + "ĠBreitbart": 19923, + "132": 19924, + "Ġmessaging": 19925, + "Should": 19926, + "References": 19927, + "ĠGRE": 19928, + "istical": 19929, + "LP": 19930, + "ĠCav": 19931, + "ĠCrazy": 19932, + "Ġintuitive": 19933, + "keeping": 19934, + "ĠMoss": 19935, + "Ġdiscontin": 19936, + "ĠModule": 19937, + "Ġunrelated": 19938, + "ĠPractice": 19939, + "ĠTransport": 19940, + "Ġstatistically": 19941, + "orns": 19942, + "Ġsized": 19943, + "pu": 19944, + "Ġcaf": 19945, + "ĠWorlds": 19946, + "ĠRodgers": 19947, + "ĠLun": 19948, + "ĠComic": 19949, + "living": 19950, + "Ġcared": 19951, + "Ġclimbed": 19952, + "){": 19953, + "Ġconsisted": 19954, + "Ġmedieval": 19955, + "folk": 19956, + "Ġhacked": 19957, + "Ġdire": 19958, + "ĠHermione": 19959, + "Ġtended": 19960, + "ceans": 19961, + "Daniel": 19962, + "went": 19963, + "Ġlegislators": 19964, + "Ġredes": 19965, + "games": 19966, + "Ġgn": 19967, + "amiliar": 19968, + "Ġ++": 19969, + "ggy": 19970, + "threat": 19971, + "Ġmagnet": 19972, + "Ġperceive": 19973, + "Ġzip": 19974, + "Ġindictment": 19975, + "Ġcritique": 19976, + "gard": 19977, + "ĠSafe": 19978, + "ĠCream": 19979, + "Ġadvent": 19980, + "oba": 19981, + "Ġvowed": 19982, + "ousands": 19983, + "Ġski": 19984, + "Ġabortions": 19985, + "uart": 19986, + "Ġstunned": 19987, + "Ġadvancing": 19988, + "Ġlacked": 19989, + "Ġ\\\"": 19990, + "Ġschizophren": 19991, + "Ġelegant": 19992, + "Ġconferences": 19993, + "Ġcanceled": 19994, + "ĠHudson": 19995, + "ĠHopefully": 19996, + "Ġtrump": 19997, + "Ġfrequencies": 19998, + "Ġmeteor": 19999, + "ĠJunior": 
20000, + "ĠFleet": 20001, + "ĠMalcolm": 20002, + "ĠTools": 20003, + "Ġ........": 20004, + "Ġhobby": 20005, + "ĠEuropeans": 20006, + "Ġ1500": 20007, + "ĠInto": 20008, + "Ġsway": 20009, + "ĠAppro": 20010, + "ĠCompl": 20011, + "Community": 20012, + "Ġtide": 20013, + "ĠSummit": 20014, + "ä»": 20015, + "Ġintervals": 20016, + "ĠEther": 20017, + "Ġhabitat": 20018, + "ĠStevens": 20019, + "lishing": 20020, + "ĠDomain": 20021, + "Ġtriggers": 20022, + "Ġchasing": 20023, + "Ġcharm": 20024, + "ĠFlower": 20025, + "itored": 20026, + "Ġblessing": 20027, + "Ġtextures": 20028, + "Five": 20029, + "Ġliquor": 20030, + "RP": 20031, + "FIN": 20032, + "Ġ1962": 20033, + "CAR": 20034, + "Unknown": 20035, + "Ġresil": 20036, + "ĠLily": 20037, + "Ġabundance": 20038, + "Ġpredictable": 20039, + "rar": 20040, + "Ġbullshit": 20041, + "leen": 20042, + "chet": 20043, + "Mor": 20044, + "Much": 20045, + "ä¹": 20046, + "Ġemphasized": 20047, + "Ġcrust": 20048, + "Ġprimitive": 20049, + "Ġenjoyable": 20050, + "ĠPictures": 20051, + "Ġteammate": 20052, + "pler": 20053, + "ĠTol": 20054, + "ĠKane": 20055, + "Ġsummoned": 20056, + "thy": 20057, + "rama": 20058, + "ĠHonda": 20059, + "Ġrealizing": 20060, + "Ġquicker": 20061, + "Ġconcentrate": 20062, + "clear": 20063, + "Ġ210": 20064, + "ĠErdogan": 20065, + "aris": 20066, + "Ġresponds": 20067, + "ĠBI": 20068, + "Ġeligibility": 20069, + "Ġpushes": 20070, + "ĠIdaho": 20071, + "Ġaggrav": 20072, + "Ġruins": 20073, + "urations": 20074, + "Ġbans": 20075, + "Ġanat": 20076, + "share": 20077, + "Ġgrind": 20078, + "hin": 20079, + "umen": 20080, + "Ġutilities": 20081, + "ĠYankees": 20082, + "Ġdatabases": 20083, + "ĠDD": 20084, + "Ġdisplaced": 20085, + "Ġdependencies": 20086, + "Ġstimulation": 20087, + "hun": 20088, + "houses": 20089, + "ĠPretty": 20090, + "ĠRavens": 20091, + "ĠTODAY": 20092, + "Ġassociates": 20093, + "Ġtherape": 20094, + "cled": 20095, + "Ġdeer": 20096, + "Ġrepairs": 20097, + "rentice": 20098, + "Ġreceptors": 20099, + "Ġremed": 20100, + "ĠCe": 20101, + "Ġmarriages": 20102, + "Ġballots": 20103, + "ĠSoldier": 20104, + "Ġhilarious": 20105, + "opl": 20106, + "138": 20107, + "Ġinherently": 20108, + "Ġignorant": 20109, + "Ġbounce": 20110, + "ĠEaster": 20111, + "RELATED": 20112, + "ĠCurrency": 20113, + "EV": 20114, + "ãĥŀ": 20115, + "ĠLead": 20116, + "Ġdeceased": 20117, + "Brien": 20118, + "ĠMusk": 20119, + "JS": 20120, + "Ġmerge": 20121, + "hearted": 20122, + "creat": 20123, + "mitt": 20124, + "mund": 20125, + "ĠâĢĭ": 20126, + "ĠBag": 20127, + "Ġprojection": 20128, + "Ġjava": 20129, + "ĠStandards": 20130, + "ĠLeonard": 20131, + "Ġcoconut": 20132, + "ĠPopulation": 20133, + "Ġtraject": 20134, + "Ġimply": 20135, + "Ġcuriosity": 20136, + "ĠDB": 20137, + "ĠFresh": 20138, + "ĠPor": 20139, + "Ġheavier": 20140, + "neys": 20141, + "gomery": 20142, + "Ġdeserved": 20143, + "Ġphrases": 20144, + "ĠGC": 20145, + "Ġyeast": 20146, + "desc": 20147, + "Death": 20148, + "Ġreboot": 20149, + "Ġmetadata": 20150, + "ICAL": 20151, + "Ġrepay": 20152, + "ĠIndependence": 20153, + "Ġsuburban": 20154, + "icals": 20155, + "Ġatop": 20156, + "Ġallocation": 20157, + "generation": 20158, + "ĠGram": 20159, + "Ġmoisture": 20160, + "Ġpine": 20161, + "ĠLiberals": 20162, + "Ġaides": 20163, + "Ġunderest": 20164, + "ĠBerry": 20165, + "Ġceremon": 20166, + "370": 20167, + "astrous": 20168, + "ĠPirates": 20169, + "Ġtense": 20170, + "ĠIndustries": 20171, + "ĠAppeals": 20172, + "ĠNear": 20173, + "Ġè£ıç": 20174, + "Ġlovers": 20175, + "ĠCAP": 20176, + "ĠCraw": 20177, + "Ġgiants": 20178, + "Ġefficacy": 20179, + "Element": 20180, + 
"ĠBehavior": 20181, + "ĠToyota": 20182, + "Ġintest": 20183, + "Priv": 20184, + "AI": 20185, + "Ġmaneuver": 20186, + "Ġperfection": 20187, + "Ġbang": 20188, + "paper": 20189, + "rill": 20190, + "George": 20191, + "border": 20192, + "inters": 20193, + "ĠSeth": 20194, + "Ġclues": 20195, + "ĠLevi": 20196, + "ĠRevenue": 20197, + "147": 20198, + "Ġvapor": 20199, + "Ġfortunate": 20200, + "Ġthreatens": 20201, + "Ġvet": 20202, + "Ġdependency": 20203, + "ersed": 20204, + "article": 20205, + "ĠBlizzard": 20206, + "Ġchlor": 20207, + "Ġminus": 20208, + "ĠBills": 20209, + "Ġcryptocurrency": 20210, + "Ġmetabolism": 20211, + "tering": 20212, + "Ġpestic": 20213, + "steps": 20214, + "ĠTreasure": 20215, + "racted": 20216, + "ĠConstant": 20217, + "Ġtemp": 20218, + "139": 20219, + "ĠDetective": 20220, + "urally": 20221, + "Ġrecovering": 20222, + "Ġcortex": 20223, + "Ġ144": 20224, + "closed": 20225, + "Ġprejudice": 20226, + "aunted": 20227, + "Ġstorms": 20228, + "ĠNOW": 20229, + "Ġmachinery": 20230, + "Address": 20231, + "Ġcompelled": 20232, + "270": 20233, + "Ġdespair": 20234, + "bane": 20235, + "Ġvegetable": 20236, + "Ġbeds": 20237, + "Learn": 20238, + "Ġcolorful": 20239, + "Ġspike": 20240, + "Ġmargins": 20241, + "Ġsympathy": 20242, + "Ġworkshop": 20243, + "ĠCBC": 20244, + "Sat": 20245, + "Ġburns": 20246, + "ĠGender": 20247, + "Ġ129": 20248, + "ĠCable": 20249, + "Ġdebts": 20250, + "ĠTheresa": 20251, + "Ġreflecting": 20252, + "Ġairst": 20253, + "Ġrim": 20254, + "ramid": 20255, + "Ġweaknesses": 20256, + "Writ": 20257, + "oggle": 20258, + "ti": 20259, + "ĠCharge": 20260, + "Ġweighed": 20261, + "Ġ(.": 20262, + "Ġlaughter": 20263, + "Ġrouter": 20264, + "ĠDemocracy": 20265, + "Dear": 20266, + "Ġhasht": 20267, + "Ġdy": 20268, + "Ġhints": 20269, + "running": 20270, + "Ġfinishes": 20271, + "arus": 20272, + "Mass": 20273, + "result": 20274, + "ascus": 20275, + "Ġvintage": 20276, + "Ġconqu": 20277, + "Ġwildly": 20278, + "acist": 20279, + "Ġlingu": 20280, + "Ġprotagonist": 20281, + "strom": 20282, + "teenth": 20283, + "ĠSolo": 20284, + "mac": 20285, + "filled": 20286, + "Ġrenown": 20287, + "itives": 20288, + "Ġmotive": 20289, + "ĠAntar": 20290, + "ĠMann": 20291, + "ĠAdjust": 20292, + "Ġrockets": 20293, + "Ġtroubling": 20294, + "ei": 20295, + "Ġorganisms": 20296, + "assis": 20297, + "Christian": 20298, + "Ġ145": 20299, + "ĠHass": 20300, + "Ġswall": 20301, + "Ġwax": 20302, + "ĠSurvival": 20303, + "VS": 20304, + "ĠMurd": 20305, + "vd": 20306, + "standard": 20307, + "Ġdragons": 20308, + "Ġacceleration": 20309, + "rational": 20310, + "final": 20311, + "Ġpaired": 20312, + "ĠEthereum": 20313, + "Ġinterfaces": 20314, + "Ġresent": 20315, + "Ġartifacts": 20316, + "Å«": 20317, + "arel": 20318, + "Ġcompetitor": 20319, + "ĠNicholas": 20320, + "ĠSurface": 20321, + "cpp": 20322, + "ĠTot": 20323, + "Ġeconomically": 20324, + "Ġorganised": 20325, + "Ġenforced": 20326, + "inho": 20327, + "Ġvarieties": 20328, + "Ġabdom": 20329, + "ĠBailey": 20330, + "idav": 20331, + "ĠSalv": 20332, + "paid": 20333, + "Ġaltitude": 20334, + "essert": 20335, + "ĠGutenberg": 20336, + "area": 20337, + "opoulos": 20338, + "Ġprofessors": 20339, + "iggs": 20340, + "ĠFate": 20341, + "hey": 20342, + "Ġ3000": 20343, + "Dist": 20344, + "Ġtwins": 20345, + "cill": 20346, + "ĠMaps": 20347, + "Ġtraps": 20348, + "Ġweed": 20349, + "ĠKiss": 20350, + "Ġyoga": 20351, + "Ġrecipients": 20352, + "ĠWestminster": 20353, + "Ġpools": 20354, + "ĠWalmart": 20355, + "188": 20356, + "ĠSchools": 20357, + "attack": 20358, + "ĠARM": 20359, + "paragraph": 20360, + "Warning": 20361, + "jl": 
20362, + "Ġselfish": 20363, + "anchez": 20364, + "ĠHeights": 20365, + "Fre": 20366, + "ĠSoph": 20367, + "Ġ--------------------------------": 20368, + "tml": 20369, + "333": 20370, + "Ġraids": 20371, + "Ġsatellites": 20372, + "KEY": 20373, + "Ġlasts": 20374, + "ÑĤ": 20375, + "Ins": 20376, + "ĠDame": 20377, + "Ġunpredict": 20378, + "///": 20379, + "ghai": 20380, + "Ġartillery": 20381, + "Ġcruise": 20382, + "Ġgel": 20383, + "ĠCabinet": 20384, + "Ġblows": 20385, + "ĠEsp": 20386, + "Ġproximity": 20387, + "othe": 20388, + "ĠSkills": 20389, + "ĠUpper": 20390, + "obo": 20391, + "ĠNDP": 20392, + "Ġenjoys": 20393, + "Ġrepeating": 20394, + "ĠConstruction": 20395, + "ĠQuestions": 20396, + "Hillary": 20397, + "Ġuint": 20398, + "Ġprocessors": 20399, + "ĠGibson": 20400, + "ĠMultiple": 20401, + "qa": 20402, + "ĠBom": 20403, + "ĠMiles": 20404, + "ventional": 20405, + "Ġhurts": 20406, + "skin": 20407, + "ĠAIDS": 20408, + "Ġadvisers": 20409, + "ĠRoot": 20410, + "Ġmethodology": 20411, + "ĠDale": 20412, + "Ġdeton": 20413, + "ĠKnowledge": 20414, + "sequently": 20415, + "Ġ121": 20416, + "Ġconnects": 20417, + "Cy": 20418, + "ĠDanger": 20419, + "Ġcontributors": 20420, + "ĠBent": 20421, + "Ġbrass": 20422, + "ĠGuns": 20423, + "into": 20424, + "ĠFortune": 20425, + "Ġbroker": 20426, + "balance": 20427, + "Ġlengths": 20428, + "Ġvic": 20429, + "Ġaveraging": 20430, + "Ġappropriately": 20431, + "ĠCamera": 20432, + "Ġsandwich": 20433, + "ĠCDC": 20434, + "Ġcoordinate": 20435, + "Ġnavig": 20436, + "Ġgoodness": 20437, + "laim": 20438, + "Ġbrake": 20439, + "Ġextremist": 20440, + "ĠWake": 20441, + "ĠMend": 20442, + "ĠTiny": 20443, + "ĠCOL": 20444, + "ĠRF": 20445, + "ĠDual": 20446, + "ĠWine": 20447, + "Case": 20448, + "Ġrefined": 20449, + "Ġlamp": 20450, + "Lead": 20451, + "Ġbapt": 20452, + "ĠCarb": 20453, + "ĠSadd": 20454, + "ĠMinneapolis": 20455, + "PDF": 20456, + "Early": 20457, + "ĠHidden": 20458, + "Its": 20459, + "ĠTIME": 20460, + "Ġpap": 20461, + "Ġcommissioned": 20462, + "ĠFew": 20463, + "ĠColts": 20464, + "ĠBren": 20465, + "Ġbothered": 20466, + "Ġlikewise": 20467, + "Exper": 20468, + "ĠSchw": 20469, + "cry": 20470, + "nn": 20471, + "ĠMitch": 20472, + "imon": 20473, + "MG": 20474, + "bm": 20475, + "UMP": 20476, + "rays": 20477, + "Ġregistry": 20478, + "Ġ270": 20479, + "achine": 20480, + "rella": 20481, + "anting": 20482, + "00000": 20483, + "Ġruined": 20484, + "spot": 20485, + "Ġta": 20486, + "Ġmaximize": 20487, + "Ġinconven": 20488, + "Dead": 20489, + "Human": 20490, + "Enabled": 20491, + "ĠMarie": 20492, + "Ġchill": 20493, + "ĠParadise": 20494, + "Ġstarring": 20495, + "ĠLatino": 20496, + "ĠProtocol": 20497, + "ĠEVER": 20498, + "Ġsuppliers": 20499, + "message": 20500, + "ĠBrock": 20501, + "Ġserum": 20502, + "âĸĪâĸĪâĸĪâĸĪ": 20503, + "Ġencomp": 20504, + "Ġambition": 20505, + "uese": 20506, + "Ġarrows": 20507, + "Andrew": 20508, + "Ġantenna": 20509, + "Ġ1961": 20510, + "ĠBark": 20511, + "Ġbool": 20512, + "ãĤª": 20513, + "ĠStorage": 20514, + "Ġrailway": 20515, + "Ġtougher": 20516, + "ĠCad": 20517, + "Ġwashing": 20518, + "Py": 20519, + "']": 20520, + "embed": 20521, + "ĠMemphis": 20522, + "ackle": 20523, + "Ġfamously": 20524, + "ĠFortunately": 20525, + "ovies": 20526, + "Ġmindset": 20527, + "Ġsneak": 20528, + "ĠDh": 20529, + "RAW": 20530, + "ĠSimpson": 20531, + "Ġlivest": 20532, + "Ġlandmark": 20533, + "Ġcement": 20534, + "Low": 20535, + "Ġthrilled": 20536, + "ĠCourse": 20537, + "inel": 20538, + "Ġchuck": 20539, + "idate": 20540, + "global": 20541, + "Ġwhit": 20542, + "Ġ�": 20543, + "adays": 20544, + "ski": 20545, + "ĠSV": 
20546, + "Ġviruses": 20547, + "306": 20548, + "ĠRespons": 20549, + "Ġtheaters": 20550, + "ĠBranch": 20551, + "ĠGeneva": 20552, + "ĠMK": 20553, + "Ġunbeliev": 20554, + "Ġcommunist": 20555, + "Original": 20556, + "ĠReceived": 20557, + "ĠTransfer": 20558, + "ĠArg": 20559, + "Input": 20560, + "ĠStrategy": 20561, + "Ġpalace": 20562, + "thening": 20563, + "Dri": 20564, + "Ġsentencing": 20565, + "umbnail": 20566, + "Ġpins": 20567, + "recy": 20568, + "Ġsiblings": 20569, + "Getting": 20570, + "ĠBU": 20571, + "ĠNorthwest": 20572, + "Ġprolonged": 20573, + "ĠSakura": 20574, + "Comb": 20575, + "ĠBour": 20576, + "Ġinadequate": 20577, + "ĠKash": 20578, + "Ġusername": 20579, + "ĠImprove": 20580, + "Ġbattling": 20581, + "ĠMAC": 20582, + "Ġcurriculum": 20583, + "Ġsoda": 20584, + "ĠCannon": 20585, + "Ġsensible": 20586, + "spons": 20587, + "December": 20588, + "Ġwicked": 20589, + "ĠPengu": 20590, + "Ġdictators": 20591, + "ĠHearts": 20592, + "ogyn": 20593, + "Ġsimilarities": 20594, + "ĠStats": 20595, + "Ġhollow": 20596, + "itations": 20597, + "\":[": 20598, + "Ġhover": 20599, + "ĠListen": 20600, + "sch": 20601, + "Sund": 20602, + "Ġcad": 20603, + "ĠParks": 20604, + "Ġlur": 20605, + "Ġhype": 20606, + "ĠLem": 20607, + "NAME": 20608, + "isure": 20609, + "Friday": 20610, + "Ġshoots": 20611, + "Ġcloses": 20612, + "Ġdb": 20613, + "ĠRidge": 20614, + "ĠDifferent": 20615, + "Ġreplies": 20616, + "ĠBroadway": 20617, + "opers": 20618, + "Ġintoler": 20619, + "ĠZeus": 20620, + "akespe": 20621, + "Ġproprietary": 20622, + "Ġrequesting": 20623, + "Ġcontrollers": 20624, + "ĠMIN": 20625, + "imedia": 20626, + "becca": 20627, + "Ġexpans": 20628, + "Ġoils": 20629, + "Bot": 20630, + "ĠChand": 20631, + "Ġprinter": 20632, + "Ġtopped": 20633, + "ĠPOL": 20634, + "ĠEarlier": 20635, + "Social": 20636, + "avin": 20637, + "Ġdecreases": 20638, + "ĠSeb": 20639, + "Ġspecifications": 20640, + "ĠBlast": 20641, + "ĠKurt": 20642, + "Ġfreel": 20643, + "Brown": 20644, + "Ġdilig": 20645, + "roe": 20646, + "ĠProblem": 20647, + "ĠQuad": 20648, + "Ġdecentral": 20649, + "ĠVector": 20650, + "anut": 20651, + "Ġplugins": 20652, + "ĠGregory": 20653, + "Ġfucked": 20654, + "elines": 20655, + "ĠAmbassador": 20656, + "take": 20657, + "Ġcleans": 20658, + "ongyang": 20659, + "Anonymous": 20660, + "stro": 20661, + "\"}": 20662, + "aline": 20663, + "ĠOdd": 20664, + "ĠEug": 20665, + "216": 20666, + "Ġboil": 20667, + "ĠPowers": 20668, + "Ġnurses": 20669, + "Obviously": 20670, + "ĠTechnical": 20671, + "Ġexceeded": 20672, + "ORS": 20673, + "Ġextremists": 20674, + "Ġtraces": 20675, + "expl": 20676, + "Ġcomr": 20677, + "ĠSach": 20678, + ")/": 20679, + "Ġmasks": 20680, + "Ġsci": 20681, + "Bon": 20682, + "Ġregression": 20683, + "wegian": 20684, + "Ġadvisor": 20685, + "itures": 20686, + "ĠVo": 20687, + "example": 20688, + "ĠInstruct": 20689, + "Ġsiege": 20690, + "Ġreductions": 20691, + "ptr": 20692, + "Ġstatutory": 20693, + "Ġremoves": 20694, + "Ġpuck": 20695, + "redits": 20696, + "Ġbee": 20697, + "Ġsalad": 20698, + "Ġpromotions": 20699, + "ĠJoshua": 20700, + "withstanding": 20701, + "ETH": 20702, + "ĠCha": 20703, + "imus": 20704, + "Ġexpenditure": 20705, + "aunting": 20706, + "Ġdelighted": 20707, + "Ġ155": 20708, + "beh": 20709, + "Ġcarpet": 20710, + "ĠSpart": 20711, + "Ġjungle": 20712, + "lists": 20713, + "Ġbullying": 20714, + "ĠNobel": 20715, + "ĠGlen": 20716, + "Ġreferenced": 20717, + "Ġintroduces": 20718, + "sein": 20719, + "Ġchopped": 20720, + "glass": 20721, + "ĠWrest": 20722, + "Ġneutrality": 20723, + "ĠâĻ": 20724, + "Ġinvestigator": 20725, + "Ġshelves": 20726, + 
"Ġunconstitutional": 20727, + "Ġreproduction": 20728, + "Ġmerchant": 20729, + "mia": 20730, + "Ġmetrics": 20731, + "Ġexplosives": 20732, + "ĠSonia": 20733, + "Ġbodily": 20734, + "Ġthickness": 20735, + "Ġpredominantly": 20736, + "ĠAbility": 20737, + "Ġmonitored": 20738, + "ICH": 20739, + "Ġ].": 20740, + "ĠMartinez": 20741, + "Ġvisibility": 20742, + "Ġqueries": 20743, + "Ġgenocide": 20744, + "ĠWarfare": 20745, + "Query": 20746, + "Ġstudios": 20747, + "Ġembry": 20748, + "Ġcorridor": 20749, + "Ġcleaned": 20750, + "complete": 20751, + "ĠMH": 20752, + "Ġenrollment": 20753, + "INGS": 20754, + "Ġimpacted": 20755, + "Ġdisastrous": 20756, + "ĠYun": 20757, + "ĠClaire": 20758, + "ĠBasically": 20759, + "yt": 20760, + "usterity": 20761, + "Ġindirectly": 20762, + "wik": 20763, + "Ġdod": 20764, + "ĠCarr": 20765, + "Ġamp": 20766, + "Ġprohibit": 20767, + "ĠInitial": 20768, + "ĠRd": 20769, + "iji": 20770, + "Ġeducate": 20771, + "corn": 20772, + "iott": 20773, + "ĠBeauty": 20774, + "Ġdetective": 20775, + "ĠConn": 20776, + "since": 20777, + "Ġstagger": 20778, + "Ġobese": 20779, + "Ġbree": 20780, + "ologic": 20781, + "isse": 20782, + "walker": 20783, + "Ġblades": 20784, + "Ġlawful": 20785, + "func": 20786, + "ĠBehind": 20787, + "Ġappetite": 20788, + "Ġ(*": 20789, + "Ġtennis": 20790, + "Ġoffspring": 20791, + "Ġjets": 20792, + "Ġstructured": 20793, + "Ġaforementioned": 20794, + "Nov": 20795, + "Ġscaling": 20796, + "fill": 20797, + "Ġstew": 20798, + "Ġcurb": 20799, + "ĠStephan": 20800, + "edIn": 20801, + "SF": 20802, + "obic": 20803, + "éŃĶ": 20804, + "oug": 20805, + "ĠMM": 20806, + "Ġgenetically": 20807, + "opez": 20808, + "136": 20809, + "Ġumb": 20810, + "ancers": 20811, + "Ġcohort": 20812, + "Ġmerchandise": 20813, + "Ġimposing": 20814, + "ĠLegislature": 20815, + "ĠArchive": 20816, + "ivia": 20817, + "ĠNaval": 20818, + "Ġoffences": 20819, + "Ġmiracle": 20820, + "Ġsnapped": 20821, + "Ġfoes": 20822, + "Ġextensively": 20823, + "ĠRaf": 20824, + "Ġcater": 20825, + "edience": 20826, + "Kit": 20827, + "ĠBin": 20828, + "Ġrecommends": 20829, + "ĠCities": 20830, + "Ġrigid": 20831, + "ĠREAD": 20832, + "ĠNoble": 20833, + "ĠTian": 20834, + "Ġcertificates": 20835, + "antis": 20836, + "oiler": 20837, + "ĠBuddhist": 20838, + "did": 20839, + "Ġsurveyed": 20840, + "Ġdownward": 20841, + "Ġprints": 20842, + "ĠMotion": 20843, + "ronics": 20844, + "ĠSans": 20845, + "ossibly": 20846, + "uctions": 20847, + "Ġcolonies": 20848, + "ĠDanish": 20849, + "unit": 20850, + "Ġspoil": 20851, + "Ġadvisory": 20852, + "berries": 20853, + "Plan": 20854, + "Ġspecification": 20855, + "ophers": 20856, + "ĠResource": 20857, + "Ġshirts": 20858, + "prisingly": 20859, + "communications": 20860, + "Ġtrivial": 20861, + "Ġmentioning": 20862, + "isexual": 20863, + "Ġsupplements": 20864, + "Ġsupervision": 20865, + "BP": 20866, + "vor": 20867, + "Ġwit": 20868, + "Ġcooldown": 20869, + "Ġplaintiff": 20870, + "ĠReviews": 20871, + "ĠSri": 20872, + "ĠMint": 20873, + "ĠSugar": 20874, + "Ġafterward": 20875, + "ĠPriest": 20876, + "ĠInvestment": 20877, + "ogene": 20878, + "ĠTaking": 20879, + "Ġstretching": 20880, + "Ġinflammation": 20881, + "ĠTehran": 20882, + "Ġlining": 20883, + "Ġfreezing": 20884, + "ĠEntity": 20885, + "Ġinspiring": 20886, + "special": 20887, + "price": 20888, + "Ġsue": 20889, + "ĠPorter": 20890, + "ounge": 20891, + "ETA": 20892, + "ĠDerek": 20893, + "ĠLuis": 20894, + "uo": 20895, + "ymph": 20896, + "Ġexterior": 20897, + "ihil": 20898, + "ĠAshley": 20899, + "inator": 20900, + "Ġnutrients": 20901, + "ĠThrones": 20902, + "Ġfinances": 20903, + "ĠInspect": 
20904, + "Ġspecially": 20905, + "ĠRequired": 20906, + "ĠPTS": 20907, + "ĠViolence": 20908, + "ointed": 20909, + "shots": 20910, + "Ġexcerpt": 20911, + "coon": 20912, + "INS": 20913, + "ĠGri": 20914, + "Ġrecognised": 20915, + "Week": 20916, + "Young": 20917, + "Ġvom": 20918, + "isle": 20919, + "ĠCurry": 20920, + "ĠBuddh": 20921, + "Ġnotebook": 20922, + "Ġdurable": 20923, + "/?": 20924, + "ĠGad": 20925, + "ĠPupp": 20926, + "Ġforgive": 20927, + "park": 20928, + "Ġpersonalities": 20929, + "analysis": 20930, + "clamation": 20931, + "Ġelevator": 20932, + "Ġwarehouse": 20933, + "ĠRole": 20934, + "unn": 20935, + "Ġillustration": 20936, + "ĠScan": 20937, + "Ġatmospheric": 20938, + "Import": 20939, + "ANC": 20940, + "ricted": 20941, + "fu": 20942, + "010": 20943, + "Ġarche": 20944, + "Ġrewarded": 20945, + "akespeare": 20946, + "Ġinternally": 20947, + "ĠRBI": 20948, + "alker": 20949, + "Ġelephant": 20950, + "owitz": 20951, + "ĠPizza": 20952, + "Ġbipartisan": 20953, + "és": 20954, + "Ġslowed": 20955, + "ĠStark": 20956, + "Ġoverride": 20957, + "OUS": 20958, + "Ġ320": 20959, + "undreds": 20960, + "ĠDeck": 20961, + "ĠCensus": 20962, + "bee": 20963, + "146": 20964, + "otor": 20965, + "Ġip": 20966, + "Ġub": 20967, + "ocations": 20968, + "ĠButton": 20969, + "rice": 20970, + "Ġcripp": 20971, + "fff": 20972, + "Ġoriginated": 20973, + "Ġoverwhelmed": 20974, + "appa": 20975, + "Ġforemost": 20976, + "âĢij": 20977, + "ĠLEG": 20978, + "release": 20979, + "eatured": 20980, + "atches": 20981, + "Ġreps": 20982, + "Ġlending": 20983, + "ĠReference": 20984, + "ĠClient": 20985, + "165": 20986, + "venth": 20987, + "Complete": 20988, + "ĠPatrol": 20989, + "Ġsworn": 20990, + "cam": 20991, + "Ġshuttle": 20992, + "ĠRalph": 20993, + "Ġhometown": 20994, + "-,": 20995, + "onal": 20996, + "ĠBP": 20997, + "åı": 20998, + "Ġpersuade": 20999, + "ĠAlexand": 21000, + "Ġcombines": 21001, + "Ġvivid": 21002, + "ĠLag": 21003, + "Ġencoding": 21004, + "Ġsalvation": 21005, + "wen": 21006, + "ĠRecovery": 21007, + "iya": 21008, + "University": 21009, + "ĠBiden": 21010, + "Ġbudgets": 21011, + "ĠTexans": 21012, + "fits": 21013, + "Ġhonored": 21014, + "Ġpython": 21015, + "TD": 21016, + "###": 21017, + "clone": 21018, + "Ġblink": 21019, + "ĠLiquid": 21020, + "Ġunemployed": 21021, + "Ġclashes": 21022, + "ĠCounsel": 21023, + "Ġdirecting": 21024, + "Ġpunct": 21025, + "ĠFalcons": 21026, + "Ġshark": 21027, + "ĠDamascus": 21028, + "Ġjeans": 21029, + "Ġembark": 21030, + "Ġseize": 21031, + "Ġupwards": 21032, + "280": 21033, + "ĠEz": 21034, + "ĠAnything": 21035, + "Ġexotic": 21036, + "lower": 21037, + "ĠCreator": 21038, + "ĠUm": 21039, + "Ġsuburbs": 21040, + "berger": 21041, + "ĠWend": 21042, + "Ġmint": 21043, + "ĠXX": 21044, + "ĠDro": 21045, + "Ġsuffers": 21046, + "Ġherb": 21047, + "tree": 21048, + "Ġfragile": 21049, + "Ġflooded": 21050, + "ĠAlcohol": 21051, + "olean": 21052, + "nyder": 21053, + "ĠKO": 21054, + "Fram": 21055, + "Ġ136": 21056, + "Ġowed": 21057, + "ĠMelee": 21058, + "ĠHash": 21059, + "Ġwhisk": 21060, + "Ġsudo": 21061, + "rr": 21062, + "Quick": 21063, + "appro": 21064, + "Ġii": 21065, + "ĠExamples": 21066, + "hee": 21067, + "Ġpromotes": 21068, + "perature": 21069, + "kar": 21070, + "ĠHonor": 21071, + "Ġsodium": 21072, + "ĠLif": 21073, + "rosso": 21074, + "intendent": 21075, + "Ġcorrespondent": 21076, + "Found": 21077, + "secret": 21078, + "Ġidentifies": 21079, + "agne": 21080, + "Ġlou": 21081, + "ĠPP": 21082, + "Ġcoincidence": 21083, + "move": 21084, + "Ġmilitia": 21085, + "Ġinfiltr": 21086, + "ĠPrimary": 21087, + "Ġpitching": 21088, + "ĠIb": 
21089, + "ĠGOOD": 21090, + "ãĤ¸": 21091, + "ĠWizards": 21092, + "iral": 21093, + "ĠVenus": 21094, + "RR": 21095, + "ĠâĢķ": 21096, + "ĠCasey": 21097, + "Ġsadly": 21098, + "Ġadmire": 21099, + "Ġembarrassed": 21100, + "cb": 21101, + "Mel": 21102, + "Ġtubes": 21103, + "Ġbeautifully": 21104, + "ĠQueensland": 21105, + "Below": 21106, + "rez": 21107, + "quet": 21108, + "pleasant": 21109, + "Ġ«": 21110, + "Camp": 21111, + "Ġdecisive": 21112, + "1998": 21113, + "ĠLamb": 21114, + "utton": 21115, + "hn": 21116, + "ĠJagu": 21117, + "aunder": 21118, + "ĠCord": 21119, + "Ġclerk": 21120, + "Ġcaffe": 21121, + "Ġwiped": 21122, + "Ġreim": 21123, + "ĠMountains": 21124, + "Ġimprisoned": 21125, + "Ġdevelops": 21126, + "ĠPra": 21127, + "Ġmodeling": 21128, + "Anyone": 21129, + "ancel": 21130, + "ĠSit": 21131, + "Ġshields": 21132, + "Ġlawn": 21133, + "Ġcardiovascular": 21134, + "Ġdemonstrating": 21135, + "Ġparse": 21136, + "ĠIsraelis": 21137, + "Ġeuros": 21138, + "143": 21139, + "Ġglorious": 21140, + "inski": 21141, + "ecd": 21142, + "Ġconditioning": 21143, + "Ġhelpless": 21144, + "Ġmicrosc": 21145, + "ĠHarbor": 21146, + "Ġstakes": 21147, + "Ġ260": 21148, + "Ġunequ": 21149, + "ĠFloyd": 21150, + "Ġdamp": 21151, + "Ġapparatus": 21152, + "ĠLaws": 21153, + "Ġcounters": 21154, + "Ġinduce": 21155, + "atable": 21156, + "ĠAhmed": 21157, + "Ġslam": 21158, + "November": 21159, + "Ġpersist": 21160, + "Ġimminent": 21161, + "án": 21162, + "Ġshred": 21163, + "Ġphases": 21164, + "ĠEdmonton": 21165, + "ĠArmstrong": 21166, + "ĠMeet": 21167, + "ĠKitty": 21168, + "ÑĢ": 21169, + "circ": 21170, + "ĠAdult": 21171, + "Ġarose": 21172, + "ĠXen": 21173, + "Dan": 21174, + "gow": 21175, + "Ġsuperf": 21176, + "ĠAdmir": 21177, + "Ġendure": 21178, + "Ġkeyword": 21179, + "yrus": 21180, + "Ġyarn": 21181, + "Ġpathway": 21182, + "ĠHopkins": 21183, + "midt": 21184, + "Ġcensorship": 21185, + "dependent": 21186, + "Ġinstructor": 21187, + "Sources": 21188, + "Ġtoe": 21189, + "Ġballoon": 21190, + "Nob": 21191, + "Ġswear": 21192, + "ĠCastro": 21193, + "Ġgloss": 21194, + "ĠKavanaugh": 21195, + "Ġremarkably": 21196, + "Photos": 21197, + "ĠNom": 21198, + "ĠSoutheast": 21199, + "yers": 21200, + "Ġvalidation": 21201, + "Ġcannon": 21202, + "ĠVictory": 21203, + "ĠPierre": 21204, + "Ġcautious": 21205, + "Audio": 21206, + "Ġfetch": 21207, + "ĠGift": 21208, + "ĠHyp": 21209, + "Ġremedy": 21210, + "ZE": 21211, + "Ġscent": 21212, + "Ġbeard": 21213, + "ĠRut": 21214, + "-\"": 21215, + "Ġpatents": 21216, + "Hy": 21217, + "Ġunjust": 21218, + "Ġpotato": 21219, + "Ġforthcoming": 21220, + "Ġchef": 21221, + "ĠRift": 21222, + "affe": 21223, + "ĠROM": 21224, + "ĠLaunch": 21225, + "Ġpads": 21226, + "ĠNeo": 21227, + "Ġonset": 21228, + "Ġsqueeze": 21229, + "safe": 21230, + "Ġprefix": 21231, + "ĠTM": 21232, + "ĠNearly": 21233, + "ĠClinical": 21234, + "ĠMental": 21235, + "otiation": 21236, + "ĠUnic": 21237, + "antry": 21238, + "ĠCir": 21239, + "Ġepit": 21240, + "æ": 21241, + "Ġextracted": 21242, + "versely": 21243, + "riad": 21244, + "Ġstrains": 21245, + "Ġtops": 21246, + "Ġpoem": 21247, + "ĠRandy": 21248, + "ĠMaple": 21249, + "THER": 21250, + "upiter": 21251, + "ĠSSD": 21252, + "ļé": 21253, + "Ġuncon": 21254, + "pering": 21255, + "Ġslept": 21256, + "iners": 21257, + "Ġunderwater": 21258, + "ĠEvidence": 21259, + "gone": 21260, + "205": 21261, + "Ġhistorians": 21262, + "Ġsynthesis": 21263, + "Ġfrog": 21264, + "basketball": 21265, + "Ġvibrant": 21266, + "Ġsubord": 21267, + "Ġ365": 21268, + "ĠDial": 21269, + "Ġcooperate": 21270, + "HAHA": 21271, + "Ġgreeted": 21272, + "158": 21273, + 
"Ġjazz": 21274, + "Ġintox": 21275, + "ĠWalking": 21276, + "Ġsupervisor": 21277, + "ĠFusion": 21278, + "ĠMercedes": 21279, + "send": 21280, + "Ham": 21281, + "sd": 21282, + "nl": 21283, + "Ġtours": 21284, + "ĠFIFA": 21285, + "Ġculp": 21286, + "gd": 21287, + "304": 21288, + "Ġpleas": 21289, + "Ġillustrates": 21290, + "ĠColombia": 21291, + "Ġhighlighting": 21292, + "ĠSummary": 21293, + "Ġexposing": 21294, + "ĠDru": 21295, + "Ġirony": 21296, + "ritional": 21297, + "ĠCarroll": 21298, + "ĠEllis": 21299, + "Pict": 21300, + "ĠRapt": 21301, + "Ġadapter": 21302, + "Ġunm": 21303, + "Ġcorpse": 21304, + "Ġcelebrities": 21305, + "Den": 21306, + "atum": 21307, + "ĠApocalypse": 21308, + "ĠWag": 21309, + "lining": 21310, + "Ġhormones": 21311, + "Rub": 21312, + "ĠXi": 21313, + "ĠVaults": 21314, + "208": 21315, + "alkyrie": 21316, + "inosaur": 21317, + "Ġfeeds": 21318, + "vity": 21319, + "Ġdefeating": 21320, + "Wait": 21321, + "Ġemphasize": 21322, + "ĠSteelers": 21323, + "yrinth": 21324, + "leys": 21325, + "ĠWhenever": 21326, + "Currently": 21327, + "ĠClock": 21328, + "Ġcollectively": 21329, + "anyon": 21330, + "ĠJP": 21331, + "Ġmentality": 21332, + "Ġdownloads": 21333, + "Ġsurroundings": 21334, + "ĠBarnes": 21335, + "Ġflagship": 21336, + "Ġindicators": 21337, + "Ġgrapp": 21338, + "January": 21339, + "ĠElemental": 21340, + "ĠAthena": 21341, + "ibal": 21342, + "Ġsights": 21343, + "Ġcapita": 21344, + "ĠTreaty": 21345, + "Ġvoiced": 21346, + "ĠGaz": 21347, + "lette": 21348, + "Ġya": 21349, + "Ġexpired": 21350, + "Legend": 21351, + "Hot": 21352, + "nature": 21353, + "Ġunstable": 21354, + "Ġ280": 21355, + "ú": 21356, + "Comment": 21357, + "ALE": 21358, + "Ġquests": 21359, + "Ġhandler": 21360, + "nis": 21361, + "Ġversatile": 21362, + "Ġconceal": 21363, + "engeance": 21364, + "ĠInteractive": 21365, + "Ġobsessed": 21366, + "ĠDogs": 21367, + "Ġcracked": 21368, + "Sound": 21369, + "sv": 21370, + "ĠDylan": 21371, + "roads": 21372, + "fx": 21373, + "ĠCatholics": 21374, + "ĠHag": 21375, + "Ġslammed": 21376, + "Ġglowing": 21377, + "sale": 21378, + "Ġtissues": 21379, + "ĠChi": 21380, + "nee": 21381, + "Ġcher": 21382, + "sic": 21383, + "urrection": 21384, + "Ġbacon": 21385, + "ulatory": 21386, + ").\"": 21387, + "Ġirregular": 21388, + "FORM": 21389, + "assed": 21390, + "Ġintentional": 21391, + "Ġcompensate": 21392, + "ĠSpeaking": 21393, + "ĠSets": 21394, + "153": 21395, + "Ġconventions": 21396, + "bands": 21397, + "emade": 21398, + "Ġecc": 21399, + "ĠWinston": 21400, + "ĠAssassin": 21401, + "ĠBelgian": 21402, + "Ġdependence": 21403, + "Ġniche": 21404, + "Ġbark": 21405, + "ĠJazz": 21406, + "Ġdisadvantage": 21407, + "Ġgasoline": 21408, + "Ġ165": 21409, + "çļĦ": 21410, + "essa": 21411, + "module": 21412, + "angular": 21413, + "OY": 21414, + "ĠTreatment": 21415, + "itas": 21416, + "olation": 21417, + "ĠArnold": 21418, + "Ġfeud": 21419, + "ĠNest": 21420, + "Ġtheatre": 21421, + "ewater": 21422, + "Ġminors": 21423, + "olicy": 21424, + "ĠHaven": 21425, + "division": 21426, + "Ġtrunk": 21427, + "Far": 21428, + "ĠPull": 21429, + "Ġcapturing": 21430, + "Ġ1800": 21431, + "ĠTeen": 21432, + "Ġexempl": 21433, + "Ġclinics": 21434, + "ĠBurg": 21435, + "Ġsubstit": 21436, + "Ġpayload": 21437, + "ĠLav": 21438, + "ĠTroy": 21439, + "ĠWitness": 21440, + "Ġfragments": 21441, + "Ġpasswords": 21442, + "Ġgospel": 21443, + "ĠGin": 21444, + "Ġtenants": 21445, + "olith": 21446, + "Six": 21447, + "Previous": 21448, + "ĠAges": 21449, + "ĠDarwin": 21450, + "Ġblat": 21451, + "Ġempathy": 21452, + "smith": 21453, + "bag": 21454, + "ĠEcho": 21455, + "ĠCamb": 
21456, + "ĠMadd": 21457, + "ĠBoo": 21458, + "Ġrede": 21459, + "ĠBurning": 21460, + "Ġsmoothly": 21461, + "ĠAdrian": 21462, + "ĠVampire": 21463, + "ĠMonsters": 21464, + "steam": 21465, + "Style": 21466, + "Ma": 21467, + "rea": 21468, + "ĠDwar": 21469, + "alyst": 21470, + "ursor": 21471, + "Ġelimination": 21472, + "Ġcrypto": 21473, + "cht": 21474, + "ĠEternal": 21475, + "âĢ¦]": 21476, + "ĠSorce": 21477, + "Ill": 21478, + "NER": 21479, + "Ġuh": 21480, + "Conclusion": 21481, + "wage": 21482, + "Ġrespir": 21483, + "Ġreminis": 21484, + "hetical": 21485, + "Ġgy": 21486, + "Ġutilized": 21487, + "icidal": 21488, + "Ġ1900": 21489, + "Ġhunters": 21490, + "ĠSwan": 21491, + "ĠReact": 21492, + "Ġvisitor": 21493, + "ĠThanksgiving": 21494, + "308": 21495, + "Posts": 21496, + "Ġhips": 21497, + "1997": 21498, + "omers": 21499, + "Ġknocking": 21500, + "ĠVehicle": 21501, + "Ġtil": 21502, + "Ġ138": 21503, + "Ġmi": 21504, + "ĠInvestigation": 21505, + "ĠKenya": 21506, + "Ġcasino": 21507, + "Ġmotives": 21508, + "Ġregain": 21509, + "rex": 21510, + "Ġweekends": 21511, + "Ġstabbed": 21512, + "boro": 21513, + "Ġexploited": 21514, + "ĠHAVE": 21515, + "ĠTelevision": 21516, + "cock": 21517, + "Ġpreparations": 21518, + "Ġendeav": 21519, + "ĠRemote": 21520, + "ĠMaker": 21521, + "ĠProdu": 21522, + "ĠEvan": 21523, + "Ġinformational": 21524, + "ĠLouisville": 21525, + "154": 21526, + "ĠDreams": 21527, + "Ġplots": 21528, + "ĠRunner": 21529, + "Ġhurting": 21530, + "Ġacademy": 21531, + "ĠMontgomery": 21532, + "nm": 21533, + "ĠLanc": 21534, + "ĠAlz": 21535, + "210": 21536, + "elong": 21537, + "Ġretailer": 21538, + "Ġarising": 21539, + "Ġrebellion": 21540, + "Ġblonde": 21541, + "played": 21542, + "Ġinstrumental": 21543, + "Cross": 21544, + "Ġretention": 21545, + "Ġtherapeutic": 21546, + "Ġseas": 21547, + "Ġinfantry": 21548, + "ĠClint": 21549, + "Ġprompting": 21550, + "Ġbitch": 21551, + "Ġstems": 21552, + "ĠKra": 21553, + "Ġthesis": 21554, + "ĠBog": 21555, + "rued": 21556, + "Ġkings": 21557, + "Ġclay": 21558, + "ificent": 21559, + "ĠYES": 21560, + "ĠThing": 21561, + "ĠCubs": 21562, + "veyard": 21563, + "elsh": 21564, + "inarily": 21565, + "ĠEy": 21566, + "ĠRolling": 21567, + "Ġevolving": 21568, + "India": 21569, + "Ġrecognizes": 21570, + "Ġgraduation": 21571, + "isers": 21572, + "Ġfertility": 21573, + "ĠMilan": 21574, + "Command": 21575, + "Ġboxing": 21576, + "Ġ1943": 21577, + "Ġgluten": 21578, + "ĠEmir": 21579, + "Ġidol": 21580, + "Ġconceived": 21581, + "ĠCreation": 21582, + "Merit": 21583, + "uddy": 21584, + "ussions": 21585, + "ĠLieutenant": 21586, + "ietal": 21587, + "Ġunchanged": 21588, + "ĠScale": 21589, + "ĠCrimea": 21590, + "balls": 21591, + "atorial": 21592, + "Ġdepths": 21593, + "Ġempirical": 21594, + "Ġtransm": 21595, + "Ġunsafe": 21596, + "missible": 21597, + "comfort": 21598, + "156": 21599, + "Ġmechanic": 21600, + "002": 21601, + "lins": 21602, + "Ġsmoked": 21603, + "Pos": 21604, + "Ġslowing": 21605, + "Ġlav": 21606, + "Texas": 21607, + "Ġcheating": 21608, + "ĠMetropolitan": 21609, + "ethyl": 21610, + "Ġdiscovering": 21611, + "asse": 21612, + "Ġpencil": 21613, + "ĠPyongyang": 21614, + "Ġcloset": 21615, + "ĠSheet": 21616, + "ĠEntry": 21617, + "oustic": 21618, + "Ġmyst": 21619, + "erate": 21620, + "ariat": 21621, + "Ġminerals": 21622, + "Ġmusician": 21623, + "ĠPul": 21624, + "ĠMaz": 21625, + "249": 21626, + "Ġpermissions": 21627, + "Ġiv": 21628, + "enary": 21629, + "ickers": 21630, + "ĠBing": 21631, + "hea": 21632, + "enable": 21633, + "Ġgriev": 21634, + "Ġasserted": 21635, + "ĠColonel": 21636, + "Ġaffidav": 21637, + 
"wo": 21638, + "Ġseated": 21639, + "ĠRide": 21640, + "Ġpaintings": 21641, + "ĠPix": 21642, + "Ġ137": 21643, + "ishi": 21644, + "umbai": 21645, + "gotten": 21646, + "ĠEarl": 21647, + "Ġinning": 21648, + "Ġcensus": 21649, + "Ġtravelled": 21650, + "ĠConsult": 21651, + "185": 21652, + "bind": 21653, + "Ġsimplicity": 21654, + "Ġoverlooked": 21655, + "ĠHelpful": 21656, + "Ġmonkey": 21657, + "Ġoverwhelmingly": 21658, + "Blood": 21659, + "ĠFlint": 21660, + "ĠJama": 21661, + "ĠPresent": 21662, + "ĠRage": 21663, + "ĠTA": 21664, + "ptive": 21665, + "Ġturnout": 21666, + "wald": 21667, + "ĠDolphins": 21668, + "ĠVPN": 21669, + "Ġonion": 21670, + "Ġcrafting": 21671, + "mma": 21672, + "ĠMercury": 21673, + "Ġarrange": 21674, + "Ġalerts": 21675, + "ĠOT": 21676, + "zbollah": 21677, + "Ġgases": 21678, + "ĠRichardson": 21679, + "sal": 21680, + "lar": 21681, + "Ġfrost": 21682, + "Ġlowering": 21683, + "Ġacclaim": 21684, + "Ġstartups": 21685, + "ĠGain": 21686, + "essment": 21687, + "Ġguardian": 21688, + "人": 21689, + "ĠPie": 21690, + "ĠLinks": 21691, + "Ġmerits": 21692, + "Ġawake": 21693, + "Ġparental": 21694, + "Ġexceeds": 21695, + "Ġidle": 21696, + "ĠPilot": 21697, + "ĠeBay": 21698, + "ĠAccept": 21699, + "ipeg": 21700, + "Cam": 21701, + "ĠKot": 21702, + "Ġtraders": 21703, + "olitics": 21704, + "unker": 21705, + "ĠPale": 21706, + "osi": 21707, + "anmar": 21708, + "Ġ1947": 21709, + "ĠFell": 21710, + "estial": 21711, + "itating": 21712, + "GF": 21713, + "ĠSr": 21714, + "ifted": 21715, + "Ġconnector": 21716, + "ĠBone": 21717, + "illes": 21718, + "260": 21719, + "hma": 21720, + "Ġoverlap": 21721, + "ĠGitHub": 21722, + "Ġcleaner": 21723, + "ĠBaptist": 21724, + "ĠWAS": 21725, + "Ġlungs": 21726, + "Ñģ": 21727, + "ĠBUT": 21728, + "Ġcite": 21729, + "Ġpitched": 21730, + "reatment": 21731, + "Ġtrophies": 21732, + "ĠNu": 21733, + "386": 21734, + "ĠPride": 21735, + "Ġattendees": 21736, + "[]": 21737, + "179": 21738, + "Ġspatial": 21739, + "Ġprizes": 21740, + "ĠReligion": 21741, + "Ġshowcase": 21742, + "ĠCategory": 21743, + "vidia": 21744, + "Target": 21745, + "Property": 21746, + "?,": 21747, + "Ġfusion": 21748, + "pie": 21749, + "ĠUCLA": 21750, + "Ġsoundtrack": 21751, + "Ġprincess": 21752, + "ĠCaval": 21753, + "should": 21754, + "Ġlimbs": 21755, + "Background": 21756, + "Ġlonely": 21757, + "Ġcores": 21758, + "ĠTail": 21759, + "sheet": 21760, + "Ġ132": 21761, + "Ra": 21762, + "ãĤ«": 21763, + "ĠBolt": 21764, + "Ġbooked": 21765, + "Ġadminister": 21766, + "Ġequals": 21767, + "wy": 21768, + "Ġobserving": 21769, + "ĠBaron": 21770, + "ĠAdobe": 21771, + "Ġvirgin": 21772, + "ĠSocialist": 21773, + "Move": 21774, + "ghazi": 21775, + "ĠLinda": 21776, + "212": 21777, + "Ġbrewing": 21778, + "Ġmerchants": 21779, + "burse": 21780, + "Ġdivor": 21781, + "Ġmetals": 21782, + "ĠNer": 21783, + "Ġsums": 21784, + "ĠEnemy": 21785, + "Ġenvision": 21786, + "Ġgranting": 21787, + "ĠHoney": 21788, + "ĠSkyrim": 21789, + "Ġsocio": 21790, + "graded": 21791, + "Ġselective": 21792, + "WASHINGTON": 21793, + "Ġ1948": 21794, + "ĠSirius": 21795, + "ĠGross": 21796, + "activity": 21797, + "ĠIvan": 21798, + "Ġfurious": 21799, + "BSD": 21800, + "ĠPrevious": 21801, + "Ġresponsive": 21802, + "Ġcharitable": 21803, + "Ġleaning": 21804, + "ĠPew": 21805, + "Ġviolates": 21806, + "\\\\\\\\\\\\\\\\": 21807, + "ĠComing": 21808, + "wire": 21809, + "Ġpoet": 21810, + "Ġresolutions": 21811, + "command": 21812, + "ĠPortuguese": 21813, + "Ġnickname": 21814, + "Ġdeaf": 21815, + "February": 21816, + "Ġrecognise": 21817, + "Ġentirety": 21818, + "Ġseasonal": 21819, + "placed": 21820, + 
"ĠTelegraph": 21821, + "Ġmicrophone": 21822, + "ouring": 21823, + "Ġgrains": 21824, + "Ġgoverned": 21825, + "Ġpostp": 21826, + "ĠWaters": 21827, + "inement": 21828, + "Ġundocumented": 21829, + "ĠComcast": 21830, + "Ġfox": 21831, + "Ġassaults": 21832, + "reon": 21833, + "many": 21834, + "ĠJenkins": 21835, + "ĠAnyway": 21836, + "Ġassessments": 21837, + "Ġdowns": 21838, + "ĠMouse": 21839, + "Ġsuperb": 21840, + "kt": 21841, + "ĠDow": 21842, + "Ġtaxation": 21843, + "401": 21844, + "Ġsmiles": 21845, + "Ġundertaken": 21846, + "Ġexh": 21847, + "Ġenthusiastic": 21848, + "Ġtwent": 21849, + "Ġgovernmental": 21850, + "Ġautonomy": 21851, + "ĠTechnologies": 21852, + "ĠChain": 21853, + "Ġprevalent": 21854, + "fb": 21855, + "Ġnicotine": 21856, + "ogram": 21857, + "job": 21858, + "Ġawaiting": 21859, + "ĠMenu": 21860, + "Ġdeputies": 21861, + "kov": 21862, + "ishops": 21863, + "Button": 21864, + "ĠShanghai": 21865, + "Ġdiesel": 21866, + "ĠDuck": 21867, + "Ryan": 21868, + "ĠPCs": 21869, + "NF": 21870, + "jury": 21871, + "ente": 21872, + "Ġinaccurate": 21873, + "eddy": 21874, + "Whatever": 21875, + "Ġshowc": 21876, + "ĠNad": 21877, + "odus": 21878, + "etr": 21879, + "Ġplaintiffs": 21880, + "ĠWOR": 21881, + "ĠAssange": 21882, + "Ġprivat": 21883, + "Ġpremiums": 21884, + "Ġtam": 21885, + "URL": 21886, + "Ġelites": 21887, + "ĠRanger": 21888, + "ottenham": 21889, + "ĠHoff": 21890, + "ĠAthens": 21891, + "Ġdefinite": 21892, + "Ġsighed": 21893, + "Ġevenly": 21894, + "211": 21895, + "ĠAmber": 21896, + "akia": 21897, + "Ġmailing": 21898, + "Ġcrashing": 21899, + "ĠConfederate": 21900, + "rugged": 21901, + "Wal": 21902, + "ĠDepths": 21903, + "Ġjuvenile": 21904, + "Ġreactor": 21905, + "Introduction": 21906, + "ĠDeluxe": 21907, + "1995": 21908, + "ĠSanchez": 21909, + "ĠMead": 21910, + "ivable": 21911, + ":-": 21912, + "ĠPlanning": 21913, + "ĠTrap": 21914, + "quin": 21915, + "ĠProtect": 21916, + "vered": 21917, + "Information": 21918, + "Ġkidney": 21919, + "innamon": 21920, + "las": 21921, + "Ġpolicing": 21922, + "Ġtolerate": 21923, + "ĠQi": 21924, + "Ġbiased": 21925, + "Fort": 21926, + "ĠKi": 21927, + "save": 21928, + "Ġprivileged": 21929, + "Ġbeasts": 21930, + "ĠGlas": 21931, + "ĠCinem": 21932, + "Ġcomeback": 21933, + "Sunday": 21934, + "Ġextinction": 21935, + "hops": 21936, + "Ġtransmit": 21937, + "Ġdoubles": 21938, + "ĠFlat": 21939, + "167": 21940, + "Ġdisputed": 21941, + "Ġinjustice": 21942, + "foo": 21943, + "Vict": 21944, + "roleum": 21945, + "ĠJulie": 21946, + "Context": 21947, + "ĠRarity": 21948, + "issue": 21949, + "Component": 21950, + "Ġcounseling": 21951, + "anne": 21952, + "dark": 21953, + "Ġobjections": 21954, + "uilt": 21955, + "Ġgast": 21956, + "Ġplac": 21957, + "Ġunused": 21958, + "ãĥĩ": 21959, + "ĠTrial": 21960, + "ĠJas": 21961, + "hedral": 21962, + "obb": 21963, + "Ġtemporal": 21964, + "ĠPRO": 21965, + "ĠNW": 21966, + "ĠAnniversary": 21967, + "Large": 21968, + "Ġtherm": 21969, + "Ġdavid": 21970, + "Ġsystemic": 21971, + "ĠShir": 21972, + "mut": 21973, + "ĠNept": 21974, + "address": 21975, + "Ġscanning": 21976, + "Ġunderstandable": 21977, + "Ġcanvas": 21978, + "Cat": 21979, + "ĠZoo": 21980, + "Ġangels": 21981, + "LO": 21982, + "ĠStatement": 21983, + "ĠSig": 21984, + "ovable": 21985, + "ĠAway": 21986, + "sharing": 21987, + "ocrats": 21988, + "stated": 21989, + "Ġweighing": 21990, + "Nor": 21991, + "wild": 21992, + "Bey": 21993, + "Ġastonishing": 21994, + "ĠReynolds": 21995, + "Ġopener": 21996, + "Ġtrainer": 21997, + "Ġsurgical": 21998, + "pn": 21999, + "Ġadjusting": 22000, + "wheel": 22001, + "Ġfrown": 
22002, + "ervative": 22003, + "Ġsuspend": 22004, + "Within": 22005, + "tein": 22006, + "Ġobstacle": 22007, + "Ġliberties": 22008, + "ymes": 22009, + "Ġuranium": 22010, + "ansom": 22011, + "anol": 22012, + "uba": 22013, + "ĠLoss": 22014, + "Ġarous": 22015, + "ĠHenderson": 22016, + "Wow": 22017, + "spl": 22018, + "cur": 22019, + "ĠÂŃ": 22020, + "Ġtheirs": 22021, + "Damage": 22022, + "Ġdownloading": 22023, + "Ġdiscern": 22024, + "ĠSto": 22025, + "ĠFla": 22026, + "Ġhath": 22027, + "ĠAj": 22028, + "Ġunpleasant": 22029, + "European": 22030, + "expensive": 22031, + "Ġscreenshot": 22032, + "ĠUV": 22033, + "Ġallied": 22034, + "ĠPersian": 22035, + "Ġmonopoly": 22036, + "Ġatom": 22037, + "ĠRedskins": 22038, + "\"><": 22039, + "Ġcancell": 22040, + "Ġcinema": 22041, + "131": 22042, + "fair": 22043, + "ĠAlfred": 22044, + "Ġduck": 22045, + "args": 22046, + "223": 22047, + "ĠISI": 22048, + "Ġsignaling": 22049, + "inar": 22050, + "Ġlaughs": 22051, + "Ġforwards": 22052, + "Ġreckless": 22053, + "Ġlisteners": 22054, + "ativity": 22055, + "Ġvastly": 22056, + "nant": 22057, + "Less": 22058, + "ĠHunting": 22059, + "ĠScientific": 22060, + "ITED": 22061, + "Ġknight": 22062, + "ĠHTC": 22063, + "usa": 22064, + "tmp": 22065, + "Ġrude": 22066, + "ĠLegendary": 22067, + "Ġarises": 22068, + "Bad": 22069, + "ĠClaim": 22070, + "peg": 22071, + "Ġrealities": 22072, + "Think": 22073, + "Ġ°": 22074, + "Ġrode": 22075, + "Ġstrive": 22076, + "Ġanecd": 22077, + "Ġshorts": 22078, + "Ġhypothes": 22079, + "Ġcoordinated": 22080, + "ĠGandhi": 22081, + "ĠFPS": 22082, + "RED": 22083, + "Ġsusceptible": 22084, + "Ġshrink": 22085, + "ĠChart": 22086, + "Help": 22087, + "Ġion": 22088, + "deep": 22089, + "ribes": 22090, + "ĠKai": 22091, + "ĠCustomer": 22092, + "Summary": 22093, + "Ġcough": 22094, + "wife": 22095, + "Ġlend": 22096, + "Ġpositioning": 22097, + "Ġlottery": 22098, + "ĠCanyon": 22099, + "Ġfade": 22100, + "Ġbronze": 22101, + "ĠKenny": 22102, + "Ġboasts": 22103, + "ĠEnhanced": 22104, + "record": 22105, + "Ġemergence": 22106, + "Ġakin": 22107, + "ĠBert": 22108, + "itous": 22109, + "âĸij": 22110, + "Ġstip": 22111, + "Ġexchanged": 22112, + "omore": 22113, + "alsh": 22114, + "Ġreservoir": 22115, + "Ġstandpoint": 22116, + "WM": 22117, + "Ġinitiate": 22118, + "Ġdecay": 22119, + "Ġbrewery": 22120, + "Ġterribly": 22121, + "Ġmortal": 22122, + "levard": 22123, + "Ġrevis": 22124, + "NI": 22125, + "elo": 22126, + "Ġconfess": 22127, + "ĠMSNBC": 22128, + "Ġsubmissions": 22129, + "Controller": 22130, + "Ġ202": 22131, + "ĠRuth": 22132, + "});": 22133, + "ĠAzure": 22134, + "Ġ.\"": 22135, + "206": 22136, + "ĠMarketing": 22137, + "Ġlaund": 22138, + "iencies": 22139, + "Ġrenowned": 22140, + "ĠTrou": 22141, + "ĠNGO": 22142, + "blems": 22143, + "Ġterrified": 22144, + "Ġwarns": 22145, + "Ġpert": 22146, + "Ġunsure": 22147, + "480": 22148, + "alez": 22149, + "ultz": 22150, + "ĠOutside": 22151, + "Ġstyl": 22152, + "ĠUnderground": 22153, + "Ġpanc": 22154, + "Ġdictionary": 22155, + "Ġfoe": 22156, + "riminal": 22157, + "ĠNorwegian": 22158, + "Ġjailed": 22159, + "Ġmaternal": 22160, + "ée": 22161, + "ĠLucy": 22162, + "cop": 22163, + "Cho": 22164, + "Ġunsigned": 22165, + "ĠZelda": 22166, + "ĠInsider": 22167, + "ĠContinued": 22168, + "Ġ133": 22169, + "ĠNaruto": 22170, + "ĠMajority": 22171, + "169": 22172, + "ĠWo": 22173, + "ãĤĵ": 22174, + "Ġpastor": 22175, + "Ġinformal": 22176, + "н": 22177, + "anthrop": 22178, + "join": 22179, + "ãģĹ": 22180, + "itational": 22181, + "NP": 22182, + "ĠWriting": 22183, + "fn": 22184, + "ĠBever": 22185, + "195": 22186, + "Ġyelling": 
22187, + "Ġdrastically": 22188, + "Ġeject": 22189, + "Ġneut": 22190, + "Ġthrive": 22191, + "ĠFrequ": 22192, + "oux": 22193, + "Ġpossesses": 22194, + "ĠSenators": 22195, + "ĠDES": 22196, + "ĠShakespeare": 22197, + "ĠFranco": 22198, + "ĠLB": 22199, + "uchi": 22200, + "Ġincarn": 22201, + "Ġfounders": 22202, + "Function": 22203, + "Ġbrightness": 22204, + "ĠBT": 22205, + "Ġwhale": 22206, + "ĠTheater": 22207, + "mass": 22208, + "ĠDoll": 22209, + "Something": 22210, + "Ġechoed": 22211, + "ĠHex": 22212, + "crit": 22213, + "afia": 22214, + "Ġgoddess": 22215, + "Ġeleven": 22216, + "ĠPreview": 22217, + "ĠAurora": 22218, + "Ġ401": 22219, + "ulsive": 22220, + "ĠLogan": 22221, + "inburgh": 22222, + "ĠCenters": 22223, + "ĠONLY": 22224, + "ĠAid": 22225, + "Ġparadox": 22226, + "Ġhurd": 22227, + "ĠLC": 22228, + "Due": 22229, + "court": 22230, + "Ġoffended": 22231, + "Ġevaluating": 22232, + "ĠMatthews": 22233, + "Ġtomb": 22234, + "Ġpayroll": 22235, + "Ġextraction": 22236, + "ĠHands": 22237, + "ifi": 22238, + "Ġsupernatural": 22239, + "ĠCOMM": 22240, + "]=": 22241, + "dogs": 22242, + "Ġ512": 22243, + "ĠMeeting": 22244, + "Richard": 22245, + "ĠMaximum": 22246, + "Ġideals": 22247, + "Things": 22248, + "mand": 22249, + "ĠRegardless": 22250, + "Ġhumili": 22251, + "buffer": 22252, + "Little": 22253, + "ĠDani": 22254, + "ĠNak": 22255, + "Ġliberation": 22256, + "ĠAbe": 22257, + "ĠOL": 22258, + "Ġstuffed": 22259, + "aca": 22260, + "inda": 22261, + "raphic": 22262, + "Ġmosqu": 22263, + "Ġcampaigning": 22264, + "Ġoccupy": 22265, + "Squ": 22266, + "rina": 22267, + "ĠWel": 22268, + "ĠVS": 22269, + "Ġphysic": 22270, + "Ġpuls": 22271, + "rint": 22272, + "oaded": 22273, + "ETF": 22274, + "ĠArchives": 22275, + "Ġvenues": 22276, + "hner": 22277, + "ĠTurbo": 22278, + "Ġlust": 22279, + "Ġappealed": 22280, + "quez": 22281, + "ilib": 22282, + "ĠTimothy": 22283, + "Ġomn": 22284, + "dro": 22285, + "Ġobsession": 22286, + "ĠSavage": 22287, + "1996": 22288, + "Global": 22289, + "Jes": 22290, + "214": 22291, + "Ġsliding": 22292, + "Ġdisappro": 22293, + "ĠMagical": 22294, + "Ġvoluntarily": 22295, + "gb": 22296, + "aney": 22297, + "Ġprophet": 22298, + "ĠRein": 22299, + "ĠJulia": 22300, + "ĠWorth": 22301, + "aurus": 22302, + "Ġbounds": 22303, + "ieu": 22304, + ")))": 22305, + "Ġcrore": 22306, + "ĠCitizen": 22307, + "Sky": 22308, + "Ġcolumnist": 22309, + "Ġseekers": 22310, + "ondo": 22311, + "ISA": 22312, + "ĠLength": 22313, + "Ġnostalg": 22314, + "Ġnewcom": 22315, + "Ġdetrim": 22316, + "entric": 22317, + "375": 22318, + "ĠGE": 22319, + "Ġautop": 22320, + "Ġacademics": 22321, + "AppData": 22322, + "ĠShen": 22323, + "Ġidiot": 22324, + "ĠTransit": 22325, + "Ġteaspoon": 22326, + "Wil": 22327, + "KO": 22328, + "ĠComedy": 22329, + ">,": 22330, + "Ġpopulated": 22331, + "WD": 22332, + "Ġpigs": 22333, + "ĠOculus": 22334, + "Ġsympathetic": 22335, + "Ġmarathon": 22336, + "198": 22337, + "Ġseizure": 22338, + "sided": 22339, + "Ġdop": 22340, + "irtual": 22341, + "Land": 22342, + "ĠFloor": 22343, + "osaurs": 22344, + "...]": 22345, + "Ġlos": 22346, + "Ġsubsidiary": 22347, + "EY": 22348, + "ĠParts": 22349, + "ĠStef": 22350, + "ĠJudiciary": 22351, + "Ġ134": 22352, + "Ġmirrors": 22353, + "Ġket": 22354, + "times": 22355, + "Ġneurolog": 22356, + "Ġcav": 22357, + "ĠGuest": 22358, + "Ġtumor": 22359, + "scill": 22360, + "ĠLloyd": 22361, + "Est": 22362, + "Ġclearer": 22363, + "Ġstereotypes": 22364, + "Ġdur": 22365, + "nothing": 22366, + "Reddit": 22367, + "Ġnegotiated": 22368, + "------------------------": 22369, + "235": 22370, + "Ġflown": 22371, + "ĠSeoul": 
22372, + "ĠResident": 22373, + "ĠSCH": 22374, + "Ġdisappearance": 22375, + "ĠVince": 22376, + "grown": 22377, + "Ġgrabs": 22378, + "ril": 22379, + "ĠInfinite": 22380, + "ĠTwenty": 22381, + "Ġpedestrian": 22382, + "Ġjersey": 22383, + "ĠFur": 22384, + "ĠInfinity": 22385, + "ĠElliott": 22386, + "Ġmentor": 22387, + "Ġmorally": 22388, + "Ġobey": 22389, + "secure": 22390, + "iffe": 22391, + "Ġantibiotics": 22392, + "angled": 22393, + "ĠFreeman": 22394, + "ĠIntroduction": 22395, + "Jun": 22396, + "Ġmarsh": 22397, + "icans": 22398, + "ĠEVENTS": 22399, + "ochond": 22400, + "Wall": 22401, + "iculty": 22402, + "Ġmisdemeanor": 22403, + "Ġly": 22404, + "Thomas": 22405, + "ĠResolution": 22406, + "Ġanimations": 22407, + "ĠDry": 22408, + "Ġintercourse": 22409, + "ĠNewcastle": 22410, + "ĠHog": 22411, + "ĠEquipment": 22412, + "177": 22413, + "Ġterritorial": 22414, + "Ġarchives": 22415, + "203": 22416, + "Filter": 22417, + "ĠMunich": 22418, + "Ġcommanded": 22419, + "ĠWand": 22420, + "Ġpitches": 22421, + "ĠCroat": 22422, + "Ġratios": 22423, + "ĠMits": 22424, + "Ġaccumulated": 22425, + "ĠSpecifically": 22426, + "Ġgentleman": 22427, + "acerb": 22428, + "Ġpenn": 22429, + "Ġaka": 22430, + "ĠFuk": 22431, + "Ġintervene": 22432, + "ĠRefuge": 22433, + "ĠAlzheimer": 22434, + "Ġsuccession": 22435, + "ohan": 22436, + "does": 22437, + "Lord": 22438, + "Ġseparat": 22439, + "Ġcorrespondence": 22440, + "Ġshiny": 22441, + "Prior": 22442, + "Ġsulf": 22443, + "Ġmiserable": 22444, + "Ġdedication": 22445, + "().": 22446, + "Ġspecialists": 22447, + "Ġdefects": 22448, + "ĠCult": 22449, + "ĠXia": 22450, + "Ġjeopard": 22451, + "ĠOre": 22452, + "Ability": 22453, + "Ġlear": 22454, + "Ġambitions": 22455, + "ĠBMI": 22456, + "ĠArabs": 22457, + "Ġ1942": 22458, + "Ġpreservation": 22459, + "ificate": 22460, + "Ġashamed": 22461, + "loss": 22462, + "ĠRestaur": 22463, + "Ġresemble": 22464, + "Ġenrich": 22465, + "ĠKN": 22466, + "ĠClan": 22467, + "float": 22468, + "Ġplayable": 22469, + "ITT": 22470, + "Ġharmony": 22471, + "arrison": 22472, + "ĠWeinstein": 22473, + "were": 22474, + "Ġpoisoning": 22475, + "ĠComput": 22476, + "ĠWordPress": 22477, + "major": 22478, + "ĠValve": 22479, + "Fan": 22480, + "ĠThrow": 22481, + "ĠRomans": 22482, + "ĠDepression": 22483, + "ados": 22484, + "Ġtortured": 22485, + "Ġbalancing": 22486, + "bottom": 22487, + "Ġacquiring": 22488, + "ĠMonte": 22489, + "ardi": 22490, + "Ġaura": 22491, + "Ġ##": 22492, + "ĠStanding": 22493, + "ĠAtlas": 22494, + "CF": 22495, + "Ġintrins": 22496, + "ĠBenghazi": 22497, + "Ġcamping": 22498, + "Ġtapped": 22499, + "blade": 22500, + "strous": 22501, + "ĠRabb": 22502, + "ĠWritten": 22503, + "tip": 22504, + "ĠNeigh": 22505, + "sterdam": 22506, + "ĠAllow": 22507, + "ĠHealing": 22508, + "ĠRhod": 22509, + "num": 22510, + "Ġcaffeine": 22511, + "ĠPercent": 22512, + "Ġboo": 22513, + "Ġapples": 22514, + "305": 22515, + "Ġwelcoming": 22516, + "Ġapplaud": 22517, + "Ġausterity": 22518, + "±": 22519, + "ĠReality": 22520, + "efe": 22521, + "å®": 22522, + "Ġsucks": 22523, + "Ġtabs": 22524, + "ĠPayPal": 22525, + "Ġbackpack": 22526, + "Ġgifted": 22527, + "abulary": 22528, + "ĠScout": 22529, + "irteen": 22530, + "Ġchin": 22531, + "Ġomitted": 22532, + "Ġnegatively": 22533, + "Ġaccessing": 22534, + "ĠEarn": 22535, + "Ġambulance": 22536, + "Ġheadphones": 22537, + "Ġ205": 22538, + "ĠRefresh": 22539, + "president": 22540, + "ĠKitchen": 22541, + "ĠEntered": 22542, + "ĠSnyder": 22543, + "005": 22544, + "omical": 22545, + "Ġborrowed": 22546, + "ĠNem": 22547, + "Ġaviation": 22548, + "Ġstall": 22549, + "rimination": 
22550, + "Ġuniforms": 22551, + "itime": 22552, + "ĠSimmons": 22553, + "energy": 22554, + "ablished": 22555, + "yy": 22556, + "qualified": 22557, + "Ġrallies": 22558, + "ĠStuart": 22559, + "flight": 22560, + "Ġgangs": 22561, + "rag": 22562, + "Ġvault": 22563, + "lux": 22564, + "ĠCompar": 22565, + "Ġdesignation": 22566, + "209": 22567, + "ĠJos": 22568, + "dollar": 22569, + "zero": 22570, + "Ġwells": 22571, + "303": 22572, + "Ġconstituents": 22573, + "Ġheck": 22574, + "Ġcows": 22575, + "Ġcommanders": 22576, + "Ġdifferential": 22577, + "ĠCatherine": 22578, + "299": 22579, + "Ġvalve": 22580, + "Ġbrace": 22581, + "Ġperspectives": 22582, + "cert": 22583, + "fact": 22584, + "icularly": 22585, + "ĠMcN": 22586, + "planes": 22587, + "Ġintric": 22588, + "Ġpeas": 22589, + "ovan": 22590, + "Ġtossed": 22591, + "retch": 22592, + "ĠLopez": 22593, + "Ġunfamiliar": 22594, + "death": 22595, + "ĠApart": 22596, + "ĠChang": 22597, + "Ġrelieved": 22598, + "rophe": 22599, + "Ġairports": 22600, + "Ġfreak": 22601, + "util": 22602, + "Mill": 22603, + "ĠChin": 22604, + "ĠOwen": 22605, + "male": 22606, + "ĠBroken": 22607, + "ĠWinds": 22608, + "rob": 22609, + "rising": 22610, + "Ġfirefighters": 22611, + "Ġauthoritarian": 22612, + "Ġ148": 22613, + "Bitcoin": 22614, + "external": 22615, + "Ġbrowsers": 22616, + "ichever": 22617, + "orian": 22618, + "Ġunb": 22619, + "Ġpoke": 22620, + "ĠZot": 22621, + "Mid": 22622, + "ĠPopular": 22623, + "Ġcovert": 22624, + "Ġcontributes": 22625, + "Ġ650": 22626, + "Ġcontention": 22627, + "Gate": 22628, + "Ġconsoles": 22629, + "Ġchromos": 22630, + "ĠIX": 22631, + "Ġvisually": 22632, + "ĠEisen": 22633, + "Ġjewelry": 22634, + "Ġdelegation": 22635, + "Ġaccelerate": 22636, + "ĠRiley": 22637, + "Ġslope": 22638, + "Ġindoor": 22639, + "itially": 22640, + "Ġhugely": 22641, + "Ġtunnels": 22642, + "Ġfined": 22643, + "Ġdirective": 22644, + "Ġforehead": 22645, + "ustomed": 22646, + "Ġskate": 22647, + "Music": 22648, + "gas": 22649, + "Ġrecognizing": 22650, + "ambo": 22651, + "Ġoverweight": 22652, + "ĠGrade": 22653, + "ÙĬ": 22654, + "Ġsounding": 22655, + "Ġlocking": 22656, + "ĠREM": 22657, + "Store": 22658, + "Ġexcav": 22659, + "ĠLikewise": 22660, + "ĠLights": 22661, + "Ġelbow": 22662, + "ĠSupply": 22663, + "wic": 22664, + "Ġhandsome": 22665, + "1994": 22666, + "Coll": 22667, + "Ġadequately": 22668, + "ĠAssociate": 22669, + "Ġstrips": 22670, + "Ġcrackdown": 22671, + "Ġmarvel": 22672, + "ĠKun": 22673, + "Ġpassages": 22674, + "@@@@": 22675, + "ĠTall": 22676, + "Ġthoughtful": 22677, + "namese": 22678, + "Ġprostitution": 22679, + "business": 22680, + "Ġballistic": 22681, + "personal": 22682, + "cig": 22683, + "izational": 22684, + "Round": 22685, + "ĠÂłĠÂłĠÂłĠÂł": 22686, + "ĠColeman": 22687, + "Ġadmitting": 22688, + "ĠPlug": 22689, + "Ġbitcoins": 22690, + "ĠSuz": 22691, + "Ġfairness": 22692, + "Ġsupplier": 22693, + "Ġcatastrophic": 22694, + "ĠHelen": 22695, + "oqu": 22696, + "Marc": 22697, + "ĠArticles": 22698, + "gie": 22699, + "Ġendangered": 22700, + "Ġdestiny": 22701, + "ĠVolt": 22702, + "olia": 22703, + "axis": 22704, + "Ġcheat": 22705, + "Ġunified": 22706, + "ICO": 22707, + "quote": 22708, + "302": 22709, + "ĠSed": 22710, + "Ġsuppression": 22711, + "Ġanalyzing": 22712, + "Ġsquat": 22713, + "Ġfiguring": 22714, + "Ġcoordinates": 22715, + "Ġchunks": 22716, + "Ġ1946": 22717, + "Ġsubp": 22718, + "Ġwiki": 22719, + "ĠForbes": 22720, + "ĠJupiter": 22721, + "ĠErik": 22722, + "imer": 22723, + "ĠCommercial": 22724, + "\\)": 22725, + "Ġlegitimacy": 22726, + "Ġdental": 22727, + "ĠMean": 22728, + "Ġdeficits": 22729, 
+ "550": 22730, + "Originally": 22731, + "ĠHorror": 22732, + "Ġcontamination": 22733, + "llah": 22734, + "Ġconfisc": 22735, + "ĠClare": 22736, + "TB": 22737, + "ĠFailed": 22738, + "aned": 22739, + "Ġruler": 22740, + "ĠController": 22741, + "Ġfeminists": 22742, + "Fix": 22743, + "gay": 22744, + "207": 22745, + "Ġrabbit": 22746, + "Third": 22747, + "owntown": 22748, + "Ġglue": 22749, + "Ġvolatile": 22750, + "Ġshining": 22751, + "Ġfoll": 22752, + "Ġimpaired": 22753, + "Ġsupers": 22754, + "æĪ": 22755, + "Ġclutch": 22756, + "ļéĨĴ": 22757, + "Ġprolet": 22758, + "Ġ(!": 22759, + "Ġyelled": 22760, + "ĠKiev": 22761, + "ĠErn": 22762, + "ĠShock": 22763, + "KB": 22764, + "Ġsituated": 22765, + "query": 22766, + "ĠNas": 22767, + "Ġannex": 22768, + "character": 22769, + "ĠHoliday": 22770, + "Ġautomation": 22771, + "ĠJill": 22772, + "ĠRemastered": 22773, + "Ġlinem": 22774, + "Ġwilderness": 22775, + "ĠHorizon": 22776, + "ĠGuinea": 22777, + "AZ": 22778, + "Ġmainland": 22779, + "Ġsecrecy": 22780, + "LEASE": 22781, + "Ġpunk": 22782, + "ĠProvince": 22783, + "(),": 22784, + "Speed": 22785, + "Ġhanding": 22786, + "ĠSebast": 22787, + "Sir": 22788, + "rase": 22789, + "Ġjournals": 22790, + "Ġcongest": 22791, + "ĠTut": 22792, + "irrel": 22793, + "Ġschizophrenia": 22794, + "Ġmisogyn": 22795, + "healthy": 22796, + "Iron": 22797, + "Ġreacted": 22798, + "-$": 22799, + "252": 22800, + "Ġplural": 22801, + "Ġplum": 22802, + "Ġbargain": 22803, + "Ġgrounded": 22804, + "finder": 22805, + "Ġdisse": 22806, + "ĠLaz": 22807, + "OOD": 22808, + "Ġatroc": 22809, + "Factory": 22810, + "Ġminions": 22811, + "Ġori": 22812, + "ĠBrave": 22813, + "ĠPRE": 22814, + "ĠMyanmar": 22815, + "ĠHod": 22816, + "Ġexpedition": 22817, + "Ġexplode": 22818, + "ĠCoord": 22819, + "Ġextr": 22820, + "ĠBrief": 22821, + "ĠADHD": 22822, + "Ġhardcore": 22823, + "feeding": 22824, + "Ġdile": 22825, + "ĠFruit": 22826, + "Ġvaccination": 22827, + "ĠMao": 22828, + "osphere": 22829, + "Ġcontests": 22830, + "-|": 22831, + "Ġfren": 22832, + "isphere": 22833, + "Rom": 22834, + "ĠSharp": 22835, + "ĠTrend": 22836, + "Ġdisconnect": 22837, + "âĢ¢âĢ¢": 22838, + "Ġpersecution": 22839, + "Earth": 22840, + "Ġhealthier": 22841, + "384": 22842, + "Ġcob": 22843, + "ĠTrinity": 22844, + "OWS": 22845, + "ANN": 22846, + "Ġspecialty": 22847, + "Ġgru": 22848, + "Ġcooperative": 22849, + "why": 22850, + "Starting": 22851, + "ĠIssues": 22852, + "stre": 22853, + "ensor": 22854, + "Ġ185": 22855, + "Adv": 22856, + "!?": 22857, + "ĠRevel": 22858, + "emia": 22859, + "ĠHulk": 22860, + "Ġcelebrations": 22861, + "ĠSou": 22862, + "raud": 22863, + "ĠKlein": 22864, + "Ġunreal": 22865, + "context": 22866, + "Ġpartnerships": 22867, + "Ġadopting": 22868, + "tical": 22869, + "Ġsplash": 22870, + "ĠHezbollah": 22871, + "category": 22872, + "cyclop": 22873, + "xton": 22874, + "ĠDot": 22875, + "urdy": 22876, + "tz": 22877, + "Ġenvelope": 22878, + "ĠNL": 22879, + "âķ": 22880, + "Ġwherein": 22881, + "Spec": 22882, + "184": 22883, + "Ġtelev": 22884, + "aliation": 22885, + "Ġmyths": 22886, + "å°": 22887, + "Ġrigorous": 22888, + "Ġcommunicating": 22889, + "Ġobserver": 22890, + "Ġrehe": 22891, + "ĠWash": 22892, + "Ġapologized": 22893, + "ĠTin": 22894, + "Ġexpenditures": 22895, + "workers": 22896, + "document": 22897, + "Ġhesitate": 22898, + "ĠLenin": 22899, + "Ġunpredictable": 22900, + "Ġrenewal": 22901, + "cler": 22902, + "okia": 22903, + "ĠCONT": 22904, + "Ġpostseason": 22905, + "Tokens": 22906, + "Ġexacerb": 22907, + "Ġbetting": 22908, + "Ġ147": 22909, + "Ġelevation": 22910, + "Wood": 22911, + "ĠSolomon": 22912, + 
"194": 22913, + "004": 22914, + "output": 22915, + "Ġredund": 22916, + "ĠMumbai": 22917, + "ĠpH": 22918, + "Ġreproduce": 22919, + "ĠDuration": 22920, + "MAX": 22921, + "Ġbog": 22922, + "CBS": 22923, + "ĠBalance": 22924, + "ĠSgt": 22925, + "ĠRecent": 22926, + "Ġcd": 22927, + "Ġpopped": 22928, + "Ġincompet": 22929, + "prop": 22930, + "ayan": 22931, + "guy": 22932, + "Pacific": 22933, + "Ġtyr": 22934, + "Ġ{{": 22935, + "ĠMystic": 22936, + "ĠDana": 22937, + "Ġmasturb": 22938, + "Ġgeometry": 22939, + "â": 22940, + "ĠCorrect": 22941, + "Ġtrajectory": 22942, + "Ġdistracted": 22943, + "Ġfoo": 22944, + "ĠWelsh": 22945, + "Luc": 22946, + "mith": 22947, + "Ġrugby": 22948, + "Ġrespiratory": 22949, + "Ġtriangle": 22950, + "Ġ215": 22951, + "Ġundergraduate": 22952, + "ĠSuperior": 22953, + "changing": 22954, + "_-": 22955, + "Ġrightly": 22956, + "Ġreferee": 22957, + "Ġlucrative": 22958, + "Ġunauthorized": 22959, + "Ġresembles": 22960, + "ĠGNU": 22961, + "ĠDerby": 22962, + "Ġpathways": 22963, + "ĠLed": 22964, + "Ġendurance": 22965, + "Ġstint": 22966, + "Ġcollector": 22967, + "Fast": 22968, + "Ġdots": 22969, + "Ġnationals": 22970, + "ĠSecurities": 22971, + "Ġwhip": 22972, + "Param": 22973, + "Ġlearns": 22974, + "Magic": 22975, + "Ġdetailing": 22976, + "moon": 22977, + "Ġbroadcasting": 22978, + "Ġbaked": 22979, + "265": 22980, + "holm": 22981, + "ĠSah": 22982, + "ĠHussein": 22983, + "ĠCourtesy": 22984, + "174": 22985, + "Ġ146": 22986, + "Ġgeographic": 22987, + "peace": 22988, + "Ġjudging": 22989, + "ĠStern": 22990, + "Bur": 22991, + "Ġstoryline": 22992, + "Gun": 22993, + "ĠStick": 22994, + "245": 22995, + "307": 22996, + "ãĤ´ãĥ³": 22997, + "ĠAdministrator": 22998, + "Ġburnt": 22999, + "Ġpave": 23000, + "choes": 23001, + "Exec": 23002, + "Ġcampuses": 23003, + "Result": 23004, + "Ġmutations": 23005, + "ĠCharter": 23006, + "Ġcaptures": 23007, + "Ġcompares": 23008, + "Ġbadge": 23009, + "Scient": 23010, + "Ġerad": 23011, + "iery": 23012, + "oi": 23013, + "ettes": 23014, + "ĠEstate": 23015, + "Ġstrap": 23016, + "Ġproudly": 23017, + "Ġfried": 23018, + "Ġwithdrawn": 23019, + "ĠVoy": 23020, + "phony": 23021, + "Items": 23022, + "ĠPierce": 23023, + "bard": 23024, + "Ġannotation": 23025, + "anton": 23026, + "illon": 23027, + "Impro": 23028, + "...)": 23029, + "Ġhappier": 23030, + "------": 23031, + "adjust": 23032, + "Ġstaffers": 23033, + "Ġactivism": 23034, + "Ġperf": 23035, + "Ġalright": 23036, + "Need": 23037, + "Ġcommence": 23038, + "Ġopioid": 23039, + "ĠAmanda": 23040, + "Es": 23041, + "ĠPars": 23042, + "ĠKaw": 23043, + "Works": 23044, + "248": 23045, + "Ġindo": 23046, + "tc": 23047, + "endant": 23048, + "ĠMoto": 23049, + "Ġlegalization": 23050, + "OTE": 23051, + "Ġtasked": 23052, + "Ġtsp": 23053, + "ĠACTIONS": 23054, + "166": 23055, + "Ġrefreshing": 23056, + "ĠNR": 23057, + "ĠPerez": 23058, + "Ġinfringement": 23059, + "SY": 23060, + "Listen": 23061, + "inning": 23062, + "ku": 23063, + "Ġrotate": 23064, + "program": 23065, + "arah": 23066, + "Design": 23067, + "Ġ(£": 23068, + "Ġstoring": 23069, + "Ġwarrants": 23070, + "Ġjudgement": 23071, + "ĠBrist": 23072, + "usually": 23073, + "photo": 23074, + "ĠRan": 23075, + "ĠPine": 23076, + "Ġoutrageous": 23077, + "ĠValentine": 23078, + "luence": 23079, + "ĠEverybody": 23080, + "Altern": 23081, + "Ġrelevance": 23082, + "Ġterminated": 23083, + "Ġdessert": 23084, + "Ġfulfilled": 23085, + "Ġprosecuted": 23086, + "ĠWords": 23087, + "Ġmigrant": 23088, + "Ġcultivation": 23089, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 23090, + "idelity": 23091, + 
"ĠVern": 23092, + "ĠLogin": 23093, + "Ġmetaphor": 23094, + "ĠTip": 23095, + "Ġrecruits": 23096, + "ĠPig": 23097, + "ribing": 23098, + "Ġenthusiasts": 23099, + "exper": 23100, + "Ġfrightening": 23101, + "ĠHair": 23102, + "anson": 23103, + "strate": 23104, + "Ġhi": 23105, + "Height": 23106, + "Ġowning": 23107, + "none": 23108, + "Ġdislike": 23109, + "Ġknives": 23110, + "pherd": 23111, + "Ġloudly": 23112, + "ĠAPIs": 23113, + "Display": 23114, + "ĠLac": 23115, + "ĠUSS": 23116, + "abl": 23117, + "verages": 23118, + "Jew": 23119, + "Ġ172": 23120, + "ĠHistorical": 23121, + "atoon": 23122, + "ĠPhysics": 23123, + "intern": 23124, + "Ġwarmth": 23125, + "Ġtopp": 23126, + "DM": 23127, + "Ġgunman": 23128, + "Ġemperor": 23129, + "odi": 23130, + "ãĥ£": 23131, + "inatory": 23132, + "ĠRib": 23133, + "Ġ131": 23134, + "ĠSaturn": 23135, + "ĠShining": 23136, + "Ġwaking": 23137, + "Quotes": 23138, + "Ġcomedian": 23139, + "enberg": 23140, + "½": 23141, + "Ġbelievers": 23142, + "Ġpaperwork": 23143, + "custom": 23144, + "Ġlev": 23145, + "Ġlament": 23146, + "Ġpouring": 23147, + "222": 23148, + "political": 23149, + "ĠSupplement": 23150, + "maid": 23151, + "Ġcruelty": 23152, + "Ġtread": 23153, + "ysics": 23154, + "Aw": 23155, + "rites": 23156, + "Ġmodifier": 23157, + "ĠPosition": 23158, + "Adam": 23159, + "lb": 23160, + "ubs": 23161, + "Ġimperfect": 23162, + "Ġclusters": 23163, + "ĠEngineer": 23164, + "ĠCherry": 23165, + "Ġinauguration": 23166, + "ĠSau": 23167, + "Ġembodiment": 23168, + "ĠUncle": 23169, + "Ġoverr": 23170, + "Ġexplosions": 23171, + "cule": 23172, + "ĠPrinceton": 23173, + "ĠAndrea": 23174, + "Ġincorrectly": 23175, + "Ġearnest": 23176, + "Ġpilgr": 23177, + "ĠSprint": 23178, + "Ġsleeve": 23179, + "Ġhears": 23180, + "ĠAmazing": 23181, + "Ġbrowsing": 23182, + "agin": 23183, + "Ġhomeland": 23184, + "Ġhaw": 23185, + "Ġdiving": 23186, + "istered": 23187, + "178": 23188, + "Ġbargaining": 23189, + "ĠArcade": 23190, + "Ġdelegate": 23191, + "terson": 23192, + "................................................................": 23193, + "ĠJacksonville": 23194, + "275": 23195, + "Ġstagn": 23196, + "Ġadam": 23197, + "ĠSherman": 23198, + "CB": 23199, + "Ġsuburb": 23200, + "ĠFoods": 23201, + "Ġconverting": 23202, + "ĠArist": 23203, + "Ġchambers": 23204, + "love": 23205, + "Ġamino": 23206, + "ĠGan": 23207, + "Ġmadness": 23208, + "mc": 23209, + "ĠUSE": 23210, + "defined": 23211, + "Ġultr": 23212, + "indust": 23213, + "Ġwolves": 23214, + "lance": 23215, + "Additionally": 23216, + "Ġcracks": 23217, + "asia": 23218, + "ĠReason": 23219, + "ĠPump": 23220, + "Ġaccidental": 23221, + "ĠLaser": 23222, + "ĠRid": 23223, + "Ġinitialized": 23224, + "elli": 23225, + "Ġunnamed": 23226, + "Ġnoun": 23227, + "ĠPassed": 23228, + "Ġhostage": 23229, + "ĠEthiop": 23230, + "shirts": 23231, + "Ġunrel": 23232, + "ĠEmbassy": 23233, + "Ġ1941": 23234, + "Ġatoms": 23235, + "Ġpurported": 23236, + "164": 23237, + "ĠFi": 23238, + "Ġgallons": 23239, + "ĠMonica": 23240, + "Ġpg": 23241, + "enment": 23242, + "Ġsorted": 23243, + "ĠGospel": 23244, + "Ġheights": 23245, + "Ġtraced": 23246, + "Ġundergoing": 23247, + "Shell": 23248, + "Ġsacks": 23249, + "Ġproportions": 23250, + "Ġhalluc": 23251, + "Font": 23252, + "acet": 23253, + "Ġwarmer": 23254, + "ĠINTER": 23255, + "Ġgrabbing": 23256, + "Plug": 23257, + "Ġrealization": 23258, + "ĠBurke": 23259, + "Ġenchant": 23260, + "ATER": 23261, + "ĠSeed": 23262, + "Ġabundant": 23263, + "FM": 23264, + "Ġcivic": 23265, + "Vs": 23266, + "isi": 23267, + "Ġvow": 23268, + "Ġreper": 23269, + "ĠPartnership": 23270, + 
"Ġpenetration": 23271, + "Ġaxe": 23272, + "Ġshattered": 23273, + "ĠZombies": 23274, + "Ġvinyl": 23275, + "ĠAlert": 23276, + "eon": 23277, + "Ġobliged": 23278, + "ĠIllust": 23279, + "ĠPlaza": 23280, + "ĠFrontier": 23281, + "Ġdavidjl": 23282, + "ĠSerial": 23283, + "ĠHav": 23284, + "ĠNutrition": 23285, + "Bi": 23286, + "ĠâĸĪ": 23287, + "ĠJays": 23288, + "linux": 23289, + "Ġhurry": 23290, + "Ġvoy": 23291, + "Ġhopeless": 23292, + "ĠStealth": 23293, + "Ġãģ": 23294, + "essors": 23295, + "ttle": 23296, + "borg": 23297, + "ĠSafari": 23298, + "fell": 23299, + "Ġwary": 23300, + "due": 23301, + "ĠAbove": 23302, + "Ha": 23303, + "ELL": 23304, + "Ġnotor": 23305, + "ĠWon": 23306, + "Too": 23307, + "Ġoccupations": 23308, + "Ġpossessions": 23309, + "Ġinviting": 23310, + "Ġpredators": 23311, + "Ġaccelerated": 23312, + "Ġ157": 23313, + "uterte": 23314, + "ĠCube": 23315, + "east": 23316, + "account": 23317, + "Give": 23318, + "Ġtransplant": 23319, + "redients": 23320, + "idable": 23321, + "Ġscreenshots": 23322, + "ĠGund": 23323, + "ĠFS": 23324, + "Ġtravelers": 23325, + "Ġsensory": 23326, + "ĠFiat": 23327, + "ĠRockets": 23328, + "İĭ": 23329, + "_{": 23330, + "Friend": 23331, + "Ġcharming": 23332, + "ALS": 23333, + "Ġenjoyment": 23334, + "mph": 23335, + "Ġ5000": 23336, + "ĠREG": 23337, + "ÙĨ": 23338, + "bia": 23339, + "Ġcompilation": 23340, + "rost": 23341, + "ĠVP": 23342, + "ĠSchne": 23343, + "2019": 23344, + "Ġcopying": 23345, + "MORE": 23346, + "ĠFlore": 23347, + "falls": 23348, + "215": 23349, + "total": 23350, + "Ġdisciples": 23351, + "double": 23352, + "Ġexceeding": 23353, + "Ġsmashed": 23354, + "Ġconceptual": 23355, + "ĠRomania": 23356, + "ĠBrent": 23357, + "ĠICE": 23358, + "ĠTou": 23359, + "Ġgrap": 23360, + "Ġnails": 23361, + "189": 23362, + "ãĥĺ": 23363, + "Ġprocure": 23364, + "eur": 23365, + "Ġconfirming": 23366, + "ĠCec": 23367, + "awi": 23368, + "ĠEden": 23369, + "Ġng": 23370, + "Ġengineered": 23371, + "atics": 23372, + "Ġhooked": 23373, + "Ġdisgusting": 23374, + "ĠMurder": 23375, + "ãĤ¿": 23376, + "Library": 23377, + "Ġ168": 23378, + "Almost": 23379, + "hematic": 23380, + "Menu": 23381, + "ĠNotre": 23382, + "ĠJur": 23383, + "Ġkidnapped": 23384, + "Ġhacker": 23385, + "ĠJade": 23386, + "Ġcreepy": 23387, + "Ġdrawings": 23388, + "ĠSponsor": 23389, + "Ġcyclists": 23390, + "ĠGoblin": 23391, + "Ġoptimized": 23392, + "Ġstaged": 23393, + "ĠMcD": 23394, + "between": 23395, + "Age": 23396, + "eno": 23397, + "Sex": 23398, + "ĠWide": 23399, + "nings": 23400, + "avis": 23401, + "Ġincapable": 23402, + "ĠKob": 23403, + "Ġrewarding": 23404, + "ĠLone": 23405, + "olescent": 23406, + "Ġcontracted": 23407, + "Ġsticky": 23408, + "Jose": 23409, + "Ball": 23410, + "fest": 23411, + "ĠInput": 23412, + "ĠRecently": 23413, + "Ġtomat": 23414, + "square": 23415, + "Application": 23416, + "Ġnitrogen": 23417, + "Ġduplicate": 23418, + "ĠRecon": 23419, + "ĠDear": 23420, + "London": 23421, + "Ġintra": 23422, + "Ġdock": 23423, + "Ġoutreach": 23424, + "ĠMillion": 23425, + "Ġmammals": 23426, + "ampton": 23427, + "VAL": 23428, + "Ġsnaps": 23429, + "Ġdos": 23430, + "ĠWhole": 23431, + "ĠReady": 23432, + "Try": 23433, + "ĠWinnipeg": 23434, + "earance": 23435, + "Ġincurred": 23436, + "renched": 23437, + "ĠNSW": 23438, + "ilot": 23439, + "raine": 23440, + "Ġcube": 23441, + "got": 23442, + "Ġrunway": 23443, + "etermined": 23444, + "ĠHawks": 23445, + "Ġsurvivor": 23446, + "ĠWish": 23447, + "ĠDin": 23448, + "ĠDEF": 23449, + "ĠVault": 23450, + "187": 23451, + "Ġmushrooms": 23452, + "Ġcrisp": 23453, + "bey": 23454, + "ĠDiscovery": 23455, + 
"Ġdevelopmental": 23456, + "Ġparadigm": 23457, + "Ġchaotic": 23458, + "ĠTsu": 23459, + "Ġ333": 23460, + "bons": 23461, + "Ġbacterial": 23462, + "Ġcommits": 23463, + "Ġcosmic": 23464, + "Ġmega": 23465, + "ocative": 23466, + "ĠPaint": 23467, + "ophobic": 23468, + "Ġvain": 23469, + "Ġcarved": 23470, + "ĠThief": 23471, + "ĠGul": 23472, + "owship": 23473, + "Ġcites": 23474, + "ĠEdinburgh": 23475, + "Ġdiminished": 23476, + "Ġacknowledges": 23477, + "ĠKills": 23478, + "Ġmicrow": 23479, + "ĠHera": 23480, + "Ġseniors": 23481, + "Ġwhereby": 23482, + "Hop": 23483, + "atron": 23484, + "Ġunavailable": 23485, + "ĠNate": 23486, + "Ġ480": 23487, + "Ġslated": 23488, + "ĠRebecca": 23489, + "ĠBattery": 23490, + "Ġgrammar": 23491, + "Ġheadset": 23492, + "Ġcursor": 23493, + "Ġexcluding": 23494, + "anye": 23495, + "aundering": 23496, + "ebin": 23497, + "Ġfeasible": 23498, + "ĠPublishing": 23499, + "ĠLabs": 23500, + "ĠCliff": 23501, + "ĠFerrari": 23502, + "Ġpac": 23503, + "visible": 23504, + "marked": 23505, + "pell": 23506, + "Ġpolite": 23507, + "Ġstaggering": 23508, + "ĠGalactic": 23509, + "Ġsuperst": 23510, + "Ġparan": 23511, + "ĠOfficers": 23512, + "ãĢģ": 23513, + "Ġspecifics": 23514, + "ulus": 23515, + "239": 23516, + "ĠPaste": 23517, + "AMP": 23518, + "ĠPanama": 23519, + "ĠDelete": 23520, + "anguard": 23521, + "restrial": 23522, + "Ġheroic": 23523, + "ĠDy": 23524, + "اÙĦ": 23525, + "Ġincumbent": 23526, + "Ġcrunch": 23527, + "tro": 23528, + "Ġscoop": 23529, + "Ġblogger": 23530, + "Ġsellers": 23531, + "uren": 23532, + "Ġmedicines": 23533, + "ĠCaps": 23534, + "ĠAnimation": 23535, + "oxy": 23536, + "Ġoutward": 23537, + "Ġinquiries": 23538, + "229": 23539, + "Ġpsychologist": 23540, + "ĠSask": 23541, + "evil": 23542, + "Ġcontaminated": 23543, + "ãĤ¨": 23544, + "herence": 23545, + "Ġbranded": 23546, + "ĠAbdul": 23547, + "zh": 23548, + "Ġparagraphs": 23549, + "Ġmins": 23550, + "Ġcorrelated": 23551, + "erb": 23552, + "Ġimpart": 23553, + "Ġmilestone": 23554, + "ĠSolutions": 23555, + "otle": 23556, + "Ġundercover": 23557, + "Ġmarched": 23558, + "ĠChargers": 23559, + "fax": 23560, + "ĠSecrets": 23561, + "Ġruth": 23562, + "weather": 23563, + "Ġfeminine": 23564, + "Ġsham": 23565, + "Ġprestigious": 23566, + "iggins": 23567, + "Ġsung": 23568, + "history": 23569, + "ettle": 23570, + "ggie": 23571, + "Ġoutdated": 23572, + "oland": 23573, + "Ġperceptions": 23574, + "ĠSession": 23575, + "ĠDodgers": 23576, + "uj": 23577, + "ĠEND": 23578, + "Doc": 23579, + "Ġdeficiency": 23580, + "Grand": 23581, + "ĠJoker": 23582, + "Ġretrospect": 23583, + "Ġdiagnostic": 23584, + "Ġharmless": 23585, + "Ġrogue": 23586, + "ĠAval": 23587, + "Equ": 23588, + "Ġtransc": 23589, + "ĠRobertson": 23590, + "ĠDepending": 23591, + "ĠBurns": 23592, + "ivo": 23593, + "Ġhostility": 23594, + "Features": 23595, + "ĵĺ": 23596, + "Ġdiscomfort": 23597, + "ĠLCD": 23598, + "specified": 23599, + "ĠExpect": 23600, + "340": 23601, + "Ġimperative": 23602, + "ĠRegular": 23603, + "Chinese": 23604, + "Ġstatewide": 23605, + "Ġsymm": 23606, + "Ġloops": 23607, + "Ġautumn": 23608, + "Nick": 23609, + "Ġshaping": 23610, + "Ġquot": 23611, + "Ġcherry": 23612, + "ĠCrossref": 23613, + "è¦ļéĨĴ": 23614, + "Standard": 23615, + "heed": 23616, + "ĠDell": 23617, + "ĠVietnamese": 23618, + "Ġost": 23619, + "ĠValkyrie": 23620, + "OA": 23621, + "Assad": 23622, + "Ġrebound": 23623, + "ĠTraffic": 23624, + "places": 23625, + "æĺ": 23626, + "ĠBuc": 23627, + "172": 23628, + "Ġshelters": 23629, + "Ġinsisting": 23630, + "ĠCertainly": 23631, + "ĠKenneth": 23632, + "ĠTCP": 23633, + "Ġpenal": 23634, + 
"ĠReplay": 23635, + "heard": 23636, + "Ġdialect": 23637, + "iza": 23638, + "ĠFY": 23639, + "itcher": 23640, + "ĠDL": 23641, + "Ġspiral": 23642, + "Ġquarterbacks": 23643, + "Ġhull": 23644, + "Ġgoogle": 23645, + "Ġtodd": 23646, + "ĠSterling": 23647, + "ĠPlate": 23648, + "Ġspying": 23649, + "mbol": 23650, + "ĠRealm": 23651, + "ĠProced": 23652, + "ĠCrash": 23653, + "Ġterminate": 23654, + "Ġprotesting": 23655, + "Center": 23656, + "guided": 23657, + "Ġuncover": 23658, + "Ġboycott": 23659, + "Ġrealizes": 23660, + "sound": 23661, + "Ġpretending": 23662, + "ĠVas": 23663, + "1980": 23664, + "Ġframed": 23665, + "Ġ139": 23666, + "Ġdescended": 23667, + "Ġrehabilitation": 23668, + "Ġborrowing": 23669, + "ĠBuch": 23670, + "Ġblur": 23671, + "Ron": 23672, + "ĠFrozen": 23673, + "enza": 23674, + "Chief": 23675, + "ĠPoor": 23676, + "Ġtranslates": 23677, + "MIN": 23678, + "Ġ212": 23679, + "JECT": 23680, + "Ġerupted": 23681, + "Ġsuccesses": 23682, + "SEC": 23683, + "Ġplague": 23684, + "Ġgems": 23685, + "doms": 23686, + "Ġstretches": 23687, + "ĠSpy": 23688, + "Ġstorytelling": 23689, + "Credit": 23690, + "ĠPush": 23691, + "Ġtraction": 23692, + "Ġineffective": 23693, + "ĠLuna": 23694, + "Ġtapes": 23695, + "Ġanalytics": 23696, + "ercise": 23697, + "Ġprogrammes": 23698, + "ĠCarbon": 23699, + "Ġbehold": 23700, + "heavy": 23701, + "ĠConservation": 23702, + "ĠFIR": 23703, + "Ġsack": 23704, + "termin": 23705, + "ricks": 23706, + "Ġhoused": 23707, + "Ġunusually": 23708, + "Ice": 23709, + "Ġexecuting": 23710, + "ĠMoroc": 23711, + "eday": 23712, + "Ġeditions": 23713, + "Ġsmarter": 23714, + "ĠBA": 23715, + "Ġoutlaw": 23716, + "Ġvanished": 23717, + "iba": 23718, + "ALSE": 23719, + "ĠSilva": 23720, + "238": 23721, + "Could": 23722, + "Ġphilosopher": 23723, + "Ġevacuated": 23724, + "Secret": 23725, + "142": 23726, + "Ġvisas": 23727, + "ãĤ¬": 23728, + "ĠMalt": 23729, + "ĠClearly": 23730, + "ĠNiger": 23731, + "ĠCairo": 23732, + "ĠFist": 23733, + "380": 23734, + "ĠXML": 23735, + "auto": 23736, + "itant": 23737, + "Ġreinforced": 23738, + "Record": 23739, + "ĠSurvivor": 23740, + "GHz": 23741, + "Ġscrews": 23742, + "parents": 23743, + "Ġoceans": 23744, + "mares": 23745, + "Ġbrakes": 23746, + "vasive": 23747, + "Ġhello": 23748, + "ĠSIM": 23749, + "rimp": 23750, + "Ġore": 23751, + "ĠArmour": 23752, + "247": 23753, + "Ġterrific": 23754, + "Ġtones": 23755, + "141": 23756, + "ĠMinutes": 23757, + "Episode": 23758, + "Ġcurves": 23759, + "Ġinflammatory": 23760, + "Ġbatting": 23761, + "ĠBeautiful": 23762, + "Lay": 23763, + "Ġunpop": 23764, + "vable": 23765, + "Ġriots": 23766, + "ĠTactics": 23767, + "baugh": 23768, + "ĠCock": 23769, + "Ġorgasm": 23770, + "ĠSas": 23771, + "Ġconstructor": 23772, + "etz": 23773, + "Gov": 23774, + "Ġantagon": 23775, + "Ġtheat": 23776, + "Ġdeeds": 23777, + "hao": 23778, + "cuts": 23779, + "ĠMcCl": 23780, + "Ġum": 23781, + "ĠScientists": 23782, + "Ġgrassroots": 23783, + "yssey": 23784, + "\"]=>": 23785, + "Ġsurfaced": 23786, + "Ġshades": 23787, + "Ġneighbours": 23788, + "Ġadvertis": 23789, + "oya": 23790, + "Ġmerged": 23791, + "Upon": 23792, + "Ġgad": 23793, + "Ġanticipate": 23794, + "Anyway": 23795, + "Ġslogan": 23796, + "Ġdisrespect": 23797, + "Iran": 23798, + "ĠTB": 23799, + "acted": 23800, + "Ġsubpoen": 23801, + "mediately": 23802, + "OOOO": 23803, + "Ġwaiver": 23804, + "Ġvulnerabilities": 23805, + "ottesville": 23806, + "ĠHuffington": 23807, + "Josh": 23808, + "ĠDH": 23809, + "Monday": 23810, + "ĠEllen": 23811, + "Know": 23812, + "xon": 23813, + "items": 23814, + "228": 23815, + "Ġfills": 23816, + "ĠNike": 
23817, + "Ġcumulative": 23818, + "andals": 23819, + "Ir": 23820, + "Ġì": 23821, + "Ġfriction": 23822, + "igator": 23823, + "Ġscans": 23824, + "ĠVienna": 23825, + "ldom": 23826, + "Ġperformers": 23827, + "Prim": 23828, + "Ġbidding": 23829, + "Mur": 23830, + "Ġleaned": 23831, + "ĠPrix": 23832, + "alks": 23833, + "Ġ[âĢ¦]": 23834, + "ĠTwitch": 23835, + "ĠDeveloper": 23836, + "ĠGir": 23837, + "Ġcallback": 23838, + "Abstract": 23839, + "Ġaccustomed": 23840, + "Ġfreedoms": 23841, + "ĠPG": 23842, + "uracy": 23843, + "Ġlump": 23844, + "isman": 23845, + ",,,,": 23846, + "1992": 23847, + "ĠRED": 23848, + "Ġworm": 23849, + "Match": 23850, + "ĠPlatinum": 23851, + "IJ": 23852, + "ĠOwner": 23853, + "Trivia": 23854, + "compl": 23855, + "Ġnewborn": 23856, + "Ġfantas": 23857, + "Own": 23858, + "Ġ1959": 23859, + "Ġsympath": 23860, + "Ġubiqu": 23861, + "Ġoutputs": 23862, + "Ġallev": 23863, + "Ġprag": 23864, + "Kevin": 23865, + "Ġfavors": 23866, + "Ġburial": 23867, + "Ġnurt": 23868, + "solete": 23869, + "cache": 23870, + "Ġ156": 23871, + "Ġunlocks": 23872, + "techn": 23873, + "Making": 23874, + "Ġconquer": 23875, + "adic": 23876, + "æĸ": 23877, + "Ġelf": 23878, + "Ġelectorate": 23879, + "ĠKurds": 23880, + "ĠStack": 23881, + "ĠSamurai": 23882, + "Ġâĺħ": 23883, + "Ġ{}": 23884, + "ĠSaid": 23885, + "ĠFallout": 23886, + "Ġkindness": 23887, + "ĠCustoms": 23888, + "ĠBoulevard": 23889, + "Ġhelicopters": 23890, + "otics": 23891, + "ĠVeget": 23892, + "comment": 23893, + "Ġcriticised": 23894, + "Ġpolished": 23895, + "ĠRemix": 23896, + "ĠCultural": 23897, + "Ġrecons": 23898, + "Ġdoi": 23899, + "atem": 23900, + "Screen": 23901, + "Ġbarred": 23902, + "Comments": 23903, + "ĠGenerally": 23904, + "Ġslap": 23905, + "720": 23906, + "Vari": 23907, + "pine": 23908, + "Ġempt": 23909, + "Ġhats": 23910, + "ĠPlaying": 23911, + "lab": 23912, + "average": 23913, + "forms": 23914, + "ĠCotton": 23915, + "Ġcans": 23916, + "ĠDON": 23917, + "ĠSomalia": 23918, + "Crypt": 23919, + "ĠIncreases": 23920, + "Ever": 23921, + "modern": 23922, + "Ġsurgeon": 23923, + "3000": 23924, + "Ġrandomized": 23925, + "================================================================": 23926, + "Bern": 23927, + "impl": 23928, + "ĠCOR": 23929, + "Ġproclaim": 23930, + "thouse": 23931, + "Ġtoes": 23932, + "Ġample": 23933, + "Ġpreserving": 23934, + "Ġdisbel": 23935, + "grand": 23936, + "Besides": 23937, + "Ġsilk": 23938, + "ĠPattern": 23939, + "hm": 23940, + "Ġenterprises": 23941, + "Ġaffidavit": 23942, + "ĠAdvisory": 23943, + "Ġadvertised": 23944, + "ĠReligious": 23945, + "sections": 23946, + "psych": 23947, + "ĠFields": 23948, + "aways": 23949, + "Ġhashtag": 23950, + "ĠNightmare": 23951, + "Ġvampire": 23952, + "Ġforensic": 23953, + "rossover": 23954, + "nar": 23955, + "Ġnavy": 23956, + "Ġvacant": 23957, + "ĠDuel": 23958, + "Ġhallway": 23959, + "Ġfacebook": 23960, + "identally": 23961, + "ĠNRA": 23962, + "Ġmatt": 23963, + "Ġhurricane": 23964, + "ĠKirby": 23965, + "ĠPuzzle": 23966, + "Ġskirt": 23967, + "oust": 23968, + "dullah": 23969, + "Ġanalogy": 23970, + "inion": 23971, + "Ġtomatoes": 23972, + "ĠNV": 23973, + "ĠPeak": 23974, + "ĠMeyer": 23975, + "Ġappointments": 23976, + "Ġmasc": 23977, + "Ġalley": 23978, + "rehend": 23979, + "Ġcharities": 23980, + "Ġundo": 23981, + "Ġdestinations": 23982, + "ĠTesting": 23983, + "\">\"": 24618, + "cats": 24619, + "*.": 24620, + "Ġgestures": 24621, + "general": 24622, + "League": 24623, + "Ġpackets": 24624, + "ĠInspector": 24625, + "ĠBerg": 24626, + "Ġfraudulent": 24627, + "Ġcriticize": 24628, + "Fun": 24629, + "Ġblaming": 24630, 
+ "ndra": 24631, + "Ġslash": 24632, + "ĠEston": 24633, + "Ġproposing": 24634, + "Ġwhales": 24635, + "Ġtherapist": 24636, + "Ġsubset": 24637, + "Ġleisure": 24638, + "ELD": 24639, + "ĠCVE": 24640, + "ĠActivity": 24641, + "Ġculmin": 24642, + "shop": 24643, + "ĠDAY": 24644, + "ischer": 24645, + "ĠAdmiral": 24646, + "ĠAttacks": 24647, + "Ġ1958": 24648, + "Ġmemoir": 24649, + "Ġfolded": 24650, + "Ġsexist": 24651, + "Ġ153": 24652, + "ĠLI": 24653, + "Ġreadings": 24654, + "Ġembarrassment": 24655, + "ĠEmployment": 24656, + "wart": 24657, + "chin": 24658, + "Ġcontinuation": 24659, + "lia": 24660, + "Recently": 24661, + "Ġduel": 24662, + "Ġevacuation": 24663, + "ĠKashmir": 24664, + "Ġdisposition": 24665, + "ĠRig": 24666, + "Ġbolts": 24667, + "Ġinsurers": 24668, + "467": 24669, + "Mex": 24670, + "Ġretaliation": 24671, + "Ġmisery": 24672, + "Ġunreasonable": 24673, + "raining": 24674, + "Imm": 24675, + "ĠPU": 24676, + "emer": 24677, + "Ġgenital": 24678, + "ãĤ³": 24679, + "ĠCandy": 24680, + "Ġonions": 24681, + "ĠPatt": 24682, + "liner": 24683, + "Ġconceded": 24684, + "Ġfa": 24685, + "Ġforc": 24686, + "ĠHernandez": 24687, + "ĠGeoff": 24688, + "debian": 24689, + "ĠTeams": 24690, + "Ġcries": 24691, + "Ġhomeowners": 24692, + "237": 24693, + "ABC": 24694, + "Ġstitch": 24695, + "Ġstatistic": 24696, + "Ġheaders": 24697, + "ĠBiology": 24698, + "Ġmotors": 24699, + "ĠGEN": 24700, + "ĠLip": 24701, + "Ġhates": 24702, + "Ġheel": 24703, + "Self": 24704, + "ipl": 24705, + "EDIT": 24706, + "orting": 24707, + "Ġannot": 24708, + "ĠSpeech": 24709, + "oldemort": 24710, + "ĠJavascript": 24711, + "ĠLeBron": 24712, + "Ġfootprint": 24713, + "Ġfn": 24714, + "Ġseizures": 24715, + "nas": 24716, + "hide": 24717, + "Ġ1954": 24718, + "ĠBee": 24719, + "ĠDeclaration": 24720, + "ĠKatie": 24721, + "Ġreservations": 24722, + "NR": 24723, + "female": 24724, + "Ġsaturated": 24725, + "Ġbiblical": 24726, + "Ġtrolls": 24727, + "Device": 24728, + "photos": 24729, + "Ġdrums": 24730, + "ãĥīãĥ©ãĤ´ãĥ³": 24731, + "Night": 24732, + "fighter": 24733, + "ĠHak": 24734, + "riber": 24735, + "Ġcush": 24736, + "Ġdisciplinary": 24737, + "baum": 24738, + "ĠGH": 24739, + "ĠSchmidt": 24740, + "ilibrium": 24741, + "Ġsixty": 24742, + "ĠKushner": 24743, + "rots": 24744, + "Ġpund": 24745, + "ĠRac": 24746, + "Ġsprings": 24747, + "Ġconve": 24748, + "Business": 24749, + "Fall": 24750, + "Ġqualifications": 24751, + "Ġverses": 24752, + "Ġnarciss": 24753, + "ĠKoh": 24754, + "ĠWow": 24755, + "ĠCharlottesville": 24756, + "edo": 24757, + "Ġinterrogation": 24758, + "ĠWool": 24759, + "365": 24760, + "Brian": 24761, + "Ġâľĵ": 24762, + "Ġalleges": 24763, + "onds": 24764, + "idation": 24765, + "ĠJackie": 24766, + "yu": 24767, + "Ġlakes": 24768, + "Ġworthwhile": 24769, + "Ġcrystals": 24770, + "ĠJuda": 24771, + "Ġcomprehend": 24772, + "Ġflush": 24773, + "Ġabsorption": 24774, + "ĠOC": 24775, + "Ġfrightened": 24776, + "ĠChocolate": 24777, + "Martin": 24778, + "Ġbuys": 24779, + "Ġbucks": 24780, + "Ġappell": 24781, + "ĠChampionships": 24782, + "Ġlistener": 24783, + "ĠDefensive": 24784, + "Ġcz": 24785, + "uds": 24786, + "ĠMate": 24787, + "Ġreplay": 24788, + "Ġdecorated": 24789, + "Ġsunk": 24790, + "ĠVIP": 24791, + "ĠAnk": 24792, + "Ġ195": 24793, + "aaaa": 24794, + "Nobody": 24795, + "ĠMilk": 24796, + "ĠGur": 24797, + "ĠMk": 24798, + "ĠSara": 24799, + "Ġseating": 24800, + "ĠWid": 24801, + "Track": 24802, + "Ġemploys": 24803, + "Ġgigantic": 24804, + "APP": 24805, + "ãĤ§": 24806, + "inventory": 24807, + "Ġtowel": 24808, + "atche": 24809, + "lasting": 24810, + "ĠTL": 24811, + "Ġlatency": 
24812, + "Ġkne": 24813, + "Ber": 24814, + "meaning": 24815, + "Ġupheld": 24816, + "Ġplayground": 24817, + "Ġmant": 24818, + "Side": 24819, + "Ġstereo": 24820, + "Ġnorthwest": 24821, + "Ġexceptionally": 24822, + "Ġrays": 24823, + "Ġrecurring": 24824, + "Drive": 24825, + "Ġupright": 24826, + "Ġabduct": 24827, + "ĠMarathon": 24828, + "Ġgoodbye": 24829, + "Ġalphabet": 24830, + "hp": 24831, + "Ġcourtroom": 24832, + "rington": 24833, + "othing": 24834, + "Tag": 24835, + "Ġdiplomats": 24836, + "Ġbarbar": 24837, + "ĠAqua": 24838, + "183": 24839, + "3333": 24840, + "Ġmaturity": 24841, + "Ġinstability": 24842, + "ĠApache": 24843, + "Ġ===": 24844, + "Ġfasting": 24845, + "ĠGrid": 24846, + "ModLoader": 24847, + "Ġ152": 24848, + "Abs": 24849, + "ĠOperating": 24850, + "etti": 24851, + "Ġacquaint": 24852, + "Donnell": 24853, + "ĠKem": 24854, + "ĠForge": 24855, + "Ġarmored": 24856, + "Mil": 24857, + "Ġphilosophers": 24858, + "invest": 24859, + "Players": 24860, + "âĪ": 24861, + "Ġmyriad": 24862, + "Ġcomrades": 24863, + "Rot": 24864, + "Ġremembering": 24865, + "Ġcorresponds": 24866, + "Ġprogrammers": 24867, + "ĠLynn": 24868, + "Ġolig": 24869, + "Ġcoherent": 24870, + "ynchron": 24871, + "ĠChemical": 24872, + "Ġjugg": 24873, + "pair": 24874, + "posts": 24875, + "Eye": 24876, + "ĠInner": 24877, + "Ġsemester": 24878, + "ottest": 24879, + "ĠEmirates": 24880, + "ricanes": 24881, + "orously": 24882, + "mits": 24883, + "ĠWis": 24884, + "Ġdodge": 24885, + "location": 24886, + "Ġfaded": 24887, + "Amazon": 24888, + "ĠProceed": 24889, + "ĠINFO": 24890, + "journal": 24891, + "ĠTruck": 24892, + "Ten": 24893, + "Ġ217": 24894, + "Ġstatutes": 24895, + "mobile": 24896, + "ĠTypes": 24897, + "Recomm": 24898, + "buster": 24899, + "pex": 24900, + "Ġlegends": 24901, + "Ġheadache": 24902, + "faced": 24903, + "ĠWiFi": 24904, + "ifty": 24905, + "ĠHER": 24906, + "Ġcircuits": 24907, + "ERROR": 24908, + "226": 24909, + "olin": 24910, + "Ġcylinder": 24911, + "ospace": 24912, + "ikers": 24913, + "Prem": 24914, + "Quant": 24915, + "Ġconflicting": 24916, + "Ġslightest": 24917, + "Ġforged": 24918, + "ionage": 24919, + "Stephen": 24920, + "ĠKub": 24921, + "ĠOpportun": 24922, + "ĠHeal": 24923, + "Ġblo": 24924, + "Ġrulers": 24925, + "Ġhuh": 24926, + "Ġsubmarine": 24927, + "fy": 24928, + "asser": 24929, + "Ġallowance": 24930, + "ĠKasich": 24931, + "ĠTas": 24932, + "ĠAustralians": 24933, + "ForgeModLoader": 24934, + "ĠâĨij": 24935, + "ĠMatrix": 24936, + "amins": 24937, + "Ġ1200": 24938, + "ĠAcqu": 24939, + "236": 24940, + "Document": 24941, + "ĠBreaking": 24942, + "193": 24943, + "ĠSubst": 24944, + "ĠRoller": 24945, + "ĠProperties": 24946, + "ĠNI": 24947, + "tier": 24948, + "Ġcrushing": 24949, + "Ġadvocating": 24950, + "Furthermore": 24951, + "keepers": 24952, + "Ġsexism": 24953, + "xd": 24954, + "Ġcaller": 24955, + "ĠSense": 24956, + "chieve": 24957, + "ĠTF": 24958, + "Ġfueled": 24959, + "Ġreminiscent": 24960, + "Ġobsess": 24961, + "urst": 24962, + "Ġuphold": 24963, + "ĠFans": 24964, + "hetics": 24965, + "ĠâĹ": 24966, + "ĠBath": 24967, + "Ġbeverage": 24968, + "Ġoscill": 24969, + "254": 24970, + "Ġpoles": 24971, + "Ġgradual": 24972, + "Ġexting": 24973, + "ĠSuff": 24974, + "ĠSuddenly": 24975, + "Ġliking": 24976, + "Ġ1949": 24977, + "unciation": 24978, + "amination": 24979, + "ĠOmar": 24980, + "ĠLV": 24981, + "ĠConsequently": 24982, + "Ġsynthes": 24983, + "ĠGIF": 24984, + "Ġpains": 24985, + "Ġinteracting": 24986, + "uously": 24987, + "incre": 24988, + "Ġrumor": 24989, + "ĠScientology": 24990, + "197": 24991, + "ĠZig": 24992, + "Ġspelling": 24993, 
+ "ĠASS": 24994, + "Ġextingu": 24995, + "mson": 24996, + "Ġgh": 24997, + "Ġremarked": 24998, + "ĠStrategic": 24999, + "ĠMON": 25000, + "å¥": 25001, + "gae": 25002, + "ĠWHAT": 25003, + "Eric": 25004, + "ĠCampus": 25005, + "Ġmethane": 25006, + "Ġimagin": 25007, + "JUST": 25008, + "ĠAlm": 25009, + "XT": 25010, + "iq": 25011, + "ĠRSS": 25012, + "Ġwrongdoing": 25013, + "atta": 25014, + "Ġbigot": 25015, + "Ġdemonstrators": 25016, + "ĠCalvin": 25017, + "ĠVilla": 25018, + "Ġmembrane": 25019, + "ĠAwesome": 25020, + "Ġbenefic": 25021, + "268": 25022, + "Ġmagnificent": 25023, + "ĠLots": 25024, + "Greg": 25025, + "ĠBoris": 25026, + "Ġdetainees": 25027, + "ĠHerman": 25028, + "Ġwhispered": 25029, + "Ġawe": 25030, + "Professor": 25031, + "funding": 25032, + "Ġphysiological": 25033, + "ĠDestruction": 25034, + "Ġlimb": 25035, + "Ġmanipulated": 25036, + "Ġbubbles": 25037, + "Ġpseud": 25038, + "Ġhydra": 25039, + "ĠBristol": 25040, + "Ġstellar": 25041, + "ĠExpansion": 25042, + "ĠKell": 25043, + "ĠInterestingly": 25044, + "Ġmans": 25045, + "Ġdragging": 25046, + "Ġecological": 25047, + "ĠFit": 25048, + "Ġgent": 25049, + "Ġbenefited": 25050, + "ĠHaiti": 25051, + "Ġpolyg": 25052, + "ãĥİ": 25053, + "Ġ2030": 25054, + "Ġprow": 25055, + "Ġreconstruction": 25056, + "Ġwast": 25057, + "Ġpsychic": 25058, + "ĠGreeks": 25059, + "Handler": 25060, + "162": 25061, + "ĠPulse": 25062, + "Ġsolicit": 25063, + "Ġsys": 25064, + "Ġinflux": 25065, + "ĠGentle": 25066, + "percent": 25067, + "Ġproliferation": 25068, + "Ġtaxable": 25069, + "Ġdisregard": 25070, + "Ġescaping": 25071, + "Ġginger": 25072, + "Ġwithstand": 25073, + "Ġdevastated": 25074, + "ĠDew": 25075, + "series": 25076, + "Ġinjected": 25077, + "elaide": 25078, + "Ġturnover": 25079, + "heat": 25080, + "ĻĤ": 25081, + "Happy": 25082, + "ĠSilent": 25083, + "ãĤŃ": 25084, + "ivism": 25085, + "Ġirrational": 25086, + "AMA": 25087, + "Ġreef": 25088, + "rub": 25089, + "Ġ162": 25090, + "Ġbankers": 25091, + "ĠEthics": 25092, + "vv": 25093, + "Ġcriticisms": 25094, + "Kn": 25095, + "186": 25096, + "Movie": 25097, + "ĠTories": 25098, + "Ġnood": 25099, + "Ġdistortion": 25100, + "False": 25101, + "odore": 25102, + "Ġtasty": 25103, + "Research": 25104, + "ĠUID": 25105, + "-)": 25106, + "Ġdivorced": 25107, + "ĠMU": 25108, + "ĠHayes": 25109, + "ĠIsn": 25110, + "iani": 25111, + "ĠHQ": 25112, + "Ġ\"#": 25113, + "ignant": 25114, + "Ġtraumatic": 25115, + "ĠLing": 25116, + "Hun": 25117, + "Ġsabot": 25118, + "online": 25119, + "random": 25120, + "Ġrenamed": 25121, + "rared": 25122, + "KA": 25123, + "dead": 25124, + "ét": 25125, + "ĠAssistance": 25126, + "Ġseaf": 25127, + "++++++++": 25128, + "Ġseldom": 25129, + "ĠWebb": 25130, + "Ġboolean": 25131, + "ulet": 25132, + "Ġrefrain": 25133, + "ĠDIY": 25134, + "rule": 25135, + "Ġshutting": 25136, + "Ġutilizing": 25137, + "loading": 25138, + "ĠParam": 25139, + "coal": 25140, + "ooter": 25141, + "Ġattracting": 25142, + "ĠDol": 25143, + "Ġhers": 25144, + "agnetic": 25145, + "ĠReach": 25146, + "imo": 25147, + "Ġdiscarded": 25148, + "ĠPip": 25149, + "015": 25150, + "ür": 25151, + "Ġmug": 25152, + "Imagine": 25153, + "COL": 25154, + "Ġcursed": 25155, + "ĠShows": 25156, + "ĠCurtis": 25157, + "ĠSachs": 25158, + "speaking": 25159, + "ĠVista": 25160, + "ĠFramework": 25161, + "ongo": 25162, + "Ġsubreddit": 25163, + "Ġcrus": 25164, + "ĠOval": 25165, + "Row": 25166, + "growing": 25167, + "Ġinstallment": 25168, + "Ġglac": 25169, + "ĠAdvance": 25170, + "ECK": 25171, + "ĠLGBTQ": 25172, + "LEY": 25173, + "Ġacet": 25174, + "Ġsuccessive": 25175, + "ĠNicole": 25176, + "Ġ1957": 
25177, + "Quote": 25178, + "Ġcircumstance": 25179, + "ackets": 25180, + "Ġ142": 25181, + "ortium": 25182, + "Ġguessed": 25183, + "ĠFrame": 25184, + "Ġperpetrators": 25185, + "ĠAviation": 25186, + "ĠBench": 25187, + "Ġhandc": 25188, + "Ap": 25189, + "Ġ1956": 25190, + "259": 25191, + "rand": 25192, + "NetMessage": 25193, + "din": 25194, + "urtles": 25195, + "hig": 25196, + "ĠVIII": 25197, + "ffiti": 25198, + "ĠSwords": 25199, + "bial": 25200, + "Ġkidnapping": 25201, + "device": 25202, + "Ġbarn": 25203, + "ĠEli": 25204, + "aucas": 25205, + "Send": 25206, + "Constructed": 25207, + "Ġ½": 25208, + "Ġneedles": 25209, + "Ġadvertisements": 25210, + "Ġvou": 25211, + "Ġexhibited": 25212, + "ĠFortress": 25213, + "Ask": 25214, + "Berry": 25215, + "TYPE": 25216, + "Ġcancers": 25217, + "umping": 25218, + "ĠTerritory": 25219, + "Ġprud": 25220, + "Ġnas": 25221, + "Ġatheist": 25222, + "Ġbalances": 25223, + "ãģŁ": 25224, + "ĠShawn": 25225, + "&&": 25226, + "Ġlandsc": 25227, + "ĠRGB": 25228, + "Ġpetty": 25229, + "Ġexcellence": 25230, + "Ġtranslations": 25231, + "Ġparcel": 25232, + "ĠChev": 25233, + "East": 25234, + "ĠOutput": 25235, + "imi": 25236, + "Ġambient": 25237, + "ĠThreat": 25238, + "Ġvillains": 25239, + "Ġ550": 25240, + "ICA": 25241, + "Ġtaller": 25242, + "Ġleaking": 25243, + "cup": 25244, + "Ġpolish": 25245, + "Ġinfectious": 25246, + "ĠKC": 25247, + "Ġ@@": 25248, + "background": 25249, + "Ġbureaucracy": 25250, + "ĠSai": 25251, + "unless": 25252, + "itious": 25253, + "ĠSkype": 25254, + "Atl": 25255, + "IDENT": 25256, + "008": 25257, + "Ġhypocr": 25258, + "Ġpitchers": 25259, + "Ġguessing": 25260, + "ĠFINAL": 25261, + "Between": 25262, + "Ġvillagers": 25263, + "Ġ252": 25264, + "fashion": 25265, + "ĠTunis": 25266, + "Beh": 25267, + "ĠExc": 25268, + "ĠMID": 25269, + "288": 25270, + "ĠHaskell": 25271, + "196": 25272, + "ĠNOR": 25273, + "Ġspecs": 25274, + "Ġinvari": 25275, + "Ġglut": 25276, + "ĠCars": 25277, + "Ġimpulse": 25278, + "Ġhonors": 25279, + "gel": 25280, + "Ġjurisdictions": 25281, + "ĠBundle": 25282, + "ulas": 25283, + "California": 25284, + "ĠIncrease": 25285, + "Ġpear": 25286, + "Ġsingles": 25287, + "Ġcues": 25288, + "Ġunderwent": 25289, + "ĠWS": 25290, + "Ġexaggerated": 25291, + "Ġdubious": 25292, + "Ġflashing": 25293, + "LOG": 25294, + ")].": 25295, + "Journal": 25296, + "tg": 25297, + "Van": 25298, + "ĠIstanbul": 25299, + "ĠInsp": 25300, + "ĠFranken": 25301, + "Draw": 25302, + "Ġsadness": 25303, + "Ġironic": 25304, + "ĠFry": 25305, + "xc": 25306, + "Ġ164": 25307, + "isch": 25308, + "Way": 25309, + "ĠProtestant": 25310, + "horn": 25311, + "Ġunaff": 25312, + "ĠViv": 25313, + "illas": 25314, + "ĠProductions": 25315, + "ĠHogan": 25316, + "Ġperimeter": 25317, + "ĠSisters": 25318, + "Ġspontaneous": 25319, + "Ġdownside": 25320, + "Ġdescendants": 25321, + "Ġorn": 25322, + "worm": 25323, + "Japanese": 25324, + "Ġ1955": 25325, + "Ġ151": 25326, + "ĠDoing": 25327, + "elsen": 25328, + "umbles": 25329, + "Ġradically": 25330, + "ĠDrum": 25331, + "ĠBach": 25332, + "Ġliabilities": 25333, + "ĠOB": 25334, + "ĠElementary": 25335, + "Ġmeme": 25336, + "ynes": 25337, + "Ġfingerprint": 25338, + "ĠGrab": 25339, + "Ġundertake": 25340, + "Members": 25341, + "ĠReader": 25342, + "ĠSims": 25343, + "god": 25344, + "Ġhypothetical": 25345, + "scient": 25346, + "ĠAJ": 25347, + "Ġcharism": 25348, + "Ġadmissions": 25349, + "ĠMissile": 25350, + "trade": 25351, + "Ġexercising": 25352, + "ĠBackground": 25353, + "Written": 25354, + "Ġvocals": 25355, + "whether": 25356, + "Ġvi": 25357, + "ĠWinner": 25358, + "Ġlitter": 25359, + 
"ĠShooting": 25360, + "STEM": 25361, + "ãĤ¡": 25362, + "ĠAFL": 25363, + "Ġvariability": 25364, + "Ġeats": 25365, + "ĠDPS": 25366, + "brow": 25367, + "Ġelephants": 25368, + "Ġstrat": 25369, + "ĠÅ": 25370, + "Ġsettlers": 25371, + "Matthew": 25372, + "Ġinadvert": 25373, + "HI": 25374, + "ĠIMF": 25375, + "ĠGoal": 25376, + "Ġnerves": 25377, + "Johnson": 25378, + "eye": 25379, + "ablishment": 25380, + "Thursday": 25381, + "BILITY": 25382, + "Had": 25383, + "amoto": 25384, + "hetamine": 25385, + "eps": 25386, + "Ġmitochond": 25387, + "Ġcompressed": 25388, + "ĠTrevor": 25389, + "ĠAnimals": 25390, + "Tool": 25391, + "Lock": 25392, + "Ġtweak": 25393, + "Ġpinch": 25394, + "Ġcancellation": 25395, + "Pot": 25396, + "Ġfocal": 25397, + "ĠAstron": 25398, + "173": 25399, + "ĠASC": 25400, + "ĠOTHER": 25401, + "umni": 25402, + "Ġdemise": 25403, + "dl": 25404, + "Ùħ": 25405, + "Semitism": 25406, + "Ġcracking": 25407, + "Ġcollaborative": 25408, + "Ġexplores": 25409, + "sql": 25410, + "Ġherbs": 25411, + "Ġconfigurations": 25412, + "mis": 25413, + "ĠResult": 25414, + "acey": 25415, + "ĠSmoke": 25416, + "Ġsanct": 25417, + "elia": 25418, + "Ġdegener": 25419, + "Ġdeepest": 25420, + "Ġscreamed": 25421, + "Ġnap": 25422, + "Software": 25423, + "ĠSTAR": 25424, + "EF": 25425, + "ĠXin": 25426, + "sponsored": 25427, + "manship": 25428, + "233": 25429, + "Ġprimaries": 25430, + "Ġfiltering": 25431, + "Ġassemble": 25432, + "mil": 25433, + "ĠMyers": 25434, + "bows": 25435, + "Ġpunched": 25436, + "Mic": 25437, + "Ġinnovations": 25438, + "Ġfunc": 25439, + "ando": 25440, + "Ġfracking": 25441, + "ĠVul": 25442, + "оÐ": 25443, + "oshop": 25444, + "ĠImmun": 25445, + "Ġsettling": 25446, + "Ġadolescents": 25447, + "Ġrebuilding": 25448, + "Ġtransforming": 25449, + "Ġparole": 25450, + "Ġharbor": 25451, + "Ġbooking": 25452, + "otional": 25453, + "ongevity": 25454, + "ĠYo": 25455, + "bug": 25456, + "Ġemerges": 25457, + "ĠMethods": 25458, + "ĠChu": 25459, + "Pres": 25460, + "ĠDungeons": 25461, + "Ġtrailing": 25462, + "ĠRum": 25463, + "ĠHugh": 25464, + "天": 25465, + "ĠEra": 25466, + "ĠBattles": 25467, + "Results": 25468, + "ĠTrading": 25469, + "Ġversa": 25470, + "css": 25471, + "axies": 25472, + "heet": 25473, + "Ġgreed": 25474, + "1989": 25475, + "Ġgardens": 25476, + "Ġcontingent": 25477, + "Park": 25478, + "ĠLeafs": 25479, + "hook": 25480, + "robe": 25481, + "Ġdiplomacy": 25482, + "ĠFuel": 25483, + "ĠInvasion": 25484, + "Ġupgrading": 25485, + "Male": 25486, + "Ġelic": 25487, + "Ġrelentless": 25488, + "ĠCovenant": 25489, + "apesh": 25490, + "ĠTrop": 25491, + "Ty": 25492, + "production": 25493, + "arty": 25494, + "Ġpunches": 25495, + "ako": 25496, + "cyclopedia": 25497, + "ĠRabbit": 25498, + "ĠHDMI": 25499, + "Ġ141": 25500, + "Ġfoil": 25501, + "ItemImage": 25502, + "ĠFG": 25503, + "Ġimplementations": 25504, + "ĠPom": 25505, + "ixtures": 25506, + "Ġawait": 25507, + "Ġ330": 25508, + "amus": 25509, + "Ġumbrella": 25510, + "Ġforesee": 25511, + "separ": 25512, + "Ġcircumcision": 25513, + "Ġperipheral": 25514, + "Say": 25515, + "ĠExpert": 25516, + "Inc": 25517, + "Ġwithdrew": 25518, + "ĠAnders": 25519, + "fried": 25520, + "Ġradioactive": 25521, + "ĠOpening": 25522, + "Ġboarding": 25523, + "ĠND": 25524, + "Ġoverthrow": 25525, + "Activ": 25526, + "WP": 25527, + "ĠActs": 25528, + "×Ļ": 25529, + "Ġmotions": 25530, + "vic": 25531, + "ĠMighty": 25532, + "ĠDefender": 25533, + "aer": 25534, + "Ġthankful": 25535, + "ĠKilling": 25536, + "ĠBris": 25537, + "moil": 25538, + "Ġpredicting": 25539, + "266": 25540, + "choice": 25541, + "Ġkillers": 25542, + 
"Ġincub": 25543, + "ĠChest": 25544, + "athering": 25545, + "Ġproclaimed": 25546, + "flower": 25547, + "ossom": 25548, + "umbledore": 25549, + "ĠCycling": 25550, + "ĠOccupy": 25551, + "AGES": 25552, + "Pen": 25553, + "ĠYug": 25554, + "Ġpackaged": 25555, + "Ġheightened": 25556, + "cot": 25557, + "stack": 25558, + "Cond": 25559, + "Ġstamps": 25560, + "mage": 25561, + "Ġpersuaded": 25562, + "Ġensl": 25563, + "ĠCardinal": 25564, + "Ġsolitary": 25565, + "Ġpossessing": 25566, + "ĠCork": 25567, + "Ġevid": 25568, + "ĠTay": 25569, + "Ġblues": 25570, + "Ġextremism": 25571, + "Ġlunar": 25572, + "Ġclown": 25573, + "Techn": 25574, + "Ġfestivals": 25575, + "ĠPvP": 25576, + "ĠLar": 25577, + "Ġconsequently": 25578, + "present": 25579, + "Ġsomeday": 25580, + "çİĭ": 25581, + "ĠMeteor": 25582, + "Ġtouring": 25583, + "culture": 25584, + "Ġbeaches": 25585, + "Ship": 25586, + "cause": 25587, + "ĠFlood": 25588, + "ãĥ¯": 25589, + "Ġpurity": 25590, + "those": 25591, + "Ġemission": 25592, + "bolt": 25593, + "Ġchord": 25594, + "ĠScripture": 25595, + "Lu": 25596, + "Ġ${": 25597, + "created": 25598, + "Others": 25599, + "258": 25600, + "Ġelemental": 25601, + "Ġannoyed": 25602, + "ĠAE": 25603, + "dan": 25604, + "ĠSag": 25605, + "Researchers": 25606, + "Ġfairy": 25607, + "âĢĵâĢĵ": 25608, + "============": 25609, + "Smart": 25610, + "GGGG": 25611, + "Ġskeletons": 25612, + "Ġpupils": 25613, + "linked": 25614, + "Ġurgency": 25615, + "enabled": 25616, + "ĠFuck": 25617, + "Ġcouncill": 25618, + "rab": 25619, + "UAL": 25620, + "TI": 25621, + "Ġlifes": 25622, + "Ġconfessed": 25623, + "Bug": 25624, + "Ġharmon": 25625, + "ĠCONFIG": 25626, + "ĠNeutral": 25627, + "Double": 25628, + "Ġstaple": 25629, + "ĠSHA": 25630, + "British": 25631, + "ĠSNP": 25632, + "ATOR": 25633, + "oco": 25634, + "Ġswinging": 25635, + "gex": 25636, + "oleon": 25637, + "plain": 25638, + "ĠMissing": 25639, + "ĠTrophy": 25640, + "vari": 25641, + "ranch": 25642, + "Ġ301": 25643, + "440": 25644, + "0000000000000000": 25645, + "Ġrestoring": 25646, + "Ġhaul": 25647, + "ucing": 25648, + "nerg": 25649, + "Ġfutures": 25650, + "Ġstrategist": 25651, + "question": 25652, + "Ġlateral": 25653, + "ĠBard": 25654, + "Ġsor": 25655, + "ĠRhodes": 25656, + "ĠDowntown": 25657, + "?????-": 25658, + "ĠLit": 25659, + "ĠBened": 25660, + "Ġcoil": 25661, + "street": 25662, + "ĠPortal": 25663, + "FILE": 25664, + "ĠGru": 25665, + "*,": 25666, + "231": 25667, + "neum": 25668, + "Ġsucked": 25669, + "Ġrapper": 25670, + "Ġtendencies": 25671, + "ĠLauren": 25672, + "cellaneous": 25673, + "267": 25674, + "Ġbrowse": 25675, + "Ġoverc": 25676, + "header": 25677, + "oise": 25678, + "Ġbeet": 25679, + "ĠGle": 25680, + "Stay": 25681, + "Ġmum": 25682, + "Ġtyped": 25683, + "Ġdiscounts": 25684, + "Talk": 25685, + "ĠOg": 25686, + "existing": 25687, + "ĠSell": 25688, + "uph": 25689, + "CI": 25690, + "ĠAustrian": 25691, + "ĠWarm": 25692, + "Ġdismissal": 25693, + "Ġaverages": 25694, + "camera": 25695, + "Ġallegiance": 25696, + "LAN": 25697, + "=\"#": 25698, + "Ġcommentators": 25699, + "ĠSetting": 25700, + "ĠMidwest": 25701, + "Ġpharmac": 25702, + "ĠEXP": 25703, + "Ġstainless": 25704, + "Chicago": 25705, + "Ġtan": 25706, + "244": 25707, + "Ġcountryside": 25708, + "ĠVac": 25709, + "295": 25710, + "Ġpinned": 25711, + "Ġcrises": 25712, + "Ġstandardized": 25713, + "Task": 25714, + "ĠJail": 25715, + "ĠDocker": 25716, + "colored": 25717, + "forth": 25718, + "\"},": 25719, + "Ġpatrons": 25720, + "Ġspice": 25721, + "Ġmourn": 25722, + "ĠMood": 25723, + "Ġlaundry": 25724, + "Ġequip": 25725, + "ĠMole": 25726, + "yll": 
25727, + "ĠTHC": 25728, + "nation": 25729, + "ĠSherlock": 25730, + "Ġissu": 25731, + "ĠKre": 25732, + "ĠAmericas": 25733, + "ĠAAA": 25734, + "Ġsystematically": 25735, + "Ġcontra": 25736, + "ĠSally": 25737, + "Ġrationale": 25738, + "Ġcarriage": 25739, + "Ġpeaks": 25740, + "Ġcontradiction": 25741, + "ensation": 25742, + "ĠFailure": 25743, + "Ġprops": 25744, + "Ġnamespace": 25745, + "Ġcove": 25746, + "fields": 25747, + "ãĤĭ": 25748, + "Ġwool": 25749, + "ĠCatch": 25750, + "Ġpresumed": 25751, + "ĠDiana": 25752, + "ragon": 25753, + "igi": 25754, + "Ġhamm": 25755, + "Ġstunt": 25756, + "ĠGUI": 25757, + "ĠObservatory": 25758, + "ĠShore": 25759, + "Ġsmells": 25760, + "annah": 25761, + "Ġcockpit": 25762, + "ĠDuterte": 25763, + "850": 25764, + "Ġoppressed": 25765, + "breaker": 25766, + "ĠContribut": 25767, + "ĠPeru": 25768, + "ĠMonsanto": 25769, + "ĠAttempt": 25770, + "Ġcommanding": 25771, + "Ġfridge": 25772, + "ĠRin": 25773, + "ĠChess": 25774, + "uality": 25775, + "Ġol": 25776, + "Republican": 25777, + "ĠGlory": 25778, + "ĠWIN": 25779, + ".......": 25780, + "agent": 25781, + "reading": 25782, + "Ġinh": 25783, + "Jones": 25784, + "Ġclicks": 25785, + "alan": 25786, + "Ġ[];": 25787, + "ĠMajesty": 25788, + "ĠCed": 25789, + "opus": 25790, + "atel": 25791, + "ê": 25792, + "ARC": 25793, + "ĠEcuador": 25794, + "ãĥł": 25795, + "ĠKuro": 25796, + "Ġrituals": 25797, + "Ġcaptive": 25798, + "Ġounce": 25799, + "Ġdisagreement": 25800, + "Ġslog": 25801, + "fuel": 25802, + "Pet": 25803, + "Mail": 25804, + "Ġexercised": 25805, + "Ġsolic": 25806, + "Ġrainfall": 25807, + "Ġdevotion": 25808, + "ĠAssessment": 25809, + "Ġrobotic": 25810, + "options": 25811, + "ĠRP": 25812, + "ĠFamilies": 25813, + "ĠFlames": 25814, + "Ġassignments": 25815, + "007": 25816, + "akedown": 25817, + "Ġvocabulary": 25818, + "Reilly": 25819, + "Ġcaval": 25820, + "gars": 25821, + "Ġsuppressed": 25822, + "ĠSET": 25823, + "ĠJohns": 25824, + "Ġwarp": 25825, + "broken": 25826, + "Ġstatues": 25827, + "Ġadvocated": 25828, + "Ġ275": 25829, + "Ġperil": 25830, + "omorph": 25831, + "ĠFemin": 25832, + "perfect": 25833, + "Ġhatch": 25834, + "Lib": 25835, + "512": 25836, + "Ġlifelong": 25837, + "313": 25838, + "Ġcheeks": 25839, + "Ġnumbered": 25840, + "ĠMug": 25841, + "Body": 25842, + "ravel": 25843, + "Weight": 25844, + "ĠJak": 25845, + "ĠHeath": 25846, + "Ġkissing": 25847, + "ĠJUST": 25848, + "Ġwaving": 25849, + "upload": 25850, + "Ġinsider": 25851, + "ĠProgressive": 25852, + "ĠFilter": 25853, + "tta": 25854, + "ĠBeam": 25855, + "Ġviolently": 25856, + "ipation": 25857, + "Ġskepticism": 25858, + "Ġ1918": 25859, + "ĠAnnie": 25860, + "ĠSI": 25861, + "Ġgenetics": 25862, + "Ġonboard": 25863, + "atl": 25864, + "ĠFriedman": 25865, + "ĠBri": 25866, + "ceptive": 25867, + "Ġpirate": 25868, + "ĠReporter": 25869, + "278": 25870, + "Ġmythology": 25871, + "Ġeclipse": 25872, + "Ġskins": 25873, + "Ġglyph": 25874, + "ingham": 25875, + "Files": 25876, + "Cour": 25877, + "women": 25878, + "Ġregimes": 25879, + "Ġphotographed": 25880, + "Kat": 25881, + "ĠMAX": 25882, + "Officials": 25883, + "Ġunexpectedly": 25884, + "Ġimpressions": 25885, + "Front": 25886, + ";;;;;;;;": 25887, + "Ġsupremacy": 25888, + "Ġsang": 25889, + "Ġaggravated": 25890, + "Ġabruptly": 25891, + "ĠSector": 25892, + "Ġexcuses": 25893, + "Ġcosting": 25894, + "idepress": 25895, + "Stack": 25896, + "ĠRNA": 25897, + "obil": 25898, + "Ġghosts": 25899, + "ldon": 25900, + "atibility": 25901, + "Topics": 25902, + "Ġreimburse": 25903, + "ĠHM": 25904, + "ĠDeg": 25905, + "Ġthief": 25906, + "yet": 25907, + "ogenesis": 25908, 
+ "leaning": 25909, + "ĠKol": 25910, + "ĠBasketball": 25911, + "Ġfi": 25912, + "ĠSeeing": 25913, + "Ġrecycling": 25914, + "Ġ[-": 25915, + "Congress": 25916, + "Ġlectures": 25917, + "Psy": 25918, + "Ġnep": 25919, + "Ġmaid": 25920, + "Ġoriented": 25921, + "AX": 25922, + "Ġrespectful": 25923, + "rene": 25924, + "flush": 25925, + "ĠUnloaded": 25926, + "request": 25927, + "grid": 25928, + "ĠAlternatively": 25929, + "ĠHugo": 25930, + "Ġdecree": 25931, + "ĠBuddhism": 25932, + "andum": 25933, + "Android": 25934, + "ĠCongo": 25935, + "ĠJoyce": 25936, + "Ġacknowledging": 25937, + "hesive": 25938, + "ĠTomorrow": 25939, + "ĠHiro": 25940, + "thren": 25941, + "ĠMaced": 25942, + "Ġhoax": 25943, + "ĠIncreased": 25944, + "ĠPradesh": 25945, + "Wild": 25946, + "______": 25947, + "161": 25948, + "Ġaunt": 25949, + "Ġdistributing": 25950, + "ĠTucker": 25951, + "ĠSSL": 25952, + "ĠWolves": 25953, + "Building": 25954, + "oult": 25955, + "ĠLuo": 25956, + "ĠYas": 25957, + "ĠSpir": 25958, + "ĠShape": 25959, + "ĠCambod": 25960, + "ĠIPv": 25961, + "Ġml": 25962, + "Ġextrad": 25963, + "390": 25964, + "ĠPenny": 25965, + "dream": 25966, + "Ġstationed": 25967, + "optional": 25968, + "eworthy": 25969, + ".": 26700, + "ĠWorkshop": 26701, + "ĠRetail": 26702, + "ĠAvatar": 26703, + "625": 26704, + "Na": 26705, + "ĠVC": 26706, + "ĠSecure": 26707, + "MY": 26708, + "1988": 26709, + "ossip": 26710, + "Ġprostate": 26711, + "Ġunden": 26712, + "Ġgamer": 26713, + "ĠContents": 26714, + "ĠWarhammer": 26715, + "ĠSentinel": 26716, + "310": 26717, + "Ġsegregation": 26718, + "ĠFlex": 26719, + "ĠMAY": 26720, + "Ġdrills": 26721, + "ĠDrugs": 26722, + "Islamic": 26723, + "Ġspur": 26724, + "Ġcafe": 26725, + "Ġimaginary": 26726, + "Ġguiding": 26727, + "Ġswings": 26728, + "ĠTheme": 26729, + "oby": 26730, + "Ġnud": 26731, + "Ġbegging": 26732, + "Ġstrongh": 26733, + "Ġrejecting": 26734, + "Ġpedestrians": 26735, + "ĠProspect": 26736, + "Rare": 26737, + "sle": 26738, + "Ġconcessions": 26739, + "ĠConstitutional": 26740, + "Ġbeams": 26741, + "Ġfibers": 26742, + "poon": 26743, + "Ġinstincts": 26744, + "property": 26745, + "ĠBIG": 26746, + "Sanders": 26747, + "imates": 26748, + "Ġcoating": 26749, + "Ġcorpses": 26750, + "ĠTRUE": 26751, + "checked": 26752, + "Ġ166": 26753, + "Ash": 26754, + "ĠJS": 26755, + "ĠFiction": 26756, + "Ġcommunal": 26757, + "Ġenergetic": 26758, + "oooooooo": 26759, + "Ġnowadays": 26760, + "ILD": 26761, + "ibo": 26762, + "ĠSUV": 26763, + "Ren": 26764, + "Ġdwelling": 26765, + "Silver": 26766, + "Ġtally": 26767, + "ĠMoving": 26768, + "Ġcoward": 26769, + "Ġgenerals": 26770, + "Ġhorns": 26771, + "Ġcirculated": 26772, + "Ġrobbed": 26773, + "ĠUnlimited": 26774, + "Ġharassed": 26775, + "Ġinhibit": 26776, + "Ġcomposer": 26777, + "ĠSpotify": 26778, + "Ġspreads": 26779, + "364": 26780, + "Ġsuicidal": 26781, + "Ġnoises": 26782, + "ĠStur": 26783, + "Ġsaga": 26784, + "ĠKag": 26785, + "iso": 26786, + "Ġtheoretically": 26787, + "Money": 26788, + "Ġsimilarity": 26789, + "Ġsliced": 26790, + "utils": 26791, + "inges": 26792, + "\"-": 26793, + "Ġanth": 26794, + "Ġimped": 26795, + "Module": 26796, + "Throughout": 26797, + "Ġmenus": 26798, + "committee": 26799, + "andi": 26800, + "obj": 26801, + "inav": 26802, + "fired": 26803, + "ĠAbdullah": 26804, + "Ġundead": 26805, + "Ġfonts": 26806, + "Hold": 26807, + "ENG": 26808, + "Ġsustainability": 26809, + "Ġflick": 26810, + "Ġrazor": 26811, + "ĠFest": 26812, + "ĠCharacters": 26813, + "Ġwording": 26814, + "Ġpopulist": 26815, + "Ġcriticizing": 26816, + "Ġmuse": 26817, + "vine": 26818, + "Ġcardboard": 26819, + 
"Ġkindly": 26820, + "Ġfringe": 26821, + "ĠTheft": 26822, + "icultural": 26823, + "Ġgovernors": 26824, + "Ġ����": 26825, + "Ġ163": 26826, + "Ġtimeout": 26827, + "ĠAuth": 26828, + "Children": 26829, + "AU": 26830, + "Ġredemption": 26831, + "ĠAlger": 26832, + "Ġ1914": 26833, + "Ġwaved": 26834, + "Ġastronauts": 26835, + "ograms": 26836, + "Ġswamp": 26837, + "ĠFinnish": 26838, + "Ġcandle": 26839, + "Ġtonnes": 26840, + "utm": 26841, + "Ġray": 26842, + "Ġspun": 26843, + "Ġfearful": 26844, + "articles": 26845, + "Ġcaus": 26846, + "orically": 26847, + "ĠRequires": 26848, + "ĠGol": 26849, + "Ġpope": 26850, + "Ġinaugural": 26851, + "Ġgle": 26852, + "ADA": 26853, + "ĠISIL": 26854, + "ĠOffensive": 26855, + "Ġwatchdog": 26856, + "Ġbalcon": 26857, + "entity": 26858, + "ĠHoo": 26859, + "Ġgallon": 26860, + "ACC": 26861, + "Ġdoubling": 26862, + "Ġimplication": 26863, + "ĠSight": 26864, + "Ġdoctr": 26865, + "-------": 26866, + "Ġ\\\\": 26867, + "Ġmalt": 26868, + "Roll": 26869, + "Ġâī¥": 26870, + "Ġrecap": 26871, + "adding": 26872, + "uces": 26873, + "ĠBend": 26874, + "figure": 26875, + "Ġturkey": 26876, + "Ġsocietal": 26877, + "ĠTickets": 26878, + "Ġcommercially": 26879, + "Ġspicy": 26880, + "Ġ216": 26881, + "ĠRamp": 26882, + "Ġsuperiority": 26883, + "ï": 26884, + "ĠTracker": 26885, + "Carl": 26886, + "ĠCoy": 26887, + "ĠPatriot": 26888, + "Ġconsulted": 26889, + "Ġlistings": 26890, + "Ġslew": 26891, + "reenshot": 26892, + "ĠGone": 26893, + "Ġ[...]": 26894, + "309": 26895, + "Ġhottest": 26896, + "ر": 26897, + "Ġrocky": 26898, + "ĠDiaz": 26899, + "Ġmassage": 26900, + "Ġparaly": 26901, + "Ġpony": 26902, + "Az": 26903, + "Ġcartridge": 26904, + "ĠNZ": 26905, + "Ġsnack": 26906, + "ĠLamar": 26907, + "plement": 26908, + "ĠLeslie": 26909, + "Ġmater": 26910, + "Ġsnipp": 26911, + "246": 26912, + "Ġjointly": 26913, + "ĠBrisbane": 26914, + "ĠiPod": 26915, + "Ġpumping": 26916, + "Ġgoat": 26917, + "ĠSharon": 26918, + "ealing": 26919, + "Ġcoron": 26920, + "Ġanomal": 26921, + "rahim": 26922, + "ĠConnection": 26923, + "Ġsculpture": 26924, + "Ġscheduling": 26925, + "ĠDaddy": 26926, + "athing": 26927, + "Ġeyebrows": 26928, + "Ġcurved": 26929, + "Ġsentiments": 26930, + "Ġdrafting": 26931, + "Drop": 26932, + "([": 26933, + "Ġnominal": 26934, + "ĠLeadership": 26935, + "ĠGrow": 26936, + "Ġ176": 26937, + "Ġconstructive": 26938, + "ivation": 26939, + "Ġcorrupted": 26940, + "gerald": 26941, + "ĠCros": 26942, + "ĠChester": 26943, + "ĠLap": 26944, + "ãģª": 26945, + "OTH": 26946, + "DATA": 26947, + "Ġalmond": 26948, + "probably": 26949, + "Imp": 26950, + "Ġfeast": 26951, + "ĠWarcraft": 26952, + "Flor": 26953, + "Ġcheckpoint": 26954, + "Ġtranscription": 26955, + "Ġ204": 26956, + "Ġtweaks": 26957, + "Ġrelieve": 26958, + "Science": 26959, + "Ġperformer": 26960, + "Zone": 26961, + "Ġturmoil": 26962, + "igated": 26963, + "hibit": 26964, + "ĠCafe": 26965, + "themed": 26966, + "Ġfluor": 26967, + "bench": 26968, + "Ġdecom": 26969, + "ĠUnt": 26970, + "ĠBarrett": 26971, + "ĠFacts": 26972, + "Ġtasting": 26973, + "ĠPTSD": 26974, + "ĠSeal": 26975, + "ĠJudaism": 26976, + "ĠDynamic": 26977, + "ĠCors": 26978, + "Ve": 26979, + "ĠMing": 26980, + "ĠTransform": 26981, + "von": 26982, + "ĠDefenders": 26983, + "ĠTactical": 26984, + "ĠVon": 26985, + "ĠUnivers": 26986, + "Ġdistorted": 26987, + "ĠBreath": 26988, + "?'\"": 26989, + "Ġagon": 26990, + "ĠDeadly": 26991, + "Ġlan": 26992, + "ĠCycle": 26993, + "orned": 26994, + "Ġreliably": 26995, + "Ġglor": 26996, + "ĠMonkey": 26997, + "ãĥ¡": 26998, + "Ġadren": 26999, + "Ġmicrowave": 27000, + "ĠAlban": 27001, + 
"ircraft": 27002, + "digit": 27003, + "smart": 27004, + "ĠDread": 27005, + "¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯": 27006, + "{{": 27007, + "ĠRochester": 27008, + "Ġsimplified": 27009, + "Ġinflicted": 27010, + "Ġtakeover": 27011, + "Ġyourselves": 27012, + "aditional": 27013, + "Ġmuscular": 27014, + "KS": 27015, + "Ġingen": 27016, + "Tax": 27017, + "ĠFeature": 27018, + "277": 27019, + "Ġcruc": 27020, + "Ġcrate": 27021, + "Ġunidentified": 27022, + "Ġacclaimed": 27023, + "ĠManga": 27024, + "ĠFrances": 27025, + "ĠNepal": 27026, + "ĠGerald": 27027, + "ĠKuwait": 27028, + "Ġslain": 27029, + "ĠHeb": 27030, + "ĠGoku": 27031, + "ãģ®æ": 27032, + "286": 27033, + "Mrs": 27034, + "ĠCody": 27035, + "ĠSanctuary": 27036, + "016": 27037, + "Ġdismant": 27038, + "Ġdataset": 27039, + "ĠHond": 27040, + "buck": 27041, + "ĠPatterson": 27042, + "Ġpalette": 27043, + "ĠGD": 27044, + "icol": 27045, + "ĠLodge": 27046, + "Ġplanetary": 27047, + "akin": 27048, + "ĠRegistered": 27049, + "abwe": 27050, + "ĠPetersburg": 27051, + "Ġhailed": 27052, + "ĠPiece": 27053, + "Sche": 27054, + "ĠDOJ": 27055, + "Ġenumer": 27056, + "181": 27057, + "ĠObserver": 27058, + "ĠBold": 27059, + "founded": 27060, + "commerce": 27061, + "Ġexploits": 27062, + "ĠFinding": 27063, + "URN": 27064, + "ĠSne": 27065, + "ĠAcid": 27066, + "ayette": 27067, + "ĠValues": 27068, + "Ġdrastic": 27069, + "Ġarchitectural": 27070, + "Ġ\".": 27071, + "×ķ": 27072, + "umped": 27073, + "Ġwrapping": 27074, + "Ġwidow": 27075, + "ĠSlayer": 27076, + "lace": 27077, + "once": 27078, + "Germany": 27079, + "avoid": 27080, + "Ġtemples": 27081, + "PAR": 27082, + "ô": 27083, + "ĠLucifer": 27084, + "ĠFlickr": 27085, + "lov": 27086, + "forces": 27087, + "Ġscouting": 27088, + "Ġlouder": 27089, + "tesy": 27090, + "Ġbeforehand": 27091, + "Äĵ": 27092, + "ĠNeon": 27093, + "ĠWol": 27094, + "ĠTypically": 27095, + "ĠPolitico": 27096, + "-+-+": 27097, + "Ġbuilder": 27098, + "Ġderive": 27099, + "Kill": 27100, + "Ġpoker": 27101, + "Ġambiguous": 27102, + "Ġlifts": 27103, + "Ġcyt": 27104, + "Ġribs": 27105, + "oodle": 27106, + "ĠSounds": 27107, + "hair": 27108, + "ĠSyndrome": 27109, + "tf": 27110, + "Ġproportional": 27111, + "uid": 27112, + "Ġpertaining": 27113, + "ĠKindle": 27114, + "ĠNegro": 27115, + "Ġreiterated": 27116, + "ĠTonight": 27117, + "oths": 27118, + "ĠCornell": 27119, + "Ġowing": 27120, + "Ġ208": 27121, + "elfare": 27122, + "ocating": 27123, + "ĠBirds": 27124, + "Subscribe": 27125, + "Ġessays": 27126, + "Ġburdens": 27127, + "Ġillustrations": 27128, + "arious": 27129, + "ERAL": 27130, + "ĠCalcul": 27131, + "Ġxen": 27132, + "ĠLinkedIn": 27133, + "ĠJung": 27134, + "Ġredesign": 27135, + "Connor": 27136, + "296": 27137, + "Ġreversal": 27138, + "ĠAdelaide": 27139, + "ĠLL": 27140, + "Ġsinking": 27141, + "Ġgum": 27142, + "USH": 27143, + "capt": 27144, + "ĠGrimm": 27145, + "Ġfootsteps": 27146, + "ĠCBD": 27147, + "ispers": 27148, + "Ġprose": 27149, + "Wednesday": 27150, + "ĠMovies": 27151, + "edin": 27152, + "Ġoverturned": 27153, + "Ġcontentious": 27154, + "USB": 27155, + "~~~~~~~~~~~~~~~~": 27156, + "ĠCopper": 27157, + "Ġpointless": 27158, + "NV": 27159, + "values": 27160, + "olphin": 27161, + "dain": 27162, + "Ġdeposited": 27163, + "ĠGW": 27164, + "Ġpreceded": 27165, + "ĠCla": 27166, + "ĠGolem": 27167, + "ĠNim": 27168, + "Ġβ": 27169, + "ĠEngineers": 27170, + "middle": 27171, + "Ġflatt": 27172, + "operative": 27173, + "Ġcouncils": 27174, + "imbabwe": 27175, + "elin": 27176, + "Ġstressful": 27177, + "ĠLD": 27178, + "Ġresh": 27179, + "lake": 27180, + "Ġwheelchair": 27181, + "ĠAlternative": 27182, + "Ġoptimize": 
27183, + "operation": 27184, + "Ġpeek": 27185, + "Ġoneself": 27186, + "igil": 27187, + "Ġtransitions": 27188, + "opathy": 27189, + "blank": 27190, + "Ġ169": 27191, + "171": 27192, + "________________________________________________________________": 27193, + "Ġlaundering": 27194, + "Enc": 27195, + "ĠDEC": 27196, + "Ġworkouts": 27197, + "Ġspikes": 27198, + "Ġdinosaurs": 27199, + "Ġdiscriminatory": 27200, + "Pool": 27201, + "Rather": 27202, + "385": 27203, + "RNA": 27204, + "testers": 27205, + "eto": 27206, + "ĠIdentity": 27207, + "Ġvein": 27208, + "ĠBurton": 27209, + "Ġarcade": 27210, + "420": 27211, + "Ultimately": 27212, + "ĠSadly": 27213, + "ð": 27214, + "pill": 27215, + "Ġcubic": 27216, + "ĠSpectrum": 27217, + "these": 27218, + "states": 27219, + "Ġunofficial": 27220, + "hawks": 27221, + "ĠEVERY": 27222, + "Ġrainbow": 27223, + "Ġincarceration": 27224, + "anding": 27225, + "Ġsyll": 27226, + "ĠEverton": 27227, + "Ġ179": 27228, + "ĠSerbia": 27229, + "Ġ189": 27230, + "meter": 27231, + "ĠMickey": 27232, + "Ġantiqu": 27233, + "Ġfactual": 27234, + "neck": 27235, + "ĠNare": 27236, + "norm": 27237, + "must": 27238, + "Ġhighways": 27239, + "Ġglam": 27240, + "Ġdividing": 27241, + "ĠSquadron": 27242, + "ĠMartha": 27243, + "Ġbirths": 27244, + "Cover": 27245, + "////////////////": 27246, + "ĠWong": 27247, + "Phot": 27248, + "ĠALS": 27249, + "rio": 27250, + "ĠNonetheless": 27251, + "ĠLemon": 27252, + "Ġ206": 27253, + "ĠEE": 27254, + "Ġderivative": 27255, + "ĠWWII": 27256, + "vote": 27257, + "Ġtherein": 27258, + "Ġseparating": 27259, + "446": 27260, + "sync": 27261, + "ĠStreets": 27262, + "Ġratt": 27263, + "Ġmunicipality": 27264, + "ĠShortly": 27265, + "Ġmonk": 27266, + "),\"": 27267, + "Ġscrub": 27268, + "Ġoperatives": 27269, + "Neither": 27270, + "Place": 27271, + "ĠLimit": 27272, + "Female": 27273, + "ĠActor": 27274, + "Character": 27275, + "Ġconstituted": 27276, + "357": 27277, + "Ġprotested": 27278, + "ĠStraw": 27279, + "ĠHeight": 27280, + "ilda": 27281, + "ĠTyph": 27282, + "Ġfloods": 27283, + "Ġcosmetic": 27284, + "WAY": 27285, + "perture": 27286, + "upon": 27287, + "tons": 27288, + "essing": 27289, + "ĠPocket": 27290, + "Ġrooft": 27291, + "ĠCaucas": 27292, + "Ġantidepress": 27293, + "Ġincompatible": 27294, + "ECD": 27295, + "Ġopera": 27296, + "ĠContest": 27297, + "Ġgenerators": 27298, + "lime": 27299, + "Defense": 27300, + "1987": 27301, + "forum": 27302, + "Ġsavage": 27303, + "ĠHungarian": 27304, + "nz": 27305, + "Ġmetallic": 27306, + "Ġexpelled": 27307, + "Ġresidency": 27308, + "Ġdresses": 27309, + "666": 27310, + "ĠClement": 27311, + "fires": 27312, + "Category": 27313, + "Ġgeek": 27314, + "alis": 27315, + "Ġcemetery": 27316, + "educated": 27317, + "Ġcrawl": 27318, + "ĠUnable": 27319, + "ĠTyson": 27320, + "akis": 27321, + "Ġpardon": 27322, + "ĠWra": 27323, + "Ġstrengthened": 27324, + "ĠFors": 27325, + "335": 27326, + "ĠHC": 27327, + "ĠMond": 27328, + "Ġvisuals": 27329, + "ĠBeatles": 27330, + "ettlement": 27331, + "Ġï": 27332, + "gro": 27333, + "Ġbash": 27334, + "Ġpoorest": 27335, + "Ġexcel": 27336, + "Ġaspirations": 27337, + "ĠMunicip": 27338, + "ensible": 27339, + "Ġceremonies": 27340, + "Ġintimidation": 27341, + "ĠCONTR": 27342, + "beck": 27343, + "ĠKap": 27344, + "asu": 27345, + "Ġtrademarks": 27346, + "ĠSew": 27347, + "ĠCompetition": 27348, + "network": 27349, + "ĠArri": 27350, + "ĠTet": 27351, + "Roaming": 27352, + "WC": 27353, + "Dat": 27354, + "Ġsob": 27355, + "Ġpairing": 27356, + "Ġoverdose": 27357, + "SAY": 27358, + "aber": 27359, + "Ġrevolt": 27360, + "ĠFah": 27361, + "acting": 
27362, + "eq": 27363, + "estation": 27364, + "Fight": 27365, + "ĠMarks": 27366, + "273": 27367, + "Ġ178": 27368, + "Raw": 27369, + "ãģĭ": 27370, + "349": 27371, + "blocks": 27372, + "Ġverge": 27373, + "estine": 27374, + "ĠPodesta": 27375, + "Ġinvasive": 27376, + "Ġprofoundly": 27377, + "ĠAo": 27378, + "each": 27379, + "Ġlest": 27380, + "interpret": 27381, + "Ġshrinking": 27382, + "Ġerrone": 27383, + "Ġchees": 27384, + "lys": 27385, + "ĠIvy": 27386, + "ĠDirectory": 27387, + "Ġhinted": 27388, + "VICE": 27389, + "Ġcontacting": 27390, + "ĠGent": 27391, + "hei": 27392, + "Ġlabeling": 27393, + "Ġmercury": 27394, + "ĠLite": 27395, + "Ġexpires": 27396, + "Ġdestabil": 27397, + "ritis": 27398, + "cu": 27399, + "Ġfeathers": 27400, + "Ġsteer": 27401, + "Ġprogrammed": 27402, + "ĠVader": 27403, + "Going": 27404, + "ĠElim": 27405, + "Ġyo": 27406, + "ĠMiche": 27407, + "Ġ203": 27408, + "Ġsleeves": 27409, + "Ġbully": 27410, + "ĠHumans": 27411, + "368": 27412, + "Ġcompress": 27413, + "ĠBanner": 27414, + "ARS": 27415, + "Ġawhile": 27416, + "Ġcalib": 27417, + "Ġsponsorship": 27418, + "ĠDifficulty": 27419, + "ĠPapers": 27420, + "Ġidentifier": 27421, + "}.": 27422, + "Ġyog": 27423, + "ĠShia": 27424, + "Ġcleanup": 27425, + "Ġvibe": 27426, + "introdu": 27427, + "imming": 27428, + "Australia": 27429, + "Ġoutlines": 27430, + "ĠYoutube": 27431, + "train": 27432, + "ĠMakes": 27433, + "Ġdeported": 27434, + "Ġcentr": 27435, + "ĠDug": 27436, + "ĠBoulder": 27437, + "ĠBuffy": 27438, + "Ġinjunction": 27439, + "ĠHarley": 27440, + "ĠGroups": 27441, + "ĠDumbledore": 27442, + "ĠClara": 27443, + "Ġ\"-": 27444, + "Ġsacrificed": 27445, + "eph": 27446, + "Shadow": 27447, + "ibling": 27448, + "Ġfreelance": 27449, + "Ġevidently": 27450, + "phal": 27451, + "Ġretains": 27452, + "Mir": 27453, + "Ġfinite": 27454, + "dar": 27455, + "ĠCous": 27456, + "Ġrepaired": 27457, + "Ġperiodic": 27458, + "Ġchampionships": 27459, + "Ġasteroid": 27460, + "blind": 27461, + "Ġexpressly": 27462, + "ĠAstros": 27463, + "Ġscaled": 27464, + "Ġgeographical": 27465, + "ĠRapids": 27466, + "Enjoy": 27467, + "Ġelastic": 27468, + "ĠMohamed": 27469, + "Market": 27470, + "begin": 27471, + "Ġdiscovers": 27472, + "Ġtelecommunications": 27473, + "Ġscanner": 27474, + "Ġenlarge": 27475, + "Ġsharks": 27476, + "Ġpsychedel": 27477, + "ĠRouge": 27478, + "Ġsnapshot": 27479, + "isine": 27480, + "XP": 27481, + "Ġpesticides": 27482, + "ĠLSD": 27483, + "ĠDistribution": 27484, + "really": 27485, + "Ġdegradation": 27486, + "Ġdisguise": 27487, + "Ġbiom": 27488, + "ĠEXT": 27489, + "Ġequations": 27490, + "Ġhazards": 27491, + "ĠCompared": 27492, + ")*": 27493, + "Ġvirtues": 27494, + "Ġelders": 27495, + "Ġenhancing": 27496, + "ĠAcross": 27497, + "eros": 27498, + "angling": 27499, + "Ġcombust": 27500, + "ucci": 27501, + "Ġconcussion": 27502, + "Ġcontraception": 27503, + "ĠKang": 27504, + "Ġexpresses": 27505, + "Ġaux": 27506, + "ĠPione": 27507, + "Ġexhibits": 27508, + "Debug": 27509, + "OTAL": 27510, + "ĠAlready": 27511, + "ĠWheeler": 27512, + "Ġexpands": 27513, + "?:": 27514, + "Ġreconciliation": 27515, + "Ġpirates": 27516, + "Ġpurse": 27517, + "Ġdiscourage": 27518, + "Ġspectacle": 27519, + "Rank": 27520, + "Ġwraps": 27521, + "ĠThought": 27522, + "Ġimpending": 27523, + "Opp": 27524, + "ĠAnglo": 27525, + "ĠEUR": 27526, + "Ġscrewed": 27527, + "retched": 27528, + "Ġencouragement": 27529, + "models": 27530, + "Ġconfuse": 27531, + "mmm": 27532, + "ĠVitamin": 27533, + "âĸijâĸij": 27534, + "Cru": 27535, + "Ġknights": 27536, + "Ġdiscard": 27537, + "Ġbishops": 27538, + "ĠWear": 27539, + 
"ĠGarrett": 27540, + "kan": 27541, + "ãĥŁ": 27542, + "Ġmasculine": 27543, + "capital": 27544, + "ĠAus": 27545, + "Ġfatally": 27546, + "thanks": 27547, + "ĠAU": 27548, + "ĠGut": 27549, + "1200": 27550, + "Ġ00000000": 27551, + "Ġsurrog": 27552, + "ĠBIOS": 27553, + "raits": 27554, + "ĠWatts": 27555, + "Ġresurrection": 27556, + "ĠElectoral": 27557, + "ĠTips": 27558, + "4000": 27559, + "Ġnutrient": 27560, + "Ġdepicting": 27561, + "Ġsprink": 27562, + "Ġmuff": 27563, + "ĠLIM": 27564, + "ĠSample": 27565, + "psc": 27566, + "ibi": 27567, + "generated": 27568, + "Ġspecimens": 27569, + "Ġdissatisf": 27570, + "Ġtailored": 27571, + "Ġholdings": 27572, + "ĠMonthly": 27573, + "ĠEat": 27574, + "poons": 27575, + "Ġnec": 27576, + "ĠCage": 27577, + "ĠLotus": 27578, + "ĠLantern": 27579, + "Ġfrontier": 27580, + "Ġpensions": 27581, + "Ġjoked": 27582, + "ĠHardy": 27583, + "=-=-=-=-": 27584, + "rade": 27585, + "UID": 27586, + "Ġrails": 27587, + "Ġemit": 27588, + "Ġslate": 27589, + "Ġsmug": 27590, + "Ġspit": 27591, + "ĠCalls": 27592, + "ĠJacobs": 27593, + "feat": 27594, + "ĠUE": 27595, + "Ġrestruct": 27596, + "Ġregeneration": 27597, + "Ġenergies": 27598, + "ĠConnor": 27599, + "OHN": 27600, + "ĠCheese": 27601, + "Ġger": 27602, + "Ġresurrect": 27603, + "management": 27604, + "NW": 27605, + "Ġpresently": 27606, + "ĠBruins": 27607, + "Member": 27608, + "ĠMang": 27609, + "idan": 27610, + "Ġboosting": 27611, + "wyn": 27612, + "+.": 27613, + "requisite": 27614, + "ĠNYPD": 27615, + "ĠMegan": 27616, + "ĠConditions": 27617, + "Ġpics": 27618, + "nesium": 27619, + "ĠRash": 27620, + "Ġ174": 27621, + "ĠDucks": 27622, + "Ġembro": 27623, + "zu": 27624, + "onian": 27625, + "religious": 27626, + "Ġcraz": 27627, + "ĠACA": 27628, + "ĠZucker": 27629, + "EMA": 27630, + "ĠPros": 27631, + "Weapon": 27632, + "ĠKnox": 27633, + "ĠArduino": 27634, + "Ġstove": 27635, + "Ġheavens": 27636, + "ĠPurchase": 27637, + "Ġherd": 27638, + "Ġfundraiser": 27639, + "Digital": 27640, + "5000": 27641, + "Ġproponents": 27642, + "/âĢĭ": 27643, + "Ġjelly": 27644, + "ĠVisa": 27645, + "Ġmonks": 27646, + "Ġadvancement": 27647, + "ĠWer": 27648, + "Ġ187": 27649, + "eus": 27650, + "ertility": 27651, + "Ġfetal": 27652, + "Ġ1936": 27653, + "Lo": 27654, + "Ġoutfits": 27655, + "Ġstaircase": 27656, + "bomb": 27657, + "Ġcustomized": 27658, + "clair": 27659, + "Tree": 27660, + "Ġmapped": 27661, + "ĠConsidering": 27662, + "ĠTorres": 27663, + "Ġmethyl": 27664, + "Ġapproximate": 27665, + "Ġdoom": 27666, + "ĠHansen": 27667, + "Ġcrossover": 27668, + "Ġstandalone": 27669, + "ä¼": 27670, + "Ġinvites": 27671, + "Ġgraveyard": 27672, + "Ġhp": 27673, + "DonaldTrump": 27674, + "Ġescort": 27675, + "Gar": 27676, + "Ġpredecessors": 27677, + "Ġhay": 27678, + "Ġenzyme": 27679, + "ĠStraight": 27680, + "visors": 27681, + "Ing": 27682, + "aneously": 27683, + "ĠApplied": 27684, + "Ġfec": 27685, + "ĠDurant": 27686, + "Ġoutspoken": 27687, + "orb": 27688, + "Ġzeal": 27689, + "Ġdisgrace": 27690, + "').": 27691, + "ĠCheng": 27692, + "289": 27693, + "ĠRena": 27694, + "ĠSuicide": 27695, + "294": 27696, + "Ġoutraged": 27697, + "ĠNewman": 27698, + "ĠNvidia": 27699, + "ĠAber": 27700, + "ĠBers": 27701, + "Ġrecreation": 27702, + "Window": 27703, + "ĠDP": 27704, + "xe": 27705, + "Ġpedoph": 27706, + "Ġfallout": 27707, + "amboo": 27708, + "Ġpresentations": 27709, + "ĠApps": 27710, + "Ġhtml": 27711, + "345": 27712, + "ĠXXX": 27713, + "Ġrubbing": 27714, + "ĠLeather": 27715, + "Ġhumidity": 27716, + "seys": 27717, + "established": 27718, + "ĠUnits": 27719, + "646": 27720, + "Ġrespectable": 27721, + "Auto": 27722, 
+ "Ġthriving": 27723, + "ĠInnovation": 27724, + "angs": 27725, + "Extra": 27726, + "regulation": 27727, + "298": 27728, + "pick": 27729, + "Examples": 27730, + "ĠCJ": 27731, + "Attack": 27732, + "Ġdracon": 27733, + "LT": 27734, + "Ġsticker": 27735, + "rers": 27736, + "Ġsunny": 27737, + "Iss": 27738, + "regulated": 27739, + "dim": 27740, + "ĠAbstract": 27741, + "Ġhusbands": 27742, + "Office": 27743, + "omination": 27744, + "itars": 27745, + "ANGE": 27746, + "ascal": 27747, + "ĠKris": 27748, + "ĠInfantry": 27749, + "Ġmalf": 27750, + "ĠAthe": 27751, + "ĠRally": 27752, + "balanced": 27753, + "........................": 27754, + "OUP": 27755, + "Ġmolecule": 27756, + "metics": 27757, + "ĠSplit": 27758, + "ĠInstructions": 27759, + "ĠNights": 27760, + "cards": 27761, + "Ġtug": 27762, + "Ġcone": 27763, + "åŃ": 27764, + "Ġtx": 27765, + "ĠDiscussion": 27766, + "Ġcatastrophe": 27767, + "ppe": 27768, + "gio": 27769, + "Ġcommunism": 27770, + "Ġhalted": 27771, + "ĠGuant": 27772, + "clean": 27773, + "ĠSched": 27774, + "ĠKanye": 27775, + "Ġwander": 27776, + "ĠSeriously": 27777, + "Ġ188": 27778, + "ennial": 27779, + "follow": 27780, + "productive": 27781, + "ĠFlow": 27782, + "ĠSail": 27783, + "Ġcraw": 27784, + "Ġsimulations": 27785, + "oru": 27786, + "angles": 27787, + "ĠNolan": 27788, + "Ġmenstru": 27789, + "470": 27790, + "Ġ207": 27791, + "aja": 27792, + "Ġcasually": 27793, + "boarding": 27794, + "Ġ222": 27795, + "ovy": 27796, + "ĠNumbers": 27797, + "umat": 27798, + "OE": 27799, + "287": 27800, + "ĠClemson": 27801, + "Ġcerts": 27802, + "Ġslid": 27803, + "ĠTribe": 27804, + "Ġtoast": 27805, + "Ġfortunes": 27806, + "Ġfals": 27807, + "ĠCommittees": 27808, + "Ġgp": 27809, + "Ġfiery": 27810, + "ĠNets": 27811, + "ĠAnime": 27812, + "Package": 27813, + "ĠCompare": 27814, + "laughter": 27815, + "infect": 27816, + "Ġatrocities": 27817, + "Ġjustices": 27818, + "Ġinsults": 27819, + "ĠVernon": 27820, + "Ġshaken": 27821, + "Ġpersona": 27822, + "estamp": 27823, + "367": 27824, + "brain": 27825, + "Ġexperimenting": 27826, + "Ken": 27827, + "ĠElectronics": 27828, + "Ġ161": 27829, + "domain": 27830, + "Ġgraphical": 27831, + "bishop": 27832, + "Ġwhopping": 27833, + "ĠEvangel": 27834, + "Ġadvertisers": 27835, + "ĠSpear": 27836, + "Ġbids": 27837, + "Ġdestroys": 27838, + "utz": 27839, + "Ġundersc": 27840, + "ĠADD": 27841, + "Ġants": 27842, + "ĠCum": 27843, + "ipples": 27844, + "ĠFill": 27845, + "Ġglanced": 27846, + "Ġindicted": 27847, + "ĠEff": 27848, + "Ġmiscon": 27849, + "ĠDesktop": 27850, + "Ġabide": 27851, + "ãĥĢ": 27852, + "ĠIo": 27853, + "ĠCoul": 27854, + "Ġcapsule": 27855, + "ĠChrys": 27856, + "MON": 27857, + "Ġundes": 27858, + "ĠIRA": 27859, + "Ġcitation": 27860, + "Ġdictate": 27861, + "ĠNetworks": 27862, + "ĠConflict": 27863, + "ĠStuff": 27864, + "xa": 27865, + "isec": 27866, + "ĠChemistry": 27867, + "Ġquarterly": 27868, + "Williams": 27869, + "anan": 27870, + "Opt": 27871, + "ĠAlexandria": 27872, + "outheastern": 27873, + "ĠSpringfield": 27874, + "ĠBlacks": 27875, + "Ġgeography": 27876, + "242": 27877, + "Ġutmost": 27878, + "ĠExxon": 27879, + "abouts": 27880, + "EVA": 27881, + "ĠEnable": 27882, + "ĠBarr": 27883, + "Ġdisagreed": 27884, + "ĠCyprus": 27885, + "Ġdementia": 27886, + "Ġlabs": 27887, + "Ġubiquitous": 27888, + "ĠLOVE": 27889, + "Ġconsolidated": 27890, + "sr": 27891, + "Ġcreamy": 27892, + "ĠTimber": 27893, + "Regardless": 27894, + "ĠCertificate": 27895, + "Ġ\"...": 27896, + "ogenous": 27897, + "Captain": 27898, + "Ġinsulting": 27899, + "ĠSoros": 27900, + "ĠInstr": 27901, + "ĠBulgaria": 27902, + "better": 
27903, + "Ġsucking": 27904, + "ĠDavidson": 27905, + "atz": 27906, + "Ġcollateral": 27907, + "gif": 27908, + "Ġplagued": 27909, + "ĠCancel": 27910, + "ĠGardner": 27911, + "RB": 27912, + "Ġsixteen": 27913, + "Remove": 27914, + "uristic": 27915, + "cook": 27916, + "Rod": 27917, + "Ġcomprising": 27918, + "fle": 27919, + ")âĢĶ": 27920, + "ĠViking": 27921, + "growth": 27922, + "agonal": 27923, + "Ġsrf": 27924, + "afety": 27925, + "mot": 27926, + "Nearly": 27927, + "stown": 27928, + "ĠFactor": 27929, + "Ġautomobile": 27930, + "Ġprocedural": 27931, + "mask": 27932, + "ampires": 27933, + "Ġdisappears": 27934, + "jab": 27935, + "315": 27936, + "Ġ1951": 27937, + "needed": 27938, + "Ġdaring": 27939, + "leader": 27940, + "Ġpodium": 27941, + "Ġunhealthy": 27942, + "Ġmund": 27943, + "Ġpyramid": 27944, + "ocre": 27945, + "Ġkissed": 27946, + "Ġdreamed": 27947, + "ĠFantastic": 27948, + "ĠGly": 27949, + "åĬ": 27950, + "Ġgreatness": 27951, + "Ġspices": 27952, + "Ġmetropolitan": 27953, + "Ġcompuls": 27954, + "iets": 27955, + "1016": 27956, + "ĠSham": 27957, + "ĠPyr": 27958, + "flies": 27959, + "ĠMidnight": 27960, + "Ġswallowed": 27961, + "Ġgenres": 27962, + "ĠLucky": 27963, + "ĠRewards": 27964, + "Ġdispatch": 27965, + "ĠIPA": 27966, + "ĠApply": 27967, + "Ġaven": 27968, + "alities": 27969, + "312": 27970, + "things": 27971, + "Ġ().": 27972, + "Ġmates": 27973, + "ĠSz": 27974, + "ĠCOP": 27975, + "olate": 27976, + "OFF": 27977, + "Ġrecharge": 27978, + "caps": 27979, + "ĠYorker": 27980, + "icone": 27981, + "Ġgalaxies": 27982, + "ileaks": 27983, + "Dave": 27984, + "ĠPuzz": 27985, + "ĠCeltic": 27986, + "ĠAFC": 27987, + "276": 27988, + "ĠSons": 27989, + "Ġaffirmative": 27990, + "Hor": 27991, + "Ġtutorials": 27992, + "ĠCITY": 27993, + "ĠRosa": 27994, + "ĠExtension": 27995, + "Series": 27996, + "Ġfats": 27997, + "Ġrab": 27998, + "lis": 27999, + "Ġunic": 28000, + "Ġeve": 28001, + "ĠSpin": 28002, + "Ġadulthood": 28003, + "typ": 28004, + "Ġsectarian": 28005, + "Ġcheckout": 28006, + "ĠCycl": 28007, + "Single": 28008, + "Ġmartyr": 28009, + "Ġchilling": 28010, + "888": 28011, + "oufl": 28012, + "Ġ];": 28013, + "Ġcongestion": 28014, + "mk": 28015, + "ĠWhereas": 28016, + "Ġ1938": 28017, + "urrencies": 28018, + "erion": 28019, + "Ġboast": 28020, + "ĠPatients": 28021, + "Ġchap": 28022, + "ĠBD": 28023, + "realDonaldTrump": 28024, + "Ġexamines": 28025, + "hov": 28026, + "Ġstartling": 28027, + "ĠBabylon": 28028, + "wid": 28029, + "omew": 28030, + "brance": 28031, + "ĠOdyssey": 28032, + "wig": 28033, + "Ġtorch": 28034, + "ĠVox": 28035, + "ĠMoz": 28036, + "ĠTroll": 28037, + "ĠAns": 28038, + "Similarly": 28039, + "ĠFul": 28040, + "006": 28041, + "Unless": 28042, + "ĠAlone": 28043, + "stead": 28044, + "ĠPublisher": 28045, + "rights": 28046, + "tu": 28047, + "ĠDoesn": 28048, + "Ġprofessionally": 28049, + "Ġclo": 28050, + "icz": 28051, + "Ġsteals": 28052, + "Ġá": 28053, + "1986": 28054, + "Ġsturdy": 28055, + "ĠJohann": 28056, + "Ġmedals": 28057, + "Ġfilings": 28058, + "ĠFraser": 28059, + "done": 28060, + "Ġmultinational": 28061, + "Ġfeder": 28062, + "Ġworthless": 28063, + "Ġpest": 28064, + "Yesterday": 28065, + "ankind": 28066, + "Ġgays": 28067, + "Ġborne": 28068, + "ĠPOS": 28069, + "Picture": 28070, + "Ġpercentages": 28071, + "251": 28072, + "rame": 28073, + "Ġpotions": 28074, + "AMD": 28075, + "ĠLebanese": 28076, + "Ġrang": 28077, + "ĠLSU": 28078, + "ongs": 28079, + "Ġpeninsula": 28080, + "ĠClause": 28081, + "ALK": 28082, + "oha": 28083, + "ĠMacBook": 28084, + "Ġunanimous": 28085, + "Ġlenders": 28086, + "Ġhangs": 28087, + "Ġfranchises": 
28088, + "orers": 28089, + "ĠUpdates": 28090, + "Ġisolate": 28091, + "andro": 28092, + "Soon": 28093, + "Ġdisruptive": 28094, + "ĠSurve": 28095, + "Ġstitches": 28096, + "ĠScorp": 28097, + "ĠDominion": 28098, + "Ġsupplying": 28099, + "Arg": 28100, + "Ġturret": 28101, + "ĠLuk": 28102, + "Ġbrackets": 28103, + "*)": 28104, + "ĠRevolutionary": 28105, + "ĠHonest": 28106, + "Ġnoticing": 28107, + "ĠShannon": 28108, + "Ġafforded": 28109, + "Ġtha": 28110, + "ĠJanet": 28111, + "!--": 28112, + "ĠNarendra": 28113, + "ĠPlot": 28114, + "Hol": 28115, + "sever": 28116, + "eenth": 28117, + "Ġobstruction": 28118, + "Ġ1024": 28119, + "staff": 28120, + "jas": 28121, + "orget": 28122, + "scenes": 28123, + "laughs": 28124, + "ĠFargo": 28125, + "crime": 28126, + "Ġorchestr": 28127, + "Ġdelet": 28128, + "iliary": 28129, + "rieved": 28130, + "Ġmilitar": 28131, + "ĠGreene": 28132, + "âĹı": 28133, + "ãģ¦": 28134, + "ĠGuards": 28135, + "Ġunleashed": 28136, + "ĠWeber": 28137, + "Ġadjustable": 28138, + "Ġcaliber": 28139, + "Ġmotivations": 28140, + "ĠÃł": 28141, + "mAh": 28142, + "ĠLanka": 28143, + "handle": 28144, + "Ġpent": 28145, + "ĠRav": 28146, + "ĠAngular": 28147, + "ĠKau": 28148, + "umbing": 28149, + "Ġphilanthrop": 28150, + "Ġdehyd": 28151, + "Ġtoxicity": 28152, + "eer": 28153, + "ĠYORK": 28154, + "witz": 28155, + "å¼": 28156, + "ĠIE": 28157, + "community": 28158, + "ĠAH": 28159, + "Ġretali": 28160, + "Ġmassively": 28161, + "ĠDaniels": 28162, + "ĠDEL": 28163, + "Ġcarcin": 28164, + "Url": 28165, + "Ġrouting": 28166, + "ĠNPCs": 28167, + "ĠRAF": 28168, + "ryce": 28169, + "Ġwaived": 28170, + "ĠGuatem": 28171, + "Everybody": 28172, + "Ġcovenant": 28173, + "Ġ173": 28174, + "Ġrelaxing": 28175, + "Ġquart": 28176, + "almost": 28177, + "Ġguarded": 28178, + "ĠSoldiers": 28179, + "ĠPLAY": 28180, + "Ġoutgoing": 28181, + "LAND": 28182, + "Ġrewrite": 28183, + "ĠMOV": 28184, + "ĠImper": 28185, + "ĠSolution": 28186, + "Ġphenomenal": 28187, + "Ġlongevity": 28188, + "Ġimpat": 28189, + "ĠNissan": 28190, + "irie": 28191, + "Ġodor": 28192, + "ĠZar": 28193, + "oks": 28194, + "Ġmilitias": 28195, + "ĠSPEC": 28196, + "Ġtolerated": 28197, + "arser": 28198, + "ĠBradford": 28199, + "+,": 28200, + "Ġsurreal": 28201, + "sf": 28202, + "Canadian": 28203, + "Ġresemblance": 28204, + "Ġcarbohydrate": 28205, + "VIEW": 28206, + "Ġaccessory": 28207, + "meal": 28208, + "largest": 28209, + "iegel": 28210, + "Someone": 28211, + "Ġtoughest": 28212, + "oso": 28213, + "Ġfunnel": 28214, + "Ġcondemnation": 28215, + "luent": 28216, + "Ġwired": 28217, + "ĠSunset": 28218, + "Jesus": 28219, + "ĠPST": 28220, + "ĠPages": 28221, + "ĠTycoon": 28222, + "ĠPF": 28223, + "Ġselections": 28224, + "Ġà¤": 28225, + "partisan": 28226, + "Ġhighs": 28227, + "ĠRune": 28228, + "Ġcrafts": 28229, + "lead": 28230, + "ĠParents": 28231, + "Ġreclaim": 28232, + "eker": 28233, + "ĠAllied": 28234, + "aeper": 28235, + "Ġlooming": 28236, + "Ġbeneficiaries": 28237, + "ĠHull": 28238, + "Students": 28239, + "Jewish": 28240, + "dj": 28241, + "Ġpact": 28242, + "template": 28243, + "ĠOfficials": 28244, + "ĠBaylor": 28245, + "Ġhemp": 28246, + "Ġyouths": 28247, + "ĠLevels": 28248, + "ĠXiao": 28249, + "ĠChes": 28250, + "Ġendeavor": 28251, + "ĠRemoved": 28252, + "Ġhippocamp": 28253, + "Hell": 28254, + "ãĤĬ": 28255, + "805": 28256, + "Ġdinosaur": 28257, + "ĠWrath": 28258, + "ĠIndonesian": 28259, + "Ġcalculator": 28260, + "ĠDictionary": 28261, + "Ġ420": 28262, + "ĠMAG": 28263, + "(_": 28264, + "!,": 28265, + "tarians": 28266, + "Ġrestricting": 28267, + "racuse": 28268, + "Ġweekday": 28269, + "OUNT": 
28270, + "Ġshrugged": 28271, + "leground": 28272, + "Ġbald": 28273, + "ĠDoctors": 28274, + "Ġtouted": 28275, + "ĠMaxwell": 28276, + "Ġ214": 28277, + "Ġdiplomat": 28278, + "Ġrepression": 28279, + "Ġconstituency": 28280, + "vice": 28281, + "ranked": 28282, + "ĠNapoleon": 28283, + "gang": 28284, + "ĠForever": 28285, + "tun": 28286, + "Ġbulb": 28287, + "ĠPDT": 28288, + "ĠCisco": 28289, + "VEN": 28290, + "Ġresumed": 28291, + "Steven": 28292, + "ĠManitoba": 28293, + "Ġfabulous": 28294, + "ĠAgents": 28295, + "1984": 28296, + "Ġamusing": 28297, + "ĠMysteries": 28298, + "Ġorthodox": 28299, + "floor": 28300, + "Ġquestionnaire": 28301, + "Ġpenetrate": 28302, + "Ġfilmmakers": 28303, + "ĠUnc": 28304, + "Ġstamped": 28305, + "Ġthirteen": 28306, + "Ġoutfield": 28307, + "Ġforwarded": 28308, + "Ġappra": 28309, + "Ġaided": 28310, + "try": 28311, + "Ġunfocused": 28312, + "ĠLiz": 28313, + "ĠWendy": 28314, + "ĠScene": 28315, + "Charg": 28316, + "Ġrejects": 28317, + "Ġleftist": 28318, + "ĠProvidence": 28319, + "ĠBrid": 28320, + "regn": 28321, + "Ġprophecy": 28322, + "ĠLIVE": 28323, + "499": 28324, + "Ġforge": 28325, + "ĠFML": 28326, + "Ġintrinsic": 28327, + "ĠFrog": 28328, + "Ġwont": 28329, + "ĠHolt": 28330, + "Ġfamed": 28331, + "CLUS": 28332, + "aepernick": 28333, + "ĠHate": 28334, + "ĠCay": 28335, + "Ġregistering": 28336, + "ortality": 28337, + "ropy": 28338, + "ocalyptic": 28339, + "aan": 28340, + "nav": 28341, + "Ġfascist": 28342, + "IFIED": 28343, + "Ġimplicated": 28344, + "ĠResort": 28345, + "ĠChandler": 28346, + "ĠBrick": 28347, + "Pin": 28348, + "ysc": 28349, + "Usage": 28350, + "ĠHelm": 28351, + "usra": 28352, + "âĺħâĺħ": 28353, + "ĠAbbas": 28354, + "Ġunanimously": 28355, + "Ġkeeper": 28356, + "Ġaddicted": 28357, + "???": 28358, + "Ġhelmets": 28359, + "Ġantioxid": 28360, + "apsed": 28361, + "808": 28362, + "giene": 28363, + "Ġwaits": 28364, + "Ġminion": 28365, + "raved": 28366, + "ĠPorsche": 28367, + "Ġdreaming": 28368, + "Ġ171": 28369, + "ĠCain": 28370, + "Ġunfor": 28371, + "asso": 28372, + "ĠConfiguration": 28373, + "kun": 28374, + "hardt": 28375, + "Ġnested": 28376, + "ĠLDS": 28377, + "LES": 28378, + "Ġtying": 28379, + "enos": 28380, + "Ġcue": 28381, + "ĠMarqu": 28382, + "skirts": 28383, + "Ġclicked": 28384, + "Ġexpiration": 28385, + "ĠAccordingly": 28386, + "ĠWC": 28387, + "Ġblessings": 28388, + "Ġaddictive": 28389, + "ĠNarr": 28390, + "yx": 28391, + "ĠJaguars": 28392, + "Ġrents": 28393, + "ĠSiber": 28394, + "Ġtipped": 28395, + "ousse": 28396, + "ĠFitzgerald": 28397, + "Ġhierarch": 28398, + "outine": 28399, + "Ġwavelength": 28400, + ">.": 28401, + "chid": 28402, + "ĠProcessing": 28403, + "/+": 28404, + "ranking": 28405, + "Easy": 28406, + "ĠConstruct": 28407, + "Ġtet": 28408, + "insured": 28409, + "HUD": 28410, + "Ġquoting": 28411, + "Ġcommunicated": 28412, + "inx": 28413, + "Ġinmate": 28414, + "Ġerected": 28415, + "ĠAbsolutely": 28416, + "ĠSurely": 28417, + "Ġunim": 28418, + "ĠThrone": 28419, + "heid": 28420, + "Ġclaws": 28421, + "Ġsuperstar": 28422, + "ĠLenn": 28423, + "ĠWhis": 28424, + "Uk": 28425, + "abol": 28426, + "Ġsket": 28427, + "ĠNiet": 28428, + "Ġperks": 28429, + "Ġaffinity": 28430, + "Ġopenings": 28431, + "phasis": 28432, + "Ġdiscriminate": 28433, + "Tip": 28434, + "vc": 28435, + "Ġgrinding": 28436, + "ĠJenny": 28437, + "Ġasthma": 28438, + "holes": 28439, + "ĠHomer": 28440, + "Ġregisters": 28441, + "ĠGlad": 28442, + "Ġcreations": 28443, + "Ġlithium": 28444, + "Ġapplause": 28445, + "until": 28446, + "Justice": 28447, + "ĠTurks": 28448, + "Ġscandals": 28449, + "Ġbake": 28450, + "tank": 
28451, + "Mech": 28452, + "ĠMeans": 28453, + "ĠMaid": 28454, + "Republicans": 28455, + "isal": 28456, + "windows": 28457, + "ĠSantos": 28458, + "Ġvegetation": 28459, + "338": 28460, + "tri": 28461, + "Ġflux": 28462, + "insert": 28463, + "Ġclarified": 28464, + "Ġmortg": 28465, + "ĠChim": 28466, + "ĠTort": 28467, + "Ġdisclaim": 28468, + "metal": 28469, + "ĠAside": 28470, + "Ġinduction": 28471, + "Ġinfl": 28472, + "Ġatheists": 28473, + "amph": 28474, + "Ġether": 28475, + "ĠVital": 28476, + "ĠBuilt": 28477, + "Mind": 28478, + "Ġweaponry": 28479, + "SET": 28480, + "Ġ186": 28481, + "admin": 28482, + "gam": 28483, + "contract": 28484, + "afa": 28485, + "Ġderivatives": 28486, + "Ġsnacks": 28487, + "Ġchurn": 28488, + "Econom": 28489, + "Ġcapped": 28490, + "ĠUnderstanding": 28491, + "ĠHers": 28492, + "ĠIz": 28493, + "Ġduct": 28494, + "IENT": 28495, + "aughty": 28496, + "ĠâľĶ": 28497, + "ĠNP": 28498, + "Ġsailing": 28499, + "Initialized": 28500, + "Ġted": 28501, + "Ġreactors": 28502, + "ĠLomb": 28503, + "Ġchoke": 28504, + "ĠWorm": 28505, + "Ġadmiration": 28506, + "Ġswung": 28507, + "ensibly": 28508, + "Ġrash": 28509, + "ĠGoals": 28510, + "ĠImportant": 28511, + "Shot": 28512, + "ĠRas": 28513, + "Ġtrainers": 28514, + "ĠBun": 28515, + "Working": 28516, + "Ġharmed": 28517, + "ĠPandora": 28518, + "ĠLTE": 28519, + "Ġmushroom": 28520, + "ĠCHAR": 28521, + "ĠFee": 28522, + "ĠMoy": 28523, + "Born": 28524, + "oliberal": 28525, + "ĠMartial": 28526, + "Ġgentlemen": 28527, + "Ġlingering": 28528, + "Official": 28529, + "Ġgraffiti": 28530, + "ĠNames": 28531, + "Der": 28532, + "Ġquint": 28533, + "istrate": 28534, + "azeera": 28535, + "ĠNOTICE": 28536, + "ĠFlorence": 28537, + "Ġpayable": 28538, + "Ġdepicts": 28539, + "ĠSpecies": 28540, + "Heart": 28541, + "âĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢâĶĢ": 28542, + "Ġenclosed": 28543, + "Increases": 28544, + "Daily": 28545, + "ĠLis": 28546, + "Ġenactment": 28547, + "ĠBacon": 28548, + "ĠSteele": 28549, + "demand": 28550, + "Ġ183": 28551, + "Ġmouths": 28552, + "Ġstranded": 28553, + "Ġenhancement": 28554, + "011": 28555, + "ĠWhats": 28556, + "Ġhealed": 28557, + "eny": 28558, + "ĠRab": 28559, + "Ġ340": 28560, + "ĠLabyrinth": 28561, + "roach": 28562, + "ĠYosh": 28563, + "ĠClippers": 28564, + "Ġconcerts": 28565, + "Internet": 28566, + "355": 28567, + "Ġstickers": 28568, + "Ġtermed": 28569, + "ĠAxe": 28570, + "Ġgrandparents": 28571, + "France": 28572, + "ĠClim": 28573, + "ĠUh": 28574, + "ulic": 28575, + "Ġthrill": 28576, + "centric": 28577, + "ĠOverview": 28578, + "ĠConduct": 28579, + "Ġsubstantive": 28580, + "Ġ182": 28581, + "mur": 28582, + "Ġstray": 28583, + "ĠCoff": 28584, + "Ġrepetitive": 28585, + "ĠForgotten": 28586, + "Ġqualification": 28587, + "ewitness": 28588, + "ĠZimbabwe": 28589, + "Ġsimulated": 28590, + "ĠJD": 28591, + "253": 28592, + "ĠWare": 28593, + "Ġunsc": 28594, + "Times": 28595, + "Ġsummons": 28596, + "Ġdisconnected": 28597, + "Ġ184": 28598, + "cius": 28599, + "ĠGujar": 28600, + "odka": 28601, + "Ġerase": 28602, + "ĠTobacco": 28603, + "elected": 28604, + "Ġuncont": 28605, + "ĠShepard": 28606, + "ĠLamp": 28607, + "Ġalerted": 28608, + "Ġoperative": 28609, + "arna": 28610, + "uint": 28611, + "Ġnegligence": 28612, + "acements": 28613, + "Ġsupra": 28614, + "Ġprevail": 28615, + "ĠShark": 28616, + "Ġbelts": 28617, + "ãģ«": 28618, + "Ġtighter": 28619, + "Engineers": 28620, + "Ġinactive": 28621, + "Ġexponent": 28622, + "ĠWillie": 28623, + "aples": 28624, + "Ġheir": 28625, + "ĠHits": 28626, + "iann": 28627, + "ĠSays": 28628, + "Ġcurrents": 28629, + "ĠBengal": 28630, + "Ġarist": 28631, + 
"Buffer": 28632, + "Ġbreeze": 28633, + "ĠWesley": 28634, + "Cola": 28635, + "Ġpronoun": 28636, + "Ġdeed": 28637, + "ĠKling": 28638, + "Ġoft": 28639, + "Ġinflict": 28640, + "Ġpunishing": 28641, + "Ġnm": 28642, + "iku": 28643, + "ODUCT": 28644, + "014": 28645, + "Ġsubsidy": 28646, + "ĠDEA": 28647, + "ĠHerbert": 28648, + "ĠJal": 28649, + "Bank": 28650, + "Ġdeferred": 28651, + "Ġshipment": 28652, + "Bott": 28653, + "Ġalle": 28654, + "bearing": 28655, + "HTML": 28656, + "Offline": 28657, + "Ġ213": 28658, + "Ġscrolling": 28659, + "Ġscanned": 28660, + "ĠLibyan": 28661, + "ĠTOP": 28662, + "chrom": 28663, + "dt": 28664, + "column": 28665, + "PsyNetMessage": 28666, + "Zero": 28667, + "Ġtorso": 28668, + "050": 28669, + "âķIJ": 28670, + "Ġimperson": 28671, + "ĠSchwartz": 28672, + "udic": 28673, + "Ġpissed": 28674, + "ĠSapp": 28675, + "257": 28676, + "ĠISPs": 28677, + "ogl": 28678, + "Ġsupervised": 28679, + "Ġadolescent": 28680, + "Ġattained": 28681, + "ĠDelivery": 28682, + "ĠBunny": 28683, + "Ġ1937": 28684, + "Ġminiature": 28685, + "Ġos": 28686, + "Ġ370": 28687, + "608": 28688, + "ĠMourinho": 28689, + "Ġinnate": 28690, + "Ġtempo": 28691, + "ĠNM": 28692, + "ĠFallen": 28693, + "009": 28694, + "Ġprovocative": 28695, + "Streamer": 28696, + "ĠBenedict": 28697, + "ĠBolshe": 28698, + "Ġturtle": 28699, + "ĠPCB": 28700, + "ĠEqual": 28701, + "Director": 28702, + "ĠRend": 28703, + "Ġfluids": 28704, + "Authorities": 28705, + "Ġcousins": 28706, + "requency": 28707, + "ĠNeighbor": 28708, + "sets": 28709, + "shared": 28710, + "Charles": 28711, + "password": 28712, + "Ġgears": 28713, + "Ġ211": 28714, + "ĠHardware": 28715, + "rika": 28716, + "Ġupstream": 28717, + "Hom": 28718, + "Ġdisproportionately": 28719, + "ivities": 28720, + "Ġundefined": 28721, + "Ġelectrons": 28722, + "Ġcommemor": 28723, + "Eventually": 28724, + "Ġ><": 28725, + "Ġirresponsible": 28726, + "218": 28727, + "ĠReleased": 28728, + "ĠOVER": 28729, + "ĠIGN": 28730, + "ĠBread": 28731, + "stellar": 28732, + "ĠSage": 28733, + "tted": 28734, + "damage": 28735, + "edition": 28736, + "ĠPrec": 28737, + "Ġlime": 28738, + "Ġconfinement": 28739, + "Ġcalorie": 28740, + "weapon": 28741, + "Ġdiffering": 28742, + "ĠSina": 28743, + "mys": 28744, + "amd": 28745, + "Ġintricate": 28746, + "kk": 28747, + "ĠPAT": 28748, + "ão": 28749, + "stones": 28750, + "links": 28751, + "Ġranch": 28752, + "Semitic": 28753, + "Ġdifferentiate": 28754, + "ĠSinger": 28755, + "occupied": 28756, + "Ġfortress": 28757, + "cmd": 28758, + "Ġinterception": 28759, + "ĠAnkara": 28760, + "Ġrept": 28761, + "ĠSolitaire": 28762, + "Ġremake": 28763, + "pred": 28764, + "Ġdared": 28765, + "autions": 28766, + "ĠBACK": 28767, + "Running": 28768, + "Ġdebugging": 28769, + "Ġgraphs": 28770, + "399": 28771, + "ĠNigel": 28772, + "Ġbun": 28773, + "Ġpillow": 28774, + "Ġprogressed": 28775, + "fashioned": 28776, + "Ġobedience": 28777, + "ERN": 28778, + "Ġrehears": 28779, + "Cell": 28780, + "tl": 28781, + "Sher": 28782, + "Ġherald": 28783, + "ĠPayment": 28784, + "ĠCory": 28785, + "ĠDept": 28786, + "Ġrepent": 28787, + "ĠWeak": 28788, + "uckland": 28789, + "Ġpleasing": 28790, + "Ġshortages": 28791, + "Ġjurors": 28792, + "ĠKab": 28793, + "qqa": 28794, + "Anti": 28795, + "Ġwow": 28796, + "ĠRCMP": 28797, + "Ġtsun": 28798, + "ĠSic": 28799, + "Ġcomprises": 28800, + "Ġspies": 28801, + "Ġprecinct": 28802, + "nu": 28803, + "Ġurges": 28804, + "Ġtimed": 28805, + "Ġstripes": 28806, + "ĠBoots": 28807, + "Ġyen": 28808, + "Advanced": 28809, + "Ġdiscrete": 28810, + "ĠArchangel": 28811, + "employment": 28812, + "Diff": 28813, + 
"Ġmonuments": 28814, + "Ġ209": 28815, + "worker": 28816, + "Ġ196": 28817, + "ĠIg": 28818, + "utterstock": 28819, + "TPS": 28820, + "Jac": 28821, + "Ġhomelessness": 28822, + "Ġcommentator": 28823, + "Ġracially": 28824, + "fing": 28825, + "seed": 28826, + "Ele": 28827, + "ellation": 28828, + "Ġethanol": 28829, + "Ġparish": 28830, + "ĠDong": 28831, + "ĠAwakening": 28832, + "Ġdeviation": 28833, + "ĠBearing": 28834, + "ĠTsuk": 28835, + "Ġrecess": 28836, + "Ġlymph": 28837, + "ĠCannabis": 28838, + "åľ": 28839, + "ĠNEWS": 28840, + "Ġdra": 28841, + "ĠStefan": 28842, + "ĠWrong": 28843, + "ĠSAM": 28844, + "Ġloosely": 28845, + "Ġinterpreter": 28846, + "ĠPlain": 28847, + "Government": 28848, + "Ġbigotry": 28849, + "Ġgrenades": 28850, + "avez": 28851, + "pictured": 28852, + "Ġmandated": 28853, + "ĠMonk": 28854, + "ĠPedro": 28855, + "Ġlava": 28856, + "274": 28857, + "Ġcynical": 28858, + "ĠScrolls": 28859, + "locks": 28860, + "Mp": 28861, + "Ġcongregation": 28862, + "ornings": 28863, + "phil": 28864, + "ĠIbid": 28865, + "Ġferv": 28866, + "Ġdisappearing": 28867, + "Ġarrogant": 28868, + "syn": 28869, + "ĠMaver": 28870, + "ĠSuit": 28871, + "241": 28872, + "Ġabbre": 28873, + "ackers": 28874, + "Pa": 28875, + "ĠYel": 28876, + "Whenever": 28877, + "Ġ235": 28878, + "ĠVine": 28879, + "ĠAnat": 28880, + "Ġextinct": 28881, + "LET": 28882, + "Ġexecutable": 28883, + "VERS": 28884, + "oxide": 28885, + "DNA": 28886, + "ĠPrel": 28887, + "Ġresentment": 28888, + "Ġcomprise": 28889, + "ĠAviv": 28890, + "Ġinterceptions": 28891, + "Ġprolific": 28892, + "INA": 28893, + "ĠErin": 28894, + "thought": 28895, + "219": 28896, + "ĠPsychiatry": 28897, + "unky": 28898, + "chemist": 28899, + "Ho": 28900, + "ĠMcCoy": 28901, + "Ġbricks": 28902, + "Los": 28903, + "rily": 28904, + "ĠUSSR": 28905, + "Ġrud": 28906, + "Ġlaud": 28907, + "ĠWise": 28908, + "ĠEmerald": 28909, + "Ġrevived": 28910, + "Ġdamned": 28911, + "ĠRepair": 28912, + "idem": 28913, + "ctica": 28914, + "Ġpatriarch": 28915, + "ĠNurs": 28916, + "meg": 28917, + "Ġcheapest": 28918, + "reements": 28919, + "empty": 28920, + "ĠCelebr": 28921, + "Ġdeprivation": 28922, + "chanted": 28923, + "ĠThumbnails": 28924, + "Energy": 28925, + "ĠEthan": 28926, + "ĠQing": 28927, + "Ġopposes": 28928, + "WIND": 28929, + "vik": 28930, + "ĠMau": 28931, + "ĠSUB": 28932, + "667": 28933, + "GRE": 28934, + "ĠVolunte": 28935, + "nton": 28936, + "Cook": 28937, + "åIJ": 28938, + "esque": 28939, + "Ġplummet": 28940, + "Ġsuing": 28941, + "Ġpronounce": 28942, + "Ġresisting": 28943, + "ĠFishing": 28944, + "ĠTrials": 28945, + "Ġyell": 28946, + "Ġ310": 28947, + "Ġinduct": 28948, + "Ġpersonalized": 28949, + "often": 28950, + "Reb": 28951, + "EMBER": 28952, + "Ġviewpoint": 28953, + "Ġexistential": 28954, + "())": 28955, + "remove": 28956, + "MENTS": 28957, + "lasses": 28958, + "Ġevapor": 28959, + "Ġaisle": 28960, + "meta": 28961, + "Ġreflective": 28962, + "Ġentitlement": 28963, + "Ġdevised": 28964, + "music": 28965, + "ascade": 28966, + "Ġwinding": 28967, + "offset": 28968, + "Ġaccessibility": 28969, + "kered": 28970, + "Better": 28971, + "ĠJohnston": 28972, + "thinking": 28973, + "Snow": 28974, + "ĠCroatia": 28975, + "ĠAtomic": 28976, + "271": 28977, + "348": 28978, + "Ġtextbook": 28979, + "ĠSixth": 28980, + "ĠاÙĦ": 28981, + "Ġslider": 28982, + "ĠBurger": 28983, + "bol": 28984, + "Sync": 28985, + "Ġgrandchildren": 28986, + "Ġcerv": 28987, + "+)": 28988, + "Ġeternity": 28989, + "Ġtweeting": 28990, + "Ġspeculative": 28991, + "Ġpivotal": 28992, + "ĠWP": 28993, + "ĠTER": 28994, + "ynamic": 28995, + "Ġupl": 28996, + 
"ĠCats": 28997, + "perhaps": 28998, + "Ġclassmates": 28999, + "Ġblatant": 29000, + "'-": 29001, + "Ġlakh": 29002, + "antine": 29003, + "ĠBorg": 29004, + "iom": 29005, + "/(": 29006, + "ĠAthletic": 29007, + "Ġsar": 29008, + "OTA": 29009, + "ĠHoffman": 29010, + "Nevertheless": 29011, + "Ġadorable": 29012, + "Ġspawned": 29013, + "Associated": 29014, + "ĠDomestic": 29015, + "Ġimplant": 29016, + "ĠLuxem": 29017, + "ĠKens": 29018, + "Ġpumps": 29019, + "ĠSAT": 29020, + "Attributes": 29021, + "509": 29022, + "avour": 29023, + "Ġcentralized": 29024, + "ĠTN": 29025, + "Ġfreshly": 29026, + "ĠAchieve": 29027, + "Ġoutsiders": 29028, + "herty": 29029, + "ĠRee": 29030, + "ĠTowers": 29031, + "ĠDart": 29032, + "akable": 29033, + "Ġmp": 29034, + "ĠHeavenly": 29035, + "Ġripe": 29036, + "ĠCaroline": 29037, + "ryan": 29038, + "Ġclassics": 29039, + "Ġretiring": 29040, + "Ġ228": 29041, + "Ġah": 29042, + "Ġdealings": 29043, + "Ġpunching": 29044, + "ĠChapman": 29045, + "Options": 29046, + "maxwell": 29047, + "volume": 29048, + "Ġstal": 29049, + "Ġexported": 29050, + "ĠQuite": 29051, + "Ġnumerical": 29052, + "Burn": 29053, + "Fact": 29054, + "ĠKeystone": 29055, + "Ġtrending": 29056, + "Ġaltering": 29057, + "ĠAfricans": 29058, + "478": 29059, + "ĠMN": 29060, + "ĠKnock": 29061, + "Ġtemptation": 29062, + "Ġprestige": 29063, + "Overview": 29064, + "ĠTraditional": 29065, + "ĠBahrain": 29066, + "Private": 29067, + "ĠHOU": 29068, + "Ġbarr": 29069, + "ĠTat": 29070, + "Cube": 29071, + "USD": 29072, + "ĠGrande": 29073, + "ĠGat": 29074, + "ĠFlo": 29075, + "Ġresides": 29076, + "Ġindec": 29077, + "volent": 29078, + "Ġperpetual": 29079, + "ubes": 29080, + "Ġworldview": 29081, + "ĠQuantum": 29082, + "Ġfiltered": 29083, + "Ġensu": 29084, + "orgetown": 29085, + "ERSON": 29086, + "ĠMild": 29087, + "379": 29088, + "OTT": 29089, + "Ã¥": 29090, + "Ġvitamins": 29091, + "Ġribbon": 29092, + "Ġsincerely": 29093, + "ĠHin": 29094, + "Ġeighteen": 29095, + "Ġcontradictory": 29096, + "Ġglaring": 29097, + "Ġexpectancy": 29098, + "Ġconspir": 29099, + "Ġmonstrous": 29100, + "Ġ380": 29101, + "reci": 29102, + "Ġhandic": 29103, + "Ġpumped": 29104, + "Ġindicative": 29105, + "Ġrapp": 29106, + "Ġavail": 29107, + "ĠLEGO": 29108, + "ĠMarijuana": 29109, + "1985": 29110, + "erton": 29111, + "Ġtwentieth": 29112, + "################################": 29113, + "ĠSwamp": 29114, + "Ġvaluation": 29115, + "Ġaffiliates": 29116, + "adjusted": 29117, + "ĠFacility": 29118, + "262": 29119, + "Ġenzymes": 29120, + "itudinal": 29121, + "Ġimprint": 29122, + "Site": 29123, + "Ġinstaller": 29124, + "ĠTRA": 29125, + "mology": 29126, + "linear": 29127, + "ĠCollective": 29128, + "igating": 29129, + "ĠToken": 29130, + "Ġspeculated": 29131, + "KN": 29132, + "ĠCly": 29133, + "ority": 29134, + "Ġdefer": 29135, + "Ġinspectors": 29136, + "approved": 29137, + "RM": 29138, + "ĠSuns": 29139, + "Ġinforming": 29140, + "ĠSyracuse": 29141, + "ibli": 29142, + "765": 29143, + "Ġglove": 29144, + "Ġauthorize": 29145, + "âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦âĢ¦": 29146, + "ĠCruise": 29147, + "Ġcontracting": 29148, + "shell": 29149, + "IFE": 29150, + "ĠJewel": 29151, + "pract": 29152, + "ĠPhotoshop": 29153, + "ĠKnowing": 29154, + "harm": 29155, + "Ġattractions": 29156, + "adan": 29157, + "etus": 29158, + "018": 29159, + "wagen": 29160, + "Alt": 29161, + "Ġmultiply": 29162, + "Ġequilibrium": 29163, + ":{": 29164, + "ĠFighters": 29165, + "ĠEdgar": 29166, + "Ġfourteen": 29167, + "Govern": 29168, + "Ġmisuse": 29169, + "Ġabusing": 29170, + "Ġancestry": 29171, + "ramer": 29172, + "644": 29173, + "Ġworms": 29174, + 
"Ġthicker": 29175, + "ĠCombine": 29176, + "Ġpeasants": 29177, + "Ġvind": 29178, + "Ġconquest": 29179, + "Ġmocked": 29180, + "Ġcinnamon": 29181, + "ĠCald": 29182, + "ĠGallup": 29183, + "Ġavoidance": 29184, + "Ġincarnation": 29185, + "ĠStrat": 29186, + "Ġtasted": 29187, + "enta": 29188, + "ĠNeal": 29189, + "pared": 29190, + "Ġterminology": 29191, + "jection": 29192, + "Scientists": 29193, + "ĠINS": 29194, + "ĠDee": 29195, + "Ġdirectories": 29196, + "Road": 29197, + "ĠShap": 29198, + "bright": 29199, + "ĠDirectors": 29200, + "ĠColumn": 29201, + "Ġbob": 29202, + "Ġpreferably": 29203, + "Ġglitch": 29204, + "furt": 29205, + "Ġeg": 29206, + "idis": 29207, + "CBC": 29208, + "Ġsurrendered": 29209, + "Ġtestament": 29210, + "336": 29211, + "uggest": 29212, + "ĠNil": 29213, + "another": 29214, + "Ġpathetic": 29215, + "ĠDonna": 29216, + "Ġ218": 29217, + "ĠAvery": 29218, + "Ġwhiskey": 29219, + "Ġfixture": 29220, + "ĠConquest": 29221, + "Ġbets": 29222, + "Occ": 29223, + "ĠLeicester": 29224, + "].\"": 29225, + "Ġ));": 29226, + "Ġflashes": 29227, + "456": 29228, + "Ġmasked": 29229, + "gebra": 29230, + "Ġcomputed": 29231, + "chel": 29232, + "auder": 29233, + "Ġdefeats": 29234, + "ĠLiberation": 29235, + "ĠOsama": 29236, + "ĠVive": 29237, + "Changes": 29238, + "Channel": 29239, + "Ġtariffs": 29240, + "Ġmage": 29241, + "ĠSax": 29242, + "Ġinadvertently": 29243, + "ĠCRE": 29244, + "ĠReaper": 29245, + "inky": 29246, + "grading": 29247, + "Ġstereotyp": 29248, + "Ġcurl": 29249, + "ĠFANT": 29250, + "Ġframeworks": 29251, + "Mom": 29252, + "ĠAnch": 29253, + "Ġflavour": 29254, + "carbon": 29255, + "Ġpermitting": 29256, + "letcher": 29257, + "ĠMozilla": 29258, + "ĠParking": 29259, + "ĠChamp": 29260, + "Scroll": 29261, + "Ġmurderer": 29262, + "Ġrested": 29263, + "Ġowes": 29264, + "ĠPoss": 29265, + "ADD": 29266, + "IFF": 29267, + "resolution": 29268, + "ĠMining": 29269, + "Ġcomparative": 29270, + "Dim": 29271, + "Ġneighbouring": 29272, + "ĠAST": 29273, + "ĠToxic": 29274, + "Ġbiases": 29275, + "Ġgunfire": 29276, + "urous": 29277, + "ĠMoment": 29278, + "1983": 29279, + "Ġpervasive": 29280, + "ttp": 29281, + "ĠNormally": 29282, + "rir": 29283, + "Sarah": 29284, + "ĠAlbany": 29285, + "Ġunsett": 29286, + "ĠSMS": 29287, + "ipers": 29288, + "layer": 29289, + "ĠWhites": 29290, + "uple": 29291, + "Ġturbo": 29292, + "ĠLeeds": 29293, + "Ġthats": 29294, + "ĠMiner": 29295, + "MER": 29296, + "ĠReign": 29297, + "Ġperme": 29298, + "ĠBlitz": 29299, + "Ġ1934": 29300, + "Ġintimidating": 29301, + "tube": 29302, + "Ġeccentric": 29303, + "abolic": 29304, + "boxes": 29305, + "ĠAssociates": 29306, + "votes": 29307, + "Ġsimulate": 29308, + "umbo": 29309, + "astery": 29310, + "Ġshipments": 29311, + "FFFF": 29312, + "anth": 29313, + "Ġseasoned": 29314, + "Ġexperimentation": 29315, + "âĸł": 29316, + "laws": 29317, + "Meet": 29318, + "iddles": 29319, + "antics": 29320, + "Rating": 29321, + "ISIS": 29322, + "hift": 29323, + "Ġfronts": 29324, + "buf": 29325, + "017": 29326, + "Ġunatt": 29327, + "ĠDil": 29328, + "leases": 29329, + "ĠGardens": 29330, + "777": 29331, + "touch": 29332, + "vell": 29333, + "458": 29334, + "Ġ=====": 29335, + "saving": 29336, + "Ġerosion": 29337, + "ĠQuin": 29338, + "Ġearns": 29339, + "Ġaccomplishment": 29340, + "ĠWei": 29341, + "Ġ<[": 29342, + "_____": 29343, + "Ġirrig": 29344, + "ĠTeddy": 29345, + "Ġconquered": 29346, + "ĠArmored": 29347, + "Ġasserts": 29348, + "Ġmanipulating": 29349, + "ré": 29350, + "Ġtranscripts": 29351, + "Gallery": 29352, + "Ġplotting": 29353, + "Neil": 29354, + "Ġbetrayal": 29355, + "loader": 29356, + 
"ĠSul": 29357, + "Ġdisplacement": 29358, + "Ġroyalty": 29359, + "ĠWI": 29360, + "heit": 29361, + "ĠDevices": 29362, + "allel": 29363, + "Ġmunicipalities": 29364, + "Ġcanal": 29365, + "Stars": 29366, + "ĠUAE": 29367, + "Ġ\"âĢ¦": 29368, + "ĠCU": 29369, + "above": 29370, + "Ġresonance": 29371, + "ĠguiActiveUn": 29372, + "added": 29373, + "ĠBraves": 29374, + "ĠIbn": 29375, + "Ġhereby": 29376, + "ĠBRE": 29377, + "Ġshareholder": 29378, + "ĠHir": 29379, + "ĠJi": 29380, + "Ġstrangely": 29381, + "Ġadmired": 29382, + "Ġplight": 29383, + "Ġbachelor": 29384, + "ĠPole": 29385, + "ciplinary": 29386, + "Tony": 29387, + "ĠArmenian": 29388, + "Ġunman": 29389, + "ĠZionist": 29390, + "Stage": 29391, + "iscover": 29392, + "Ġautomotive": 29393, + "Ġsidelines": 29394, + "Ġslick": 29395, + "ĠRenaissance": 29396, + "ĠFUN": 29397, + "Images": 29398, + "ĠHaj": 29399, + "Ġping": 29400, + "Ġshortcut": 29401, + "ĠBlvd": 29402, + "ĠLooks": 29403, + "Ġbursts": 29404, + "Ġclamp": 29405, + "Ġmish": 29406, + "Ġsorting": 29407, + "Ġpatriot": 29408, + "Ġcorrectness": 29409, + "ĠScandinav": 29410, + "ĠCavaliers": 29411, + "python": 29412, + "azar": 29413, + "Ġ375": 29414, + "ĠJaune": 29415, + "409": 29416, + "Ġdetrimental": 29417, + "Ġstabbing": 29418, + "Ġpoisoned": 29419, + "Ġfountain": 29420, + "ocent": 29421, + "orst": 29422, + "ĠMari": 29423, + "Ġrains": 29424, + "ĠOvers": 29425, + "ĠInstitution": 29426, + "udget": 29427, + "AMY": 29428, + "tale": 29429, + "ĠKR": 29430, + "ĠPrices": 29431, + "Ġheadaches": 29432, + "Ġlandsl": 29433, + "ĠAura": 29434, + "Bonus": 29435, + "ĠZhao": 29436, + "ĠHip": 29437, + "Ġhops": 29438, + "ĠKurdistan": 29439, + "Ġexploiting": 29440, + "ryn": 29441, + "Ġhypocrisy": 29442, + "opening": 29443, + "Ġgunshot": 29444, + "Ġwed": 29445, + "interstitial": 29446, + "Interstitial": 29447, + "Ġamen": 29448, + "Breaking": 29449, + "Ġmarketed": 29450, + "Wire": 29451, + "ĠCrowd": 29452, + "Continue": 29453, + "ĠKnown": 29454, + "ĠEffective": 29455, + "orean": 29456, + "izons": 29457, + "Joseph": 29458, + "Ġescalation": 29459, + "username": 29460, + "Ġcurtain": 29461, + "ATES": 29462, + "ĠPAR": 29463, + "ĠMiy": 29464, + "Ġcounterfe": 29465, + "lene": 29466, + "Ġcontenders": 29467, + "daily": 29468, + "ĠAsc": 29469, + "ĠPhillip": 29470, + "mostly": 29471, + "Ġfilename": 29472, + "hene": 29473, + "Ġresembling": 29474, + "Ġstaging": 29475, + "ĠChloe": 29476, + "Ġwiring": 29477, + "Hon": 29478, + "ĠRenew": 29479, + "ottage": 29480, + "ĠHybrid": 29481, + "much": 29482, + "Ġstrokes": 29483, + "Ġpolicymakers": 29484, + "APTER": 29485, + "ĠArkham": 29486, + "plot": 29487, + "Ġassistants": 29488, + "Ġdeport": 29489, + "ĠSega": 29490, + "Ġinfluenza": 29491, + "ĠCursed": 29492, + "ĠKobe": 29493, + "Ġskinny": 29494, + "Provider": 29495, + "ĠRip": 29496, + "Ġincremental": 29497, + "products": 29498, + "BF": 29499, + "Ġdome": 29500, + "ĠCredits": 29501, + "Ġlosers": 29502, + "ints": 29503, + "ĠBetty": 29504, + "ĠTalent": 29505, + "ĠDAM": 29506, + "Lv": 29507, + "Ess": 29508, + "Ġdens": 29509, + "temp": 29510, + "Judge": 29511, + "odic": 29512, + "Ġ'(": 29513, + "URES": 29514, + "etsk": 29515, + "VO": 29516, + "Ġretrieved": 29517, + "Ġarchitects": 29518, + "Ùĩ": 29519, + "Ġethic": 29520, + "ĠSecondary": 29521, + "stocks": 29522, + "adia": 29523, + "Ġ325": 29524, + "ĠOpinion": 29525, + "Ġsimultaneous": 29526, + "Ġdizz": 29527, + "ulp": 29528, + "Ġsmuggling": 29529, + "ippery": 29530, + "Random": 29531, + "facing": 29532, + "ĠDas": 29533, + "Ġstockp": 29534, + "Ġdisclosures": 29535, + "pointer": 29536, + "Ġcoral": 29537, 
+ "ĠSelection": 29538, + "ĠPike": 29539, + "ivalent": 29540, + "Ġruthless": 29541, + "ĠRim": 29542, + "Ġensuing": 29543, + "ĠExperiment": 29544, + "Ġcongressman": 29545, + "Ġbeliever": 29546, + "Ġunspecified": 29547, + "ĠMord": 29548, + "Ġknowledgeable": 29549, + "ĠVERY": 29550, + "TX": 29551, + "Ġstraps": 29552, + "Ġturf": 29553, + "apeshifter": 29554, + "Ġmarital": 29555, + "Ġflock": 29556, + "ãģĨ": 29557, + "263": 29558, + "AMES": 29559, + "ĠOpposition": 29560, + "Ġtreasures": 29561, + "ĠGOD": 29562, + "Ġmodeled": 29563, + "ĠWORLD": 29564, + "Ġ([": 29565, + "ĠUsage": 29566, + "HF": 29567, + "Ġ$(": 29568, + "ussed": 29569, + "Ġpioneer": 29570, + "Eight": 29571, + "parse": 29572, + "bread": 29573, + "ritz": 29574, + "ĠMiranda": 29575, + "ĠKant": 29576, + "++)": 29577, + "oren": 29578, + "Ġprovoked": 29579, + "Ġbreeds": 29580, + "ĠIncludes": 29581, + "ĠPastebin": 29582, + "ĠFlip": 29583, + "Java": 29584, + "Ġbrink": 29585, + "Ġrumored": 29586, + "Ġunseen": 29587, + "Ġgarnered": 29588, + "ĠDefin": 29589, + "alted": 29590, + "Ġtattoos": 29591, + "Ġhesitation": 29592, + "isitions": 29593, + "ĠWeaver": 29594, + "ĠReporting": 29595, + "Ġtherapies": 29596, + "Ġconsultants": 29597, + "Ġresidual": 29598, + "ĠMali": 29599, + "ĠRoma": 29600, + "iago": 29601, + "ĠResidents": 29602, + "ubi": 29603, + "Ġremedies": 29604, + "Ġadaptive": 29605, + "ĠAlive": 29606, + "ĠBarcl": 29607, + "Ġwallets": 29608, + "crypt": 29609, + "etermination": 29610, + "ĠPelosi": 29611, + "Ġslipping": 29612, + "otonin": 29613, + "Ġalliances": 29614, + "patrick": 29615, + "iris": 29616, + "Ġorth": 29617, + "ĠPerkins": 29618, + "ĠDeV": 29619, + "ĠGets": 29620, + "Ġdrying": 29621, + "gee": 29622, + "forest": 29623, + "ĠForget": 29624, + "orem": 29625, + "339": 29626, + "Ġvaguely": 29627, + "ĠDion": 29628, + "ĠPorn": 29629, + "ĠHOW": 29630, + "Ġpneum": 29631, + "Ġrubble": 29632, + "ĠTaste": 29633, + "encia": 29634, + "ĠGel": 29635, + "Ġdst": 29636, + "Ġ245": 29637, + "ĠMorocco": 29638, + "inflamm": 29639, + "ĠTwins": 29640, + "Ġbots": 29641, + "daughter": 29642, + "ĠBalk": 29643, + "Ġbrethren": 29644, + "Ġlogos": 29645, + "Ġgobl": 29646, + "fps": 29647, + "Ġsubdivision": 29648, + "Ġpawn": 29649, + "Ġsqueezed": 29650, + "Ġmorale": 29651, + "ĠDW": 29652, + "'\"": 29653, + "Ġknot": 29654, + "ooky": 29655, + "Ġdivisive": 29656, + "Ġboosted": 29657, + "chy": 29658, + "ãĥIJ": 29659, + "ifact": 29660, + "Ġnewcomers": 29661, + "ĠWrestling": 29662, + "Ġscouts": 29663, + "wolves": 29664, + "Rat": 29665, + "Ġnineteenth": 29666, + "ĠOsborne": 29667, + "Stats": 29668, + "Ġempowered": 29669, + "Ġpsychopath": 29670, + "ĠOEM": 29671, + "uggage": 29672, + "ĠPK": 29673, + "ĠMohammad": 29674, + "Pak": 29675, + "Ġanarchists": 29676, + "ĠExtract": 29677, + "esthes": 29678, + "ĠStockholm": 29679, + "loo": 29680, + "ĠGraph": 29681, + "Ġdeploying": 29682, + "ĠStranger": 29683, + "ĠMold": 29684, + "Ġstaffer": 29685, + "Ġdiscounted": 29686, + "uckle": 29687, + "please": 29688, + "ĠLanding": 29689, + "ÃŃa": 29690, + "Ġ193": 29691, + "Ġante": 29692, + "Ġrepetition": 29693, + "Ġ+/-": 29694, + "Ġparody": 29695, + "Ġlively": 29696, + "AAA": 29697, + "ĠHorus": 29698, + "Ġpits": 29699, + "inders": 29700, + "LOC": 29701, + "ĠVenice": 29702, + "406": 29703, + "ĠDiscover": 29704, + "âĨ": 29705, + "ellectual": 29706, + "Ġpens": 29707, + "Ġeyel": 29708, + "iguous": 29709, + "Impl": 29710, + "Ġjoking": 29711, + "Ġinval": 29712, + "ĠBelfast": 29713, + "Ġcreditors": 29714, + "ĠSkywalker": 29715, + "ovsky": 29716, + "Ġceasefire": 29717, + "Ġseals": 29718, + "isoft": 
29719, + ")).": 29720, + "ĠFelix": 29721, + "ITS": 29722, + "Ġtresp": 29723, + "ĠBlockchain": 29724, + "eware": 29725, + "ĠSchwar": 29726, + "enne": 29727, + "mounted": 29728, + "ĠBeacon": 29729, + "lesh": 29730, + "Ġimmensely": 29731, + "Ġcheering": 29732, + "Employ": 29733, + "scene": 29734, + "ishly": 29735, + "atchewan": 29736, + "ĠNicolas": 29737, + "Ġdrained": 29738, + "ĠExit": 29739, + "ĠAzerb": 29740, + "jun": 29741, + "Ġfloated": 29742, + "uania": 29743, + "Deep": 29744, + "Ġsuperv": 29745, + "Ġmystical": 29746, + "ĠDollar": 29747, + "ĠApostle": 29748, + "ĠREL": 29749, + "ĠProvided": 29750, + "ĠBucks": 29751, + "ãĥ´": 29752, + "cutting": 29753, + "Ġenhancements": 29754, + "ĠPenguins": 29755, + "ĠIsaiah": 29756, + "Ġjerk": 29757, + "ĠWyn": 29758, + "Ġstalled": 29759, + "Ġcryptocurrencies": 29760, + "ĠRoland": 29761, + "single": 29762, + "Ġlumin": 29763, + "ĠFellow": 29764, + "ĠCapacity": 29765, + "ĠKazakh": 29766, + "WN": 29767, + "Ġfinanced": 29768, + "389": 29769, + "Ġtid": 29770, + "Ġcollusion": 29771, + "ĠMyr": 29772, + "îĢ": 29773, + "Senator": 29774, + "Ġpediatric": 29775, + "Ġneatly": 29776, + "Ġsandwiches": 29777, + "ĠArchitecture": 29778, + "Ġtucked": 29779, + "Ġbalcony": 29780, + "Ġearthquakes": 29781, + "quire": 29782, + "Future": 29783, + "Ġhefty": 29784, + "éĹ": 29785, + "Ġspecializes": 29786, + "Ġstresses": 29787, + "Ġsender": 29788, + "Ġmisunderstanding": 29789, + "Ġepile": 29790, + "Ġprovoke": 29791, + "ĠColors": 29792, + "Ġdismay": 29793, + "uko": 29794, + "[_": 29795, + "586": 29796, + "neutral": 29797, + "Ġdonating": 29798, + "ĠRandall": 29799, + "Multi": 29800, + "Ġconveniently": 29801, + "ĠSung": 29802, + "ĠCoca": 29803, + "Ġtents": 29804, + "ĠAcceler": 29805, + "Ġpartnered": 29806, + "272": 29807, + "irming": 29808, + "ĠBAS": 29809, + "sometimes": 29810, + "Ġobjected": 29811, + "ubric": 29812, + "posed": 29813, + "LCS": 29814, + "grass": 29815, + "Ġattributable": 29816, + "VIS": 29817, + "Israeli": 29818, + "Ġrepeats": 29819, + "ĠRM": 29820, + "vag": 29821, + "uta": 29822, + "inous": 29823, + "Ġinert": 29824, + "ĠMiguel": 29825, + "æŃ": 29826, + "ĠHawaiian": 29827, + "Board": 29828, + "Ġartific": 29829, + "ĠAzerbai": 29830, + "asio": 29831, + "ĠRent": 29832, + "AIN": 29833, + "Ġappliances": 29834, + "Ġnationality": 29835, + "Ġasshole": 29836, + "ĠNeb": 29837, + "Ġnotch": 29838, + "hani": 29839, + "ĠBride": 29840, + "Availability": 29841, + "Ġintercepted": 29842, + "Ġcontinental": 29843, + "Ġswelling": 29844, + "ĠPerspect": 29845, + "bies": 29846, + ".<": 29847, + "ithmetic": 29848, + "ĠLara": 29849, + "Ġtempting": 29850, + "addr": 29851, + "Ġoverseeing": 29852, + "clad": 29853, + "ĠDV": 29854, + "ĠGingrich": 29855, + "Ġmun": 29856, + "ĠAppropri": 29857, + "Ġalterations": 29858, + "ĠPatreon": 29859, + "Ġhavoc": 29860, + "Ġdisciplines": 29861, + "Ġnotoriously": 29862, + "akuya": 29863, + "ieri": 29864, + "?).": 29865, + "ĠWent": 29866, + "Ġsilicon": 29867, + "Ġtremb": 29868, + "Container": 29869, + "Known": 29870, + "Ġmortar": 29871, + "este": 29872, + "icka": 29873, + "Arthur": 29874, + "ĠPreviously": 29875, + "ĠMarty": 29876, + "Ġsparse": 29877, + "gins": 29878, + "Ġinward": 29879, + "ĠParticipant": 29880, + "Copy": 29881, + "ĠMisc": 29882, + "Ġantibiotic": 29883, + "ĠRetro": 29884, + "Ġelusive": 29885, + "Ġassail": 29886, + "ĠBattalion": 29887, + "ĠBought": 29888, + "Ġdiminish": 29889, + "ĠEuropa": 29890, + "session": 29891, + "ĠDangerous": 29892, + "iesel": 29893, + "Ġdisbelief": 29894, + "Ġblasts": 29895, + "extreme": 29896, + "ĠBoyd": 29897, + "ĠProjects": 
29898, + "ĠGuys": 29899, + "Ġundergone": 29900, + "Ġgrill": 29901, + "ĠDwight": 29902, + "Ġ197": 29903, + "USER": 29904, + "Ġfilesystem": 29905, + "Ġclocks": 29906, + "Taylor": 29907, + "Ġwrapper": 29908, + "Ġfolding": 29909, + "ousand": 29910, + "ĠPhilippine": 29911, + "ATIONAL": 29912, + "ĠPerth": 29913, + "Ġashes": 29914, + "Ġaccumulate": 29915, + "ĠGateway": 29916, + "Shop": 29917, + "orkshire": 29918, + "Han": 29919, + "ĠBarrel": 29920, + "ĠLeh": 29921, + "ĠXV": 29922, + "Ġwhim": 29923, + "Ġrepo": 29924, + "ĠCG": 29925, + "ĠMam": 29926, + "Ġincorporating": 29927, + "Ġbailout": 29928, + "Ġlinguistic": 29929, + "Ġdisinteg": 29930, + "CLE": 29931, + "Ġcinematic": 29932, + "ĠFiber": 29933, + "Syn": 29934, + "ilion": 29935, + "ĠCompos": 29936, + "chens": 29937, + "Ġneoc": 29938, + "Ġboiled": 29939, + "FINE": 29940, + "ono": 29941, + "uncle": 29942, + "iken": 29943, + "ĠBM": 29944, + "ι": 29945, + "Ġreceipts": 29946, + "Ġdisposed": 29947, + "ĠThirty": 29948, + "ĠRough": 29949, + "ĠABS": 29950, + "Ġnotwithstanding": 29951, + "ollen": 29952, + "#$": 29953, + "Ġunreliable": 29954, + "Ġbloom": 29955, + "Ġmediocre": 29956, + "Ġtram": 29957, + "ĠTasman": 29958, + "Ġshakes": 29959, + "Ġmanifesto": 29960, + "ĠMW": 29961, + "Ġsatisfactory": 29962, + "Ġshores": 29963, + "Ġcomputation": 29964, + "Ġassertions": 29965, + "ormons": 29966, + "arag": 29967, + "abit": 29968, + "Democrats": 29969, + "ĠLoot": 29970, + "ĠVolks": 29971, + "haired": 29972, + "Ġgravitational": 29973, + "Sing": 29974, + "ĠMiz": 29975, + "Ġthrottle": 29976, + "Ġtyranny": 29977, + "ĠViews": 29978, + "Ġrobber": 29979, + "ĠMinority": 29980, + "Ġshrine": 29981, + "scope": 29982, + "purpose": 29983, + "Ġnucleus": 29984, + "ourcing": 29985, + "ĠUSDA": 29986, + "ĠDHS": 29987, + "wra": 29988, + "ĠBowie": 29989, + "Scale": 29990, + "ĠBEL": 29991, + "xi": 29992, + "Iter": 29993, + "Ġ(),": 29994, + "wright": 29995, + "Ġsailors": 29996, + "oused": 29997, + "NASA": 29998, + "ĠProof": 29999, + "ĠMineral": 30000, + "token": 30001, + "ĠFD": 30002, + "Rew": 30003, + "Ġell": 30004, + "630": 30005, + "Ġchancellor": 30006, + "ĠGos": 30007, + "Ġamounted": 30008, + "ĠRecre": 30009, + "omez": 30010, + "ĠOptim": 30011, + "ĠOlive": 30012, + "Ġtracker": 30013, + "owler": 30014, + "ĠUnique": 30015, + "Root": 30016, + "Ġmaritime": 30017, + "ĠQuran": 30018, + "ĠAdapt": 30019, + "Ġecosystems": 30020, + "ĠRepeat": 30021, + "ĠSoy": 30022, + "ĠIMP": 30023, + "Ġgraduating": 30024, + "andem": 30025, + "Pur": 30026, + "ĠReset": 30027, + "ĠTrick": 30028, + "ĠPhilly": 30029, + "ĠTue": 30030, + "ĠMalaysian": 30031, + "Ġclimax": 30032, + "Ġbury": 30033, + "Ġconspic": 30034, + "ĠSouthampton": 30035, + "ĠFlowers": 30036, + "Ġescorted": 30037, + "ĠEducational": 30038, + "ĠIRC": 30039, + "Ġbrutally": 30040, + "eating": 30041, + "Ġpillar": 30042, + "ĠSang": 30043, + "ĠJude": 30044, + "arling": 30045, + "ĠAmnesty": 30046, + "Ġreminding": 30047, + "ĠAdministrative": 30048, + "hesda": 30049, + "Ġflashed": 30050, + "ĠPBS": 30051, + "perate": 30052, + "feature": 30053, + "Ġswipe": 30054, + "Ġgraves": 30055, + "oultry": 30056, + "261": 30057, + "breaks": 30058, + "ĠGuer": 30059, + "Ġshrimp": 30060, + "ĠVoting": 30061, + "quist": 30062, + "Ġanalytical": 30063, + "Ġtablespoons": 30064, + "ĠSOU": 30065, + "Ġresearched": 30066, + "Ġdisrupted": 30067, + "Ġjour": 30068, + "Ġreplica": 30069, + "Ġcartoons": 30070, + "bians": 30071, + "})": 30072, + "copy": 30073, + "Got": 30074, + "ouched": 30075, + "PUT": 30076, + "Ġswarm": 30077, + "notations": 30078, + "said": 30079, + "Ġrebuilt": 
30080, + "Ġcollaborate": 30081, + "Ġraging": 30082, + "Ġnar": 30083, + "Ġdemographics": 30084, + "ĠDDR": 30085, + "Ġdistrust": 30086, + "ossier": 30087, + "ĠKro": 30088, + "Ġpumpkin": 30089, + "Ġregrets": 30090, + "Ġfatalities": 30091, + "ĠLens": 30092, + "ĠOle": 30093, + "pd": 30094, + "Ġpuppet": 30095, + "ĠOutlook": 30096, + "ĠStam": 30097, + "Ol": 30098, + "Fair": 30099, + "UU": 30100, + "Ġrewritten": 30101, + "ı": 30102, + "Ġfascinated": 30103, + "Ġvectors": 30104, + "Ġtribunal": 30105, + "uay": 30106, + "ĠMats": 30107, + "ĠCoins": 30108, + "[[": 30109, + "Ġ181": 30110, + "Ġrenders": 30111, + "ĠKaepernick": 30112, + "Ġespionage": 30113, + "Ġsumm": 30114, + "Ġditch": 30115, + "Account": 30116, + "Ġspreadsheet": 30117, + "Ġmutant": 30118, + "past": 30119, + "407": 30120, + "Ġdye": 30121, + "Ġinitiation": 30122, + "Ġ4000": 30123, + "Ġpunishable": 30124, + "Ġthinner": 30125, + "ĠKhal": 30126, + "Ġintermedi": 30127, + "Dun": 30128, + "ĠGotham": 30129, + "Ġeagerly": 30130, + "Ġvaginal": 30131, + "powers": 30132, + "VW": 30133, + "ĠWATCHED": 30134, + "Ġpredator": 30135, + "amsung": 30136, + "Ġdisparity": 30137, + "Ġ[*": 30138, + "Ġamph": 30139, + "Ġoutskirts": 30140, + "ĠSpirits": 30141, + "Ġskeletal": 30142, + "л": 30143, + "ĠRear": 30144, + "Ġissuance": 30145, + "ĠLogic": 30146, + "released": 30147, + "ZZ": 30148, + "ĠBound": 30149, + "Entry": 30150, + "Ġexits": 30151, + "isol": 30152, + "ĠFounder": 30153, + "Ġwre": 30154, + "ĠGreenland": 30155, + "ĠMMO": 30156, + "taker": 30157, + "INC": 30158, + "ãģ¾": 30159, + "Ġhourly": 30160, + "henko": 30161, + "Ġfantasies": 30162, + "Ġdisob": 30163, + "Ġdemolition": 30164, + "ãĥĭ": 30165, + "Ġenlisted": 30166, + "ratulations": 30167, + "Ġmisguided": 30168, + "Ġensured": 30169, + "Ġdiscouraged": 30170, + "mort": 30171, + "Ġflank": 30172, + "Ġcess": 30173, + "Ġreacts": 30174, + "ĠSere": 30175, + "sensitive": 30176, + "ĠSerpent": 30177, + "assad": 30178, + "Ġ247": 30179, + "Ġcalmly": 30180, + "busters": 30181, + "Ġbleed": 30182, + "ĠStro": 30183, + "Ġamusement": 30184, + "ĠAntarctica": 30185, + "Ġscept": 30186, + "ĠGaw": 30187, + "aq": 30188, + "asonic": 30189, + "Ġsprawling": 30190, + "native": 30191, + "aturated": 30192, + "ĠBattlefield": 30193, + "IVERS": 30194, + "EB": 30195, + "ĠGems": 30196, + "ĠNorthwestern": 30197, + "ĠFilms": 30198, + "ĠAutomatic": 30199, + "Ġapprehend": 30200, + "ãģ¨": 30201, + "ĠguiName": 30202, + "Ġbackend": 30203, + "Ġevidenced": 30204, + "geant": 30205, + "012": 30206, + "ĠSiege": 30207, + "ĠexternalTo": 30208, + "ĠunfocusedRange": 30209, + "ĠguiActiveUnfocused": 30210, + "ĠguiIcon": 30211, + "ĠexternalToEVA": 30212, + "ĠexternalToEVAOnly": 30213, + "Fri": 30214, + "chard": 30215, + "enaries": 30216, + "Ġchiefs": 30217, + "Ġcf": 30218, + "ĠHUD": 30219, + "Ġcorrobor": 30220, + "ĠdB": 30221, + "ĠTaken": 30222, + "ĠPatricia": 30223, + "rail": 30224, + "ĠCharm": 30225, + "ĠLibertarian": 30226, + "rieve": 30227, + "Personal": 30228, + "ĠOUR": 30229, + "geries": 30230, + "Ġdumping": 30231, + "Ġneurological": 30232, + "itimate": 30233, + "ĠClintons": 30234, + "rafted": 30235, + "ĠMolly": 30236, + "Ġterminals": 30237, + "register": 30238, + "Ġflare": 30239, + "Ġencoded": 30240, + "Ġautopsy": 30241, + "pel": 30242, + "machine": 30243, + "Ġexemptions": 30244, + "ĠRoyals": 30245, + "distance": 30246, + "Ġdrafts": 30247, + "Ġlame": 30248, + "ĠCunning": 30249, + "Ġspouses": 30250, + "ĠMarkets": 30251, + "ĠCarrier": 30252, + "Ġimplying": 30253, + "ĠYak": 30254, + "sid": 30255, + "Ġloser": 30256, + "Ġvigilant": 30257, + "Ġimpeachment": 
30258, + "Ġaugmented": 30259, + "ĠEmployees": 30260, + "Ġunintended": 30261, + "ternally": 30262, + "ĠWatt": 30263, + "Ġrecognizable": 30264, + "essim": 30265, + "æĿ": 30266, + "Ġcoated": 30267, + "rha": 30268, + "Ġlieutenant": 30269, + "ĠLegislation": 30270, + "published": 30271, + "444": 30272, + "013": 30273, + "Ġideally": 30274, + "ĠPassword": 30275, + "Ġsimplify": 30276, + "ĠMeta": 30277, + "ĠMRI": 30278, + "Ġpleading": 30279, + "organized": 30280, + "handler": 30281, + "Ġunravel": 30282, + "correct": 30283, + "Ġicy": 30284, + "Ġparanoid": 30285, + "Ġpasser": 30286, + "Ġinspections": 30287, + "ofer": 30288, + "ĠHealthcare": 30289, + "283": 30290, + "ĠBrut": 30291, + "iola": 30292, + "forge": 30293, + "ĠMedieval": 30294, + "MSN": 30295, + "ievers": 30296, + "ĠProgramming": 30297, + "åī": 30298, + "Ġ223": 30299, + "mu": 30300, + "ĠCLE": 30301, + "uga": 30302, + "Ġshoppers": 30303, + "Ġinformative": 30304, + "ĠPlans": 30305, + "Ġsupplementation": 30306, + "ĠTests": 30307, + "tyard": 30308, + "ocytes": 30309, + "ĠVega": 30310, + "ĠGujarat": 30311, + "ermanent": 30312, + "Except": 30313, + "ĠLOT": 30314, + "alla": 30315, + "ĠCumm": 30316, + "ĠOsw": 30317, + "Ġvenom": 30318, + "ĠDebt": 30319, + "ĠDOWN": 30320, + "Ġreunion": 30321, + "Ġmuc": 30322, + "ĠRelief": 30323, + "Ġgeop": 30324, + "ĠðŁĺ": 30325, + "alogue": 30326, + "Anth": 30327, + "echo": 30328, + "Ġcorros": 30329, + "Ġreplication": 30330, + "ĠBlazing": 30331, + "ĠDaughter": 30332, + "Ġinflic": 30333, + "ĠLindsey": 30334, + "ÙĪ": 30335, + "284": 30336, + "Exit": 30337, + "Ġgloom": 30338, + "TAIN": 30339, + "Ġundermining": 30340, + "Ġadvising": 30341, + "hidden": 30342, + "Ġoverflow": 30343, + "Ġgor": 30344, + "urdue": 30345, + "Ġechoes": 30346, + "enhagen": 30347, + "Ġimpuls": 30348, + "drug": 30349, + "cash": 30350, + "Ġasync": 30351, + "Ġmirac": 30352, + "atts": 30353, + "punk": 30354, + "Ġpivot": 30355, + "ĠLegislative": 30356, + "Ġbloggers": 30357, + "ĠClaw": 30358, + "sburg": 30359, + "dyl": 30360, + "ĠRecommend": 30361, + "Ġverte": 30362, + "Ġprohibiting": 30363, + "ĠPanther": 30364, + "Jonathan": 30365, + "Ġomin": 30366, + "Ġhateful": 30367, + "281": 30368, + "ĠOrche": 30369, + "ĠMurdoch": 30370, + "downs": 30371, + "Ġasymm": 30372, + "GER": 30373, + "Always": 30374, + "Ġinforms": 30375, + "ĠWM": 30376, + "ĠPony": 30377, + "ĠAppendix": 30378, + "ĠArlington": 30379, + "Jam": 30380, + "Ġmedicinal": 30381, + "ĠSlam": 30382, + "ITIES": 30383, + "Ġreaff": 30384, + "ĠRi": 30385, + "FG": 30386, + "Spring": 30387, + "bool": 30388, + "Ġthighs": 30389, + "Ġmarkings": 30390, + "ĠRaqqa": 30391, + "ĠLak": 30392, + "poll": 30393, + "tsky": 30394, + "ĠMorty": 30395, + "ĠDefinition": 30396, + "Ġdebunk": 30397, + "endered": 30398, + "ĠLeone": 30399, + "avers": 30400, + "Ġmortgages": 30401, + "Apparently": 30402, + "Nic": 30403, + "haus": 30404, + "ĠThousands": 30405, + "auld": 30406, + "Ġmash": 30407, + "shoot": 30408, + "Ġdiarr": 30409, + "Ġconsciously": 30410, + "Hero": 30411, + "eas": 30412, + "ĠNaturally": 30413, + "ĠDestroyer": 30414, + "Ġdashboard": 30415, + "services": 30416, + "Rog": 30417, + "Ġmillennials": 30418, + "Ġinvade": 30419, + "-(": 30420, + "Ġcommissions": 30421, + "ĠAuckland": 30422, + "Ġbroadcasts": 30423, + "Ġfrontal": 30424, + "Ġcrank": 30425, + "ĠHistoric": 30426, + "Ġrumours": 30427, + "CTV": 30428, + "Ġsteril": 30429, + "Ġbooster": 30430, + "rocket": 30431, + "ãĤ¼": 30432, + "utsche": 30433, + "ĠPI": 30434, + "Ġ233": 30435, + "ĠProducer": 30436, + "ĠAnalytics": 30437, + "Ġinvaluable": 30438, + "Ġunintention": 30439, 
+ "ĠCY": 30440, + "Ġscrutin": 30441, + "Ġgigg": 30442, + "Ġengulf": 30443, + "Ġproletariat": 30444, + "Ġhacks": 30445, + "ĠHew": 30446, + "arak": 30447, + "ĠSlime": 30448, + "ielding": 30449, + "agher": 30450, + "ĠElliot": 30451, + "Ġtelecom": 30452, + "Ġ219": 30453, + "ultan": 30454, + "ĠArbor": 30455, + "ĠScouts": 30456, + "Ban": 30457, + "Ġlifespan": 30458, + "Ġblasp": 30459, + "388": 30460, + "Ġjudiciary": 30461, + "ĠContinental": 30462, + "asking": 30463, + "McC": 30464, + "LED": 30465, + "Ġbaggage": 30466, + "ĠSorcerer": 30467, + "Ġremnants": 30468, + "ĠGriffith": 30469, + "etsu": 30470, + "ĠSubaru": 30471, + "ĠPersonality": 30472, + "designed": 30473, + "ushima": 30474, + "agnar": 30475, + "Ġrecoil": 30476, + "Ġpassions": 30477, + "\\\":": 30478, + "Ġtee": 30479, + "Ġabolition": 30480, + "ĠCreating": 30481, + "jac": 30482, + "Ġ194": 30483, + "019": 30484, + "Ġpillars": 30485, + "riched": 30486, + "/\"": 30487, + "tk": 30488, + "Ġlivelihood": 30489, + "Ġroasted": 30490, + "ahon": 30491, + "ĠHutch": 30492, + "assert": 30493, + "Ġdividend": 30494, + "Ġknit": 30495, + "Ġdaunting": 30496, + "Ġdisturbance": 30497, + "Ġshale": 30498, + "Ġcultivated": 30499, + "Ġrefrigerator": 30500, + "LB": 30501, + "ĠNET": 30502, + "Ġcommercials": 30503, + "Ġthinkers": 30504, + "455": 30505, + "Ġchop": 30506, + "Broad": 30507, + "Ġsuspicions": 30508, + "Ġtagged": 30509, + "lifting": 30510, + "Ġstylish": 30511, + "ĠShields": 30512, + "Shortly": 30513, + "Ġtails": 30514, + "Auth": 30515, + "STE": 30516, + "ĠGAME": 30517, + "Ġseism": 30518, + "ĠKis": 30519, + "ologne": 30520, + "Ġcowork": 30521, + "Ġforcibly": 30522, + "Ġthyroid": 30523, + "ĠPB": 30524, + "ANE": 30525, + "married": 30526, + "horse": 30527, + "Ġpolymer": 30528, + "ĠChal": 30529, + "odor": 30530, + "DEBUG": 30531, + "ĠContext": 30532, + "Ġbliss": 30533, + "Ġpinpoint": 30534, + "ĠMathemat": 30535, + "legram": 30536, + "ĠWeekend": 30537, + "Ġlabelled": 30538, + "Ġbart": 30539, + "itles": 30540, + "Ġestrogen": 30541, + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ": 30542, + "\"'": 30543, + "Ġvisibly": 30544, + "Ġoutsider": 30545, + "aida": 30546, + "Area": 30547, + "Ġdissemin": 30548, + "Ġdishonest": 30549, + "ĠClosed": 30550, + "ĠBulletin": 30551, + "ĠRamsey": 30552, + "sword": 30553, + "ĠXI": 30554, + "ourced": 30555, + "Same": 30556, + "346": 30557, + "ĠRepe": 30558, + "ĠKou": 30559, + "cake": 30560, + "emis": 30561, + "Cache": 30562, + "ĠMeaning": 30563, + "ĠEnlight": 30564, + "onomy": 30565, + "Ġmanifestation": 30566, + "sworth": 30567, + "Jay": 30568, + "Ġchore": 30569, + "ör": 30570, + "Dream": 30571, + "Ġsanctioned": 30572, + "Ġculturally": 30573, + "ĠAra": 30574, + "Nav": 30575, + "Ġtheological": 30576, + "Ġstrut": 30577, + "ĠVO": 30578, + "ĠHandbook": 30579, + "Ġconstructing": 30580, + "Ġ¶": 30581, + "ĠBenefits": 30582, + "ĠPsychological": 30583, + "sac": 30584, + "å¸": 30585, + "policy": 30586, + "ĠMatters": 30587, + "ĠReported": 30588, + "ĠByte": 30589, + "Ġvitro": 30590, + "ĠMaiden": 30591, + "Ġlam": 30592, + "ĠJennings": 30593, + "Ġgarment": 30594, + "ĠRutgers": 30595, + "ĠStafford": 30596, + "ĠWellington": 30597, + "Ġintermitt": 30598, + "Ġnpm": 30599, + "Ġordeal": 30600, + "Ġplugged": 30601, + "ooming": 30602, + "inished": 30603, + "framework": 30604, + "Ġtimber": 30605, + "Ġcass": 30606, + "Ġ850": 30607, + "iless": 30608, + "ĠRedux": 30609, + "768": 30610, + "Stre": 30611, + "Ġsurpassed": 30612, + "whel": 30613, + "Ġparallels": 30614, + "Ġveil": 30615, + "ĠGI": 30616, + "ĠREST": 30617, + "Ġreadiness": 30618, + "sort": 30619, 
+ "Ġmodifying": 30620, + "ĠSlate": 30621, + "ruff": 30622, + "Ġmarble": 30623, + "Ġinfrared": 30624, + "Ġauditor": 30625, + "ĠFANTASY": 30626, + "ĠPoverty": 30627, + "ĠSPD": 30628, + "Ġ\"(": 30629, + "Ky": 30630, + "RAY": 30631, + "Ġexecutions": 30632, + "ĠBeverly": 30633, + "ĠMarxism": 30634, + "ĠBurst": 30635, + "ĠKali": 30636, + "estones": 30637, + "Clearly": 30638, + "Ell": 30639, + "ãģ§": 30640, + "ĠProceedings": 30641, + "Token": 30642, + "IFIC": 30643, + "ña": 30644, + "Central": 30645, + "ĠHaley": 30646, + "ĠDrama": 30647, + "Ġformations": 30648, + "ORN": 30649, + "Books": 30650, + "Ġdominating": 30651, + "ĠFlyers": 30652, + "ĠCompanion": 30653, + "Ġdisciplined": 30654, + "ĠYugoslav": 30655, + "ĠSpells": 30656, + "Ġvengeance": 30657, + "Ġlandlords": 30658, + "Len": 30659, + "ĠOgre": 30660, + "anoia": 30661, + "Ġpiercing": 30662, + "Ġcongreg": 30663, + "Ġscorer": 30664, + "obia": 30665, + "Ġnickel": 30666, + "ĠLearns": 30667, + "Ġrejo": 30668, + "Ġmasterpiece": 30669, + "Flash": 30670, + "Ġinhabited": 30671, + "ĠOpenGL": 30672, + "ĠDud": 30673, + "ĠICO": 30674, + "Ġarter": 30675, + "Ġplur": 30676, + "Ġmastery": 30677, + "Ġlongstanding": 30678, + "sted": 30679, + "Ġwines": 30680, + "Ġtelevised": 30681, + "ĠShrine": 30682, + "ĠBayern": 30683, + "Ġâĵĺ": 30684, + "Ġenclosure": 30685, + "john": 30686, + "Ġprophets": 30687, + "ĠResurrection": 30688, + "ĠOrders": 30689, + "Ġuneven": 30690, + "rals": 30691, + "Ġdwind": 30692, + "ĠLah": 30693, + "ĠSloven": 30694, + "378": 30695, + "Ġinsistence": 30696, + "affle": 30697, + "ĠClone": 30698, + "Ġhardship": 30699, + "ĠCongressman": 30700, + "Ġplead": 30701, + "Ġreviewers": 30702, + "Ġcured": 30703, + "Ġ1935": 30704, + "asley": 30705, + "fake": 30706, + "ĠThinking": 30707, + "ydia": 30708, + "PART": 30709, + "ĠDota": 30710, + "oit": 30711, + "Ġwhipped": 30712, + "Ġbouncing": 30713, + "ĠHispanics": 30714, + "comings": 30715, + "Ġcannabin": 30716, + "ĠChambers": 30717, + "ĠZack": 30718, + "Optional": 30719, + "Ġcoats": 30720, + "Ġprowess": 30721, + "ĠNorton": 30722, + "Ġplainly": 30723, + "Ġfreight": 30724, + "Ġinhibition": 30725, + "Ġclam": 30726, + "Ġ303": 30727, + "kef": 30728, + "aleigh": 30729, + "Luke": 30730, + "Ġpsycho": 30731, + "atorium": 30732, + "MED": 30733, + "Ġtreaties": 30734, + "Ġindisc": 30735, + "Ġdc": 30736, + "OPS": 30737, + "Ġresilient": 30738, + "ĠInterstate": 30739, + "Ġslack": 30740, + "Ġmundane": 30741, + "Ġestablishes": 30742, + "359": 30743, + "Ġstrained": 30744, + "Ġnond": 30745, + "Sus": 30746, + "Ġcaste": 30747, + "arate": 30748, + "ieving": 30749, + "Ġunfairly": 30750, + "Ġparser": 30751, + "onial": 30752, + "ursive": 30753, + "Via": 30754, + "ĠOtto": 30755, + "ĠAuthorities": 30756, + "stroke": 30757, + "KR": 30758, + "ĠMercy": 30759, + "Ġfurnished": 30760, + "Ġoutset": 30761, + "Ġmetic": 30762, + "1982": 30763, + "olithic": 30764, + "ĠTent": 30765, + "ogical": 30766, + "ĠAircraft": 30767, + "Ġhides": 30768, + "ĠBecame": 30769, + "Ġeducators": 30770, + "reaching": 30771, + "Ġvolatility": 30772, + "Ġtoddler": 30773, + "ĠNASCAR": 30774, + "ĠTwelve": 30775, + "ĠHighlights": 30776, + "Ġgrape": 30777, + "Ġsplits": 30778, + "Ġpeasant": 30779, + "Ġreneg": 30780, + "ĠMSI": 30781, + "Temp": 30782, + "stars": 30783, + "Ġtrek": 30784, + "ĠHyde": 30785, + "binding": 30786, + "Ġrealism": 30787, + "Ġoxide": 30788, + "ĠHos": 30789, + "Ġmounts": 30790, + "Ġbiting": 30791, + "Ġcollapsing": 30792, + "Ġpostal": 30793, + "Ġmuseums": 30794, + "Ġdetached": 30795, + "Ġrespecting": 30796, + "Ġmonopol": 30797, + "Ġworkflow": 30798, + 
"ĠCake": 30799, + "Template": 30800, + "ĠOrganisation": 30801, + "Ġpersistence": 30802, + "369": 30803, + "Coming": 30804, + "Brad": 30805, + "Ġredundant": 30806, + "ĠGTA": 30807, + "Ġbending": 30808, + "Ġrevoked": 30809, + "Ġoffending": 30810, + "Ġframing": 30811, + "Ġprintf": 30812, + "Commun": 30813, + "members": 30814, + "Outside": 30815, + "Ġconstrued": 30816, + "Ġcoded": 30817, + "FORE": 30818, + "Ġchast": 30819, + "Chat": 30820, + "Indian": 30821, + "ĠYard": 30822, + "?!\"": 30823, + "ĠPorts": 30824, + "ĠXavier": 30825, + "ĠRET": 30826, + "'.\"": 30827, + "ĠBoat": 30828, + "ivated": 30829, + "icht": 30830, + "umerable": 30831, + "Ds": 30832, + "ĠDunn": 30833, + "Ġcoffin": 30834, + "Ġsecurely": 30835, + "ĠRaptors": 30836, + "ĠBes": 30837, + "Installation": 30838, + "Ġinception": 30839, + "ĠHealthy": 30840, + "endants": 30841, + "Ġpsychologists": 30842, + "ĠSheikh": 30843, + "cultural": 30844, + "ĠBlackBerry": 30845, + "shift": 30846, + "Fred": 30847, + "oche": 30848, + "Ġcakes": 30849, + "ĠSEO": 30850, + "ĠGian": 30851, + "ĠAsians": 30852, + "ogging": 30853, + "element": 30854, + "Ġpundits": 30855, + "ĠVaugh": 30856, + "ĠGavin": 30857, + "Ġhitter": 30858, + "Ġdrowned": 30859, + "Ġchalk": 30860, + "ĠZika": 30861, + "Ġmeasles": 30862, + "802": 30863, + "âĢ¦..": 30864, + "ĠAWS": 30865, + "]\"": 30866, + "Ġdistort": 30867, + "ĠMast": 30868, + "Ġantibodies": 30869, + "ĠMash": 30870, + "Memory": 30871, + "ĠUganda": 30872, + "ĠProb": 30873, + "Ġvomiting": 30874, + "ĠTurns": 30875, + "Ġoccupying": 30876, + "Ġevasion": 30877, + "ĠTherapy": 30878, + "Ġpromo": 30879, + "Ġelectr": 30880, + "Ġblueprint": 30881, + "ĠDre": 30882, + "priced": 30883, + "ĠDepot": 30884, + "Ġalleviate": 30885, + "ĠSomali": 30886, + "marg": 30887, + "nine": 30888, + "Ġnostalgia": 30889, + "ĠShepherd": 30890, + "Ġcavalry": 30891, + "Ġtorped": 30892, + "ĠBloody": 30893, + "xb": 30894, + "Ġsank": 30895, + "Ġgoalt": 30896, + "reportprint": 30897, + "embedreportprint": 30898, + "cloneembedreportprint": 30899, + "ĠInitially": 30900, + "ĠFischer": 30901, + "Ġnoteworthy": 30902, + "cern": 30903, + "Ġinefficient": 30904, + "rawdownload": 30905, + "rawdownloadcloneembedreportprint": 30906, + "cation": 30907, + "ĠDynasty": 30908, + "lag": 30909, + "DES": 30910, + "Ġdistinctly": 30911, + "ĠEstonia": 30912, + "Ġopenness": 30913, + "Ġgossip": 30914, + "ruck": 30915, + "Width": 30916, + "ĠIbrahim": 30917, + "Ġpetroleum": 30918, + "Ġavatar": 30919, + "ĠHed": 30920, + "atha": 30921, + "ĠHogwarts": 30922, + "Ġcaves": 30923, + "678": 30924, + "Ġsafeguard": 30925, + "ĠMog": 30926, + "isson": 30927, + "ĠDurham": 30928, + "slaught": 30929, + "ĠGraduate": 30930, + "Ġsubconscious": 30931, + "ĠExcellent": 30932, + "ĠDum": 30933, + "-----": 30934, + "Ġpiles": 30935, + "ĠWORK": 30936, + "ĠGarn": 30937, + "ĠFol": 30938, + "ĠATM": 30939, + "Ġavoids": 30940, + "ĠTul": 30941, + "Ġbleak": 30942, + "ELY": 30943, + "ivist": 30944, + "lightly": 30945, + "Pers": 30946, + "ĠDob": 30947, + "ĠLS": 30948, + "Ġinsanity": 30949, + "ε": 30950, + "atalie": 30951, + "Enlarge": 30952, + "Ġtwists": 30953, + "Ġfaulty": 30954, + "Ġpiracy": 30955, + "Ġimpover": 30956, + "Ġrugged": 30957, + "ĠFashion": 30958, + "Ġsands": 30959, + "'?": 30960, + "swick": 30961, + "Ġnatives": 30962, + "Ġhen": 30963, + "ĠNoise": 30964, + "ãĥĹ": 30965, + "Ġgreens": 30966, + "Ġfreezer": 30967, + "Ġdynasty": 30968, + "ĠFathers": 30969, + "ĠNewark": 30970, + "Ġarchaeological": 30971, + "Ġot": 30972, + "obar": 30973, + "Ġblockade": 30974, + "Ġallerg": 30975, + "LV": 30976, + "Ġdebit": 30977, + 
"ĠRFC": 30978, + "ĠMilton": 30979, + "ĠPressure": 30980, + "Ġwillingly": 30981, + "Ġdisproportionate": 30982, + "Ġoppressive": 30983, + "Ġdiamonds": 30984, + "Ġbelongings": 30985, + "1970": 30986, + "Ġbells": 30987, + "Ġimperialism": 30988, + "Ġ227": 30989, + "Ġexploding": 30990, + "ĠEclipse": 30991, + "Ġ1919": 30992, + "Ġrant": 30993, + "Ġnominations": 30994, + "347": 30995, + "Ġpeacefully": 30996, + "rica": 30997, + "ĠFUCK": 30998, + "Ġvibration": 30999, + "malink": 31000, + "Ġropes": 31001, + "ĠIvanka": 31002, + "ĠBrewery": 31003, + "ĠBooker": 31004, + "ĠOwens": 31005, + "goers": 31006, + "Services": 31007, + "ĠSnape": 31008, + "Ġ191": 31009, + "395": 31010, + "Ġ299": 31011, + "justice": 31012, + "Ġbri": 31013, + "Ġdiscs": 31014, + "Ġprominently": 31015, + "Ġvulgar": 31016, + "Ġskipping": 31017, + "lves": 31018, + "Ġtsunami": 31019, + "374": 31020, + "ĠUrug": 31021, + "ĠEid": 31022, + "recated": 31023, + "phen": 31024, + "Ġfaults": 31025, + "ĠStarted": 31026, + "950": 31027, + "Ġpi": 31028, + "Ġdetector": 31029, + "Ġbastard": 31030, + "Ġvalidated": 31031, + "SpaceEngineers": 31032, + "OURCE": 31033, + "Ġ(~": 31034, + "Ġunsur": 31035, + "Ġaffirmed": 31036, + "Ġfascism": 31037, + "Ġresolving": 31038, + "ĠChavez": 31039, + "ĠCyn": 31040, + "Ġdetract": 31041, + "Lost": 31042, + "Ġrigged": 31043, + "Ġhomage": 31044, + "ĠBruno": 31045, + "555": 31046, + "eca": 31047, + "Ġpresses": 31048, + "Ġhumour": 31049, + "Ġspacing": 31050, + "Ġ'/": 31051, + "olkien": 31052, + "Coun": 31053, + "OPER": 31054, + "Tre": 31055, + "Son": 31056, + "ĠCambodia": 31057, + "ierre": 31058, + "mong": 31059, + "ozy": 31060, + "Ġliquidity": 31061, + "ĠSoviets": 31062, + "ĠFernando": 31063, + "Ġ229": 31064, + "Ġslug": 31065, + "ĠCatalan": 31066, + "electric": 31067, + "Ġscenery": 31068, + "ĠHearth": 31069, + "Ġconstrained": 31070, + "Ġgoalie": 31071, + "ĠGuidelines": 31072, + "ĠAmmo": 31073, + "ĠPearson": 31074, + "Ġtaxed": 31075, + "Ġfetus": 31076, + "Response": 31077, + "ĠAlexis": 31078, + "thia": 31079, + "Guy": 31080, + "Ġreconstruct": 31081, + "Ġextremes": 31082, + "Ġconcluding": 31083, + "ĠPeg": 31084, + "ooks": 31085, + "Ġdeductions": 31086, + "Rose": 31087, + "Ġgroundbreaking": 31088, + "ĠTarg": 31089, + "ãĥģ": 31090, + "ĠReve": 31091, + "resource": 31092, + "Ġmoons": 31093, + "Ġelectromagnetic": 31094, + "Ġamidst": 31095, + "ĠViktor": 31096, + "NESS": 31097, + "BACK": 31098, + "Ġcommute": 31099, + "ĠAnaheim": 31100, + "Ġfluctuations": 31101, + "640": 31102, + "Ġnoodles": 31103, + "ĠCopenhagen": 31104, + "ĠTide": 31105, + "ĠGrizz": 31106, + "ĠSEE": 31107, + "Ġpipelines": 31108, + "Ġscars": 31109, + "endo": 31110, + "agus": 31111, + "ĠETF": 31112, + "/#": 31113, + "ĠBecome": 31114, + "448": 31115, + "Ġvisc": 31116, + "ĠRecommended": 31117, + "Ġjumper": 31118, + "Ġcognition": 31119, + "Ġassassin": 31120, + "Ġwitnessing": 31121, + "ĠSetup": 31122, + "Ġlac": 31123, + "vim": 31124, + "ISM": 31125, + "pages": 31126, + "SSL": 31127, + "358": 31128, + "Ġadject": 31129, + "industrial": 31130, + "lore": 31131, + "chery": 31132, + "Ġglitter": 31133, + "Ġcalf": 31134, + "Florida": 31135, + "Ġspoilers": 31136, + "Ġsucceeds": 31137, + "Ġchanting": 31138, + "Ġslogans": 31139, + "ĠTracy": 31140, + "Visit": 31141, + "rology": 31142, + "Ġmornings": 31143, + "Ġlineage": 31144, + "Ġsip": 31145, + "Ġintensely": 31146, + "Ġflourish": 31147, + "ĠSleeping": 31148, + "ĠFem": 31149, + "orpor": 31150, + "ĠKlan": 31151, + "ĠDarth": 31152, + "hack": 31153, + "ĠNielsen": 31154, + "Ġtumors": 31155, + "Ġprocurement": 31156, + "ĠYorkshire": 
31157, + "Ġraided": 31158, + "KY": 31159, + "Anna": 31160, + "Ġ//[": 31161, + "ĠDisorder": 31162, + "ĠMustang": 31163, + "ĠWen": 31164, + "ĠTrying": 31165, + "sq": 31166, + "Ġdeliveries": 31167, + "Ġshutter": 31168, + "Ġcerebral": 31169, + "Ġbipolar": 31170, + "ĠCN": 31171, + "lass": 31172, + "jet": 31173, + "Ġdebating": 31174, + ">:": 31175, + "Ġeagle": 31176, + "grades": 31177, + "ĠDixon": 31178, + "UGC": 31179, + "MAS": 31180, + "ĠDraco": 31181, + "ĠMachines": 31182, + "affer": 31183, + "Ġeman": 31184, + "²": 31185, + "pron": 31186, + "ĠGym": 31187, + "Ġcomparatively": 31188, + "ĠTribunal": 31189, + "PRO": 31190, + "Ġlex": 31191, + "Ġfertile": 31192, + "Ġdepressing": 31193, + "Ġsuperficial": 31194, + "essential": 31195, + "ĠHunters": 31196, + "gp": 31197, + "Ġprominence": 31198, + "Liber": 31199, + "ĠAncest": 31200, + "otechnology": 31201, + "Ġmocking": 31202, + "ĠTraff": 31203, + "ĸļ": 31204, + "Medium": 31205, + "Iraq": 31206, + "Ġpsychiatrist": 31207, + "Quantity": 31208, + "ĠLect": 31209, + "Ġnoisy": 31210, + "520": 31211, + "GY": 31212, + "Ġslapped": 31213, + "ĠMTV": 31214, + "Ġpara": 31215, + "pull": 31216, + "Multiple": 31217, + "asher": 31218, + "Ġnour": 31219, + "ĠSeg": 31220, + "Spell": 31221, + "vous": 31222, + "ordial": 31223, + "Senior": 31224, + "ĠGoldberg": 31225, + "ĠPlasma": 31226, + "need": 31227, + "Ġmessenger": 31228, + "eret": 31229, + "Ġteamed": 31230, + "Ġliteracy": 31231, + "ĠLeah": 31232, + "ĠDoyle": 31233, + "Ġemitted": 31234, + "UX": 31235, + "Ġevade": 31236, + "Ġmaze": 31237, + "Ġwrongly": 31238, + "ĠLars": 31239, + "Ġstereotype": 31240, + "Ġpledges": 31241, + "Ġaroma": 31242, + "ĠMET": 31243, + "Ġacre": 31244, + "ĠOD": 31245, + "Ġff": 31246, + "Ġbreweries": 31247, + "ĠHilton": 31248, + "undle": 31249, + "ĠKak": 31250, + "ĠThankfully": 31251, + "ĠCanucks": 31252, + "inctions": 31253, + "ĠAppears": 31254, + "Ġcoer": 31255, + "Ġundermined": 31256, + "rovers": 31257, + "Andre": 31258, + "Ġblaze": 31259, + "umers": 31260, + "Ġfamine": 31261, + "amphetamine": 31262, + "ulkan": 31263, + "Amount": 31264, + "Ġdesperation": 31265, + "wikipedia": 31266, + "development": 31267, + "ĠCorinth": 31268, + "ussia": 31269, + "Jackson": 31270, + "LI": 31271, + "Native": 31272, + "Rs": 31273, + "Ohio": 31274, + "ĠKathleen": 31275, + "Fortunately": 31276, + "Ġattendant": 31277, + "ĠPreferred": 31278, + "ĠDidn": 31279, + "ĠVs": 31280, + "Mis": 31281, + "Ġrespondent": 31282, + "Ġboun": 31283, + "stable": 31284, + "Ġpaved": 31285, + "Ġunexpl": 31286, + "ĠCheney": 31287, + "LM": 31288, + "ĠCull": 31289, + "blown": 31290, + "Ġconfronting": 31291, + "ocese": 31292, + "serving": 31293, + "Wi": 31294, + "ĠLithuania": 31295, + "anni": 31296, + "Ġstalk": 31297, + "hd": 31298, + "Ġvener": 31299, + "APH": 31300, + "ynchronous": 31301, + "URR": 31302, + "umably": 31303, + "historic": 31304, + "Half": 31305, + "Hay": 31306, + "Ġresilience": 31307, + "spection": 31308, + "Ġabandoning": 31309, + "Obs": 31310, + "ĠDebbie": 31311, + "Ġgradient": 31312, + "ĠPlaint": 31313, + "ĠCanal": 31314, + "ARCH": 31315, + "Ġexpansive": 31316, + "Ġfung": 31317, + "Ġbounced": 31318, + "Und": 31319, + "Ġprecautions": 31320, + "Ġclarification": 31321, + "Ġdagger": 31322, + "Ġgrips": 31323, + "Ġµ": 31324, + "ĠRivera": 31325, + "ĠUndead": 31326, + "isites": 31327, + "ĠFIRST": 31328, + "ño": 31329, + "audi": 31330, + "Ġhostages": 31331, + "Ġcompliant": 31332, + "Ġalumni": 31333, + "Seven": 31334, + "Ġcybersecurity": 31335, + "either": 31336, + "Collect": 31337, + "Ġinvariably": 31338, + "ĠSoci": 31339, + "Ġlawmaker": 
31340, + "Ġale": 31341, + "ĠPersonally": 31342, + "Nazi": 31343, + "Ġcustomization": 31344, + "ĠProc": 31345, + "ĠSaskatchewan": 31346, + "eaturing": 31347, + "Ġspared": 31348, + "Ġdiscontinued": 31349, + "Ġcomputational": 31350, + "ĠMotorola": 31351, + "Ġsupremacist": 31352, + "governmental": 31353, + "Ġparadise": 31354, + "ĠDowning": 31355, + "ĠNikon": 31356, + "Ġcatalyst": 31357, + "berra": 31358, + "Toronto": 31359, + "875": 31360, + "beta": 31361, + "ĠMacron": 31362, + "Ġunrealistic": 31363, + "vector": 31364, + "ĠVehicles": 31365, + "itiveness": 31366, + "ĠRV": 31367, + "ĠColbert": 31368, + "sin": 31369, + "oji": 31370, + "entin": 31371, + "ĠKrish": 31372, + "hello": 31373, + "ffield": 31374, + "oky": 31375, + "ĠTate": 31376, + "Ġmaple": 31377, + "Ġaids": 31378, + "chemical": 31379, + "334": 31380, + "nuts": 31381, + "ĠWarp": 31382, + "Ġxx": 31383, + "ĠRobb": 31384, + "umerous": 31385, + "_-_": 31386, + "ftime": 31387, + "ĠVW": 31388, + "Ġwinger": 31389, + "ĠDome": 31390, + "tools": 31391, + "ĠPV": 31392, + "ĠGeorgetown": 31393, + "Ġgeared": 31394, + "Ġjihadists": 31395, + "Ġcp": 31396, + "Ġsteroids": 31397, + "Mother": 31398, + "clerosis": 31399, + "ĠDRM": 31400, + "nesia": 31401, + "Ġlinger": 31402, + "Ġimmersive": 31403, + "ĠCOUN": 31404, + "Ġoutweigh": 31405, + "ensual": 31406, + "Band": 31407, + "Ġtransforms": 31408, + "matched": 31409, + "psons": 31410, + "ĠJudicial": 31411, + "factor": 31412, + "Ġreferral": 31413, + "Ġoddly": 31414, + "ĠWenger": 31415, + "Bring": 31416, + "ĠBows": 31417, + "602": 31418, + "ICLE": 31419, + "Ġlions": 31420, + "ĠAcademic": 31421, + "ĠThorn": 31422, + "ĠRaider": 31423, + "kefeller": 31424, + "Storage": 31425, + "Lower": 31426, + "ĠOrt": 31427, + "ĠEquality": 31428, + "ALT": 31429, + "ĠSOC": 31430, + "Types": 31431, + "Ġlyn": 31432, + "ĠAsset": 31433, + "coat": 31434, + "TPP": 31435, + "CVE": 31436, + "ĠPioneer": 31437, + "application": 31438, + "Modern": 31439, + "ĠHK": 31440, + "Environment": 31441, + "Alright": 31442, + "Rain": 31443, + "IPP": 31444, + "ĠShiite": 31445, + "Ġmound": 31446, + "ĠAbilities": 31447, + "condition": 31448, + "Staff": 31449, + "Ġcompetence": 31450, + "ĠMoor": 31451, + "ĠDiablo": 31452, + "Ġwithheld": 31453, + "Ġostensibly": 31454, + "ĠBrom": 31455, + "Ġmsg": 31456, + "Ġdenomin": 31457, + "ĠReferences": 31458, + "ĠFP": 31459, + "Ġplunged": 31460, + "Ġpamph": 31461, + "moving": 31462, + "central": 31463, + "Ġdownright": 31464, + "Ġfading": 31465, + "Tal": 31466, + "Typ": 31467, + "ĠThy": 31468, + "ukes": 31469, + "ithe": 31470, + "Ġove": 31471, + "Ġbattled": 31472, + "Ġseafood": 31473, + "Ġfigur": 31474, + "ĠRD": 31475, + "crop": 31476, + "Ġsquads": 31477, + "{\\": 31478, + "à¹": 31479, + "ĠEh": 31480, + "Ġinterviewing": 31481, + "ĠQin": 31482, + "Ġaspiring": 31483, + "PLIC": 31484, + "Ġclauses": 31485, + "ĠGast": 31486, + "ĠNir": 31487, + "Ġluggage": 31488, + "Ġhose": 31489, + "Ġsystemd": 31490, + "Ġdescending": 31491, + "ĠRevised": 31492, + "ĠRails": 31493, + "align": 31494, + "709": 31495, + "337": 31496, + "Ġfug": 31497, + "charging": 31498, + "tags": 31499, + "Ġuter": 31500, + "kish": 31501, + "WARNING": 31502, + "490": 31503, + "profits": 31504, + "Ġvoyage": 31505, + "Ġace": 31506, + "ĠVanguard": 31507, + "ĠTanks": 31508, + "ĠMuk": 31509, + "Ġ226": 31510, + "Safe": 31511, + "Armor": 31512, + "Ġvolcanic": 31513, + "Ġwomb": 31514, + "ĠMIL": 31515, + "Ġbeginner": 31516, + "ĠRecogn": 31517, + "ĠAAP": 31518, + "PLAY": 31519, + ")!": 31520, + "Ġdetecting": 31521, + "cn": 31522, + "Ġbreaches": 31523, + "Basically": 31524, 
+ "ĠPag": 31525, + "ĠMunicipal": 31526, + "ĠIndie": 31527, + "ĠLaf": 31528, + "ĠDisable": 31529, + "ĠOlson": 31530, + "Ġrestrained": 31531, + "Ġrulings": 31532, + "Ġhumane": 31533, + "events": 31534, + "ĠCinema": 31535, + "displayText": 31536, + "ĠHatch": 31537, + "actionDate": 31538, + "onnaissance": 31539, + "Ġassaulting": 31540, + "ĠLug": 31541, + "CHAT": 31542, + "Ġvigorous": 31543, + "ĠPerse": 31544, + "Ġintolerance": 31545, + "ĠSnapchat": 31546, + "ĠSharks": 31547, + "Ġdummy": 31548, + "ĠDiagn": 31549, + "ĠGuitar": 31550, + "imeters": 31551, + "403": 31552, + "REG": 31553, + "Ax": 31554, + "Ġseparates": 31555, + "ĠMahm": 31556, + "Ġtv": 31557, + "jah": 31558, + "OOL": 31559, + "Circ": 31560, + "ĠWindsor": 31561, + "ussian": 31562, + "Ġintuition": 31563, + "Ġdisdain": 31564, + "ĠDonovan": 31565, + "Ġ221": 31566, + "Emb": 31567, + "Ġcondemning": 31568, + "Ġgenerosity": 31569, + "zzy": 31570, + "Ġpanties": 31571, + "ĠPrevent": 31572, + "ActionCode": 31573, + "ANA": 31574, + "342": 31575, + "externalActionCode": 31576, + "Ġspecifying": 31577, + "Ġcrystall": 31578, + "Jere": 31579, + "Ġrupt": 31580, + "ĠApprentice": 31581, + "Ġprofiling": 31582, + "к": 31583, + "Strike": 31584, + "Ġsideline": 31585, + "Ġobligated": 31586, + "Ġoccult": 31587, + "Ġbureaucratic": 31588, + "antically": 31589, + "rupted": 31590, + "negative": 31591, + "ĠEthiopia": 31592, + "ĠCivic": 31593, + "Ġinsiders": 31594, + "eligible": 31595, + "ĠTVs": 31596, + "ĠBAR": 31597, + "ĠTI": 31598, + "iologist": 31599, + "ĠAIR": 31600, + "Ġsubstituted": 31601, + "Arab": 31602, + "ĠSaul": 31603, + "ĠYog": 31604, + "prem": 31605, + "Ġbuilders": 31606, + "Ġstationary": 31607, + "Ġdoubtful": 31608, + "Ġvigorously": 31609, + "Ġthrilling": 31610, + "Physical": 31611, + "ĠCarey": 31612, + "ĠHydra": 31613, + "geoning": 31614, + "ĠSly": 31615, + "yton": 31616, + "Ġborrowers": 31617, + "ĠParkinson": 31618, + "Ġë": 31619, + "ĠJamaica": 31620, + "Ġsatir": 31621, + "Ġinsurgents": 31622, + "ĠFirm": 31623, + "Ġisot": 31624, + "ĠKarn": 31625, + "ourning": 31626, + "akens": 31627, + "docs": 31628, + "little": 31629, + "ĠMonaco": 31630, + "CLASS": 31631, + "Turkey": 31632, + "Ly": 31633, + "ĠConan": 31634, + "assic": 31635, + "Ġstarred": 31636, + "ĠPacers": 31637, + "eties": 31638, + "Ġtipping": 31639, + "Moon": 31640, + "ĠRw": 31641, + "same": 31642, + "Ġcavity": 31643, + "Ġgoof": 31644, + "ĠZo": 31645, + "Shock": 31646, + "ummer": 31647, + "Ġemphasizes": 31648, + "Ġregrett": 31649, + "Ġnovelty": 31650, + "Ġenvy": 31651, + "ĠPassive": 31652, + "rw": 31653, + "505": 31654, + "Ġindifferent": 31655, + "ĠRica": 31656, + "ĠHimself": 31657, + "ĠFreddie": 31658, + "Ġadip": 31659, + "ä¸Ģ": 31660, + "Ġbreakout": 31661, + "Ġhurried": 31662, + "ĠHuang": 31663, + "ĠDisk": 31664, + "Ġroaming": 31665, + "?????-?????-": 31666, + "UV": 31667, + "ĠRicky": 31668, + "ĠSigma": 31669, + "Ġmarginalized": 31670, + "Ġedits": 31671, + "Ġ304": 31672, + "memory": 31673, + "Ġspecimen": 31674, + "293": 31675, + "ãģ¯": 31676, + "Ġvertically": 31677, + "Ġaudition": 31678, + "ĠHeck": 31679, + "Ġcaster": 31680, + "ĠHoldings": 31681, + "adal": 31682, + "ĠCron": 31683, + "ĠLiam": 31684, + "Ġdeflect": 31685, + "Pick": 31686, + "ĠDebug": 31687, + "REF": 31688, + "Ġversatility": 31689, + "othes": 31690, + "classified": 31691, + "ĠMahar": 31692, + "ĠHort": 31693, + "Counter": 31694, + "stasy": 31695, + "noticed": 31696, + "331": 31697, + "ĠShim": 31698, + "fuck": 31699, + "ĠBie": 31700, + "Ġairing": 31701, + "ĠProtein": 31702, + "ĠHolding": 31703, + "Ġspectators": 31704, + 
"iliated": 31705, + "ĠThatcher": 31706, + "nosis": 31707, + "ãĥ¼ãĥ³": 31708, + "Tele": 31709, + "Boston": 31710, + "ĠTempl": 31711, + "stay": 31712, + "Ġdeclarations": 31713, + "479": 31714, + "Volume": 31715, + "ĠDesigner": 31716, + "ĠOverwatch": 31717, + "idae": 31718, + "Ġonwards": 31719, + "Ġnets": 31720, + "ĠManila": 31721, + "particularly": 31722, + "Ġpolitic": 31723, + "oother": 31724, + "Ġportraits": 31725, + "Ġpavement": 31726, + "cffff": 31727, + "Ġsaints": 31728, + "Ġbeginners": 31729, + "ESPN": 31730, + "Ġshortcomings": 31731, + "âķIJâķIJ": 31732, + "Ġcomet": 31733, + "ĠOrganic": 31734, + "quel": 31735, + "Ġhospitalized": 31736, + "Break": 31737, + "Ġpeel": 31738, + "dylib": 31739, + "aspx": 31740, + "urances": 31741, + "ĠTIM": 31742, + "Pg": 31743, + "Ġreadable": 31744, + "ĠMalik": 31745, + "Ġmuzzle": 31746, + "Ġbenchmarks": 31747, + "dal": 31748, + "ĠVacc": 31749, + "ĠHicks": 31750, + "609": 31751, + "ĠBiblical": 31752, + "heng": 31753, + "Ġoverload": 31754, + "ĠCivilization": 31755, + "Ġimmoral": 31756, + "Ġfries": 31757, + "ãĤĴ": 31758, + "Ġreproduced": 31759, + "Ġformulation": 31760, + "jug": 31761, + "irez": 31762, + "gear": 31763, + "Ġcoached": 31764, + "MpServer": 31765, + "ĠSJ": 31766, + "ĠKw": 31767, + "Init": 31768, + "deal": 31769, + "ĠOro": 31770, + "ĠLoki": 31771, + "ĠSongs": 31772, + "Ġ232": 31773, + "ĠLouise": 31774, + "asionally": 31775, + "Ġuncond": 31776, + "ollywood": 31777, + "Ġprogressives": 31778, + "ĠEnough": 31779, + "ĠDoe": 31780, + "Ġwreckage": 31781, + "Ġbrushed": 31782, + "ĠBaseType": 31783, + "Ġzoning": 31784, + "ishable": 31785, + "hetically": 31786, + "ĠCaucus": 31787, + "ĠHue": 31788, + "Ġkarma": 31789, + "ĠSporting": 31790, + "Ġtrader": 31791, + "Ġseeming": 31792, + "ĠCapture": 31793, + "430": 31794, + "bish": 31795, + "Ġtunes": 31796, + "Ġindoors": 31797, + "ĠSphere": 31798, + "ĠDancing": 31799, + "TERN": 31800, + "Ġnob": 31801, + "ĠGST": 31802, + "maps": 31803, + "Ġpeppers": 31804, + "Fit": 31805, + "Ġoversees": 31806, + "ĠRabbi": 31807, + "ĠRuler": 31808, + "vertising": 31809, + "office": 31810, + "xxx": 31811, + "Ġraft": 31812, + "Changed": 31813, + "Ġtextbooks": 31814, + "Links": 31815, + "ĠOmn": 31816, + "ãĢij": 31817, + "Ġinconvenience": 31818, + "ĠDonetsk": 31819, + "=~": 31820, + "Ġimplicitly": 31821, + "Ġboosts": 31822, + "ĠBones": 31823, + "ĠBoom": 31824, + "Courtesy": 31825, + "Ġsensational": 31826, + "ANY": 31827, + "Ġgreedy": 31828, + "eden": 31829, + "Ġinexper": 31830, + "ĠLer": 31831, + "ĠVale": 31832, + "Ġtighten": 31833, + "ĠEAR": 31834, + "ĠNum": 31835, + "Ġancestor": 31836, + "Sent": 31837, + "ĠHorde": 31838, + "urgical": 31839, + "allah": 31840, + "Ġsap": 31841, + "amba": 31842, + "ĠSpread": 31843, + "twitch": 31844, + "Ġgrandson": 31845, + "Ġfracture": 31846, + "Ġmoderator": 31847, + "ĠSeventh": 31848, + "ĠReverse": 31849, + "Ġestimation": 31850, + "Choose": 31851, + "Ġparach": 31852, + "Ġbarric": 31853, + "ãĢIJ": 31854, + "Ġcompass": 31855, + "Ġallergic": 31856, + "âĢķ": 31857, + "OTHER": 31858, + "errilla": 31859, + "Ġwagon": 31860, + "Ġzinc": 31861, + "Ġrubbed": 31862, + "ĠFuller": 31863, + "ĠLuxembourg": 31864, + "ĠHoover": 31865, + "Ġliar": 31866, + "ĠEvening": 31867, + "ĠCobb": 31868, + "esteem": 31869, + "Ġselector": 31870, + "ĠBrawl": 31871, + "isance": 31872, + "ĠEk": 31873, + "Ġtroop": 31874, + "Ġguts": 31875, + "ĠAppeal": 31876, + "ĠTibetan": 31877, + "Ġroutines": 31878, + "ĠMent": 31879, + "Ġsummarized": 31880, + "steamapps": 31881, + "Ġtranqu": 31882, + "Ġ1929": 31883, + "oran": 31884, + "ĠAuthent": 31885, + 
"Ġgmaxwell": 31886, + "Ġapprehens": 31887, + "Ġpoems": 31888, + "Ġsausage": 31889, + "ĠWebster": 31890, + "urus": 31891, + "Ġthemed": 31892, + "Ġlounge": 31893, + "Ġcharger": 31894, + "Spoiler": 31895, + "Ġspilled": 31896, + "hog": 31897, + "ĠSunder": 31898, + "ĠAin": 31899, + "ĠAngry": 31900, + "Ġdisqual": 31901, + "ĠFrequency": 31902, + "ĠEthernet": 31903, + "Ġhelper": 31904, + "Percent": 31905, + "Ġhorrifying": 31906, + "Ġail": 31907, + "ĠAllan": 31908, + "EEE": 31909, + "ĠCrossing": 31910, + "449": 31911, + "Ġholog": 31912, + "ĠPuzzles": 31913, + "ĠGoes": 31914, + "erenn": 31915, + "604": 31916, + "ãģı": 31917, + "ĠRafael": 31918, + "Ġatten": 31919, + "ĠEmanuel": 31920, + "Ġupro": 31921, + "ĠSusp": 31922, + "Psych": 31923, + "ĠTrainer": 31924, + "ĠNES": 31925, + "ĠHunts": 31926, + "becue": 31927, + "Ġcounselor": 31928, + "Rule": 31929, + "Ġtoxins": 31930, + "Ġbanners": 31931, + "rifice": 31932, + "Ġgreeting": 31933, + "Ġfrenzy": 31934, + "Ġallocate": 31935, + "Ġ*)": 31936, + "expr": 31937, + "503": 31938, + "ĠChick": 31939, + "ĠTorn": 31940, + "Ġconsolidation": 31941, + "ĠFletcher": 31942, + "switch": 31943, + "frac": 31944, + "clips": 31945, + "ĠMcKin": 31946, + "ĠLunar": 31947, + "Month": 31948, + "ITCH": 31949, + "Ġscholarly": 31950, + "raped": 31951, + "398": 31952, + "Ġ1910": 31953, + "Ġegreg": 31954, + "Ġinsecure": 31955, + "Ġvictorious": 31956, + "cffffcc": 31957, + "Ġsingled": 31958, + "Ġelves": 31959, + "ĠWond": 31960, + "burst": 31961, + "Ġcamoufl": 31962, + "ĠBLACK": 31963, + "Ġconditioned": 31964, + "çī": 31965, + "answered": 31966, + "Ġcompulsory": 31967, + "ascist": 31968, + "Ġpodcasts": 31969, + "ĠFrankfurt": 31970, + "bnb": 31971, + "Ġneoliberal": 31972, + "ĠKeyboard": 31973, + "ĠBelle": 31974, + "warm": 31975, + "Ġtrusts": 31976, + "Ġinsured": 31977, + "ĠBucc": 31978, + "usable": 31979, + "607": 31980, + "ĠPlains": 31981, + "Ġ1890": 31982, + "Ġsabotage": 31983, + "Ġlodged": 31984, + "felt": 31985, + "Ġga": 31986, + "ĠNarc": 31987, + "ĠSalem": 31988, + "Ġseventy": 31989, + "ĠBlank": 31990, + "pocket": 31991, + "Ġwhisper": 31992, + "Ġmating": 31993, + "omics": 31994, + "ĠSalman": 31995, + "ĠKad": 31996, + "Ġangered": 31997, + "Ġcollisions": 31998, + "Ġextraordinarily": 31999, + "Ġcoercion": 32000, + "Ghost": 32001, + "birds": 32002, + "èĢ": 32003, + "kok": 32004, + "Ġpermissible": 32005, + "avorable": 32006, + "Ġpointers": 32007, + "Ġdissip": 32008, + "aci": 32009, + "Ġtheatrical": 32010, + "ĠCosmic": 32011, + "Ġforgetting": 32012, + "Ġfinalized": 32013, + "大": 32014, + "yout": 32015, + "library": 32016, + "Ġbooming": 32017, + "ĠBelieve": 32018, + "ĠTeacher": 32019, + "ĠLiv": 32020, + "ĠGOODMAN": 32021, + "ĠDominican": 32022, + "ORED": 32023, + "ĠParties": 32024, + "Ġprecipitation": 32025, + "ĠSlot": 32026, + "Roy": 32027, + "ĠCombined": 32028, + "Ġintegrating": 32029, + "Ġchrome": 32030, + "Ġintestinal": 32031, + "ĠRebell": 32032, + "Ġmatchups": 32033, + "Ġblockbuster": 32034, + "ĠLoren": 32035, + "ĠLevy": 32036, + "Ġpreaching": 32037, + "ĠSending": 32038, + "ĠPurpose": 32039, + "rax": 32040, + "fif": 32041, + "Ġauthoritative": 32042, + "ĠPET": 32043, + "astical": 32044, + "Ġdishon": 32045, + "Ġchatting": 32046, + "Ġ\"$:/": 32047, + "Connection": 32048, + "Ġrecreate": 32049, + "Ġdelinqu": 32050, + "Ġbroth": 32051, + "ĠDirty": 32052, + "ĠAdmin": 32053, + "zman": 32054, + "Ġscholarships": 32055, + "Ġ253": 32056, + "contact": 32057, + "alsa": 32058, + "767": 32059, + "creen": 32060, + "abbage": 32061, + "Ġ1915": 32062, + "Ġblended": 32063, + "Ġalarmed": 32064, + 
"Language": 32065, + "356": 32066, + "Ġblends": 32067, + "ĠChanged": 32068, + "Wolf": 32069, + "Ġhepat": 32070, + "Creating": 32071, + "Ġpersecut": 32072, + "Ġsweetness": 32073, + "arte": 32074, + "Ġforfeiture": 32075, + "ĠRoberto": 32076, + "impro": 32077, + "NFL": 32078, + "ĠMagnet": 32079, + "Detailed": 32080, + "Ġinsignificant": 32081, + "ĠPOLIT": 32082, + "ĠBBQ": 32083, + "ĠCPS": 32084, + "Ġseaw": 32085, + "aminer": 32086, + "mL": 32087, + "endif": 32088, + "finals": 32089, + "Ġ265": 32090, + "uish": 32091, + "Ġ})": 32092, + "ĠProblems": 32093, + "Ġemblem": 32094, + "Ġseriousness": 32095, + "Ġparsing": 32096, + "Ġsubstitution": 32097, + "Ġpressured": 32098, + "Ġrecycled": 32099, + "aleb": 32100, + "Ruby": 32101, + "Ġproficiency": 32102, + "Driver": 32103, + "ĠWester": 32104, + ":'": 32105, + "AFTA": 32106, + "Ġmantle": 32107, + "ĠClayton": 32108, + "flag": 32109, + "Ġpractitioner": 32110, + "covered": 32111, + "ĠStruct": 32112, + "addafi": 32113, + "425": 32114, + "ĠTownship": 32115, + "ĠHydro": 32116, + "Louis": 32117, + "343": 32118, + "Ġcondo": 32119, + "ĠTao": 32120, + "Ġutilization": 32121, + "Ġnausea": 32122, + "ĠDems": 32123, + "ridges": 32124, + "pause": 32125, + "Ġformulas": 32126, + "Ġchallenger": 32127, + "376": 32128, + "Ġdefective": 32129, + "ĠRailway": 32130, + "ĠPubMed": 32131, + "Ġyogurt": 32132, + "lbs": 32133, + "ĠNorfolk": 32134, + "OPE": 32135, + "ĠMoody": 32136, + "Ġdistributor": 32137, + "Ġscrolls": 32138, + "Ġextracts": 32139, + "Stan": 32140, + "Ġviability": 32141, + "Ġexposes": 32142, + "Ġstarvation": 32143, + "ĠSteps": 32144, + "ĠDodd": 32145, + "few": 32146, + "STD": 32147, + "332": 32148, + "Ġclosures": 32149, + "Ġcomplementary": 32150, + "ĠSasha": 32151, + "umpy": 32152, + "Ġmonet": 32153, + "Ġarticulate": 32154, + "ĠDoct": 32155, + "killer": 32156, + "Ġscrim": 32157, + "Ġ264": 32158, + "Ġprostitutes": 32159, + "Ġsevered": 32160, + "Ġattachments": 32161, + "Ġcooled": 32162, + "Lev": 32163, + "ĠFalk": 32164, + "fail": 32165, + "Ġpoliceman": 32166, + "ĠDag": 32167, + "Ġprayed": 32168, + "ĠKernel": 32169, + "Ġclut": 32170, + "Ġcath": 32171, + "Ġanomaly": 32172, + "Storm": 32173, + "emaker": 32174, + "ĠBreakfast": 32175, + "uli": 32176, + "oire": 32177, + "JJ": 32178, + "hz": 32179, + "Operation": 32180, + "ĠSick": 32181, + "354": 32182, + "ĠGuatemala": 32183, + "Rate": 32184, + "Ġexposures": 32185, + "faces": 32186, + "ĠArchae": 32187, + "raf": 32188, + "ĠMia": 32189, + "Ġ2025": 32190, + "Ġopaque": 32191, + "Ġdisguised": 32192, + "ĠHeadquarters": 32193, + "Sah": 32194, + "Ġpots": 32195, + "978": 32196, + "ĠMalf": 32197, + "Ġfrowned": 32198, + "Ġpoisonous": 32199, + "ĠConvers": 32200, + "eeks": 32201, + "Ġcrab": 32202, + ".\"\"": 32203, + "Ġtreason": 32204, + "Ġranc": 32205, + "Ġescalating": 32206, + "Ġwarr": 32207, + "Ġmobs": 32208, + "Ġlamps": 32209, + "ĠSunshine": 32210, + "ĠBrunswick": 32211, + "Phones": 32212, + "Ġspelled": 32213, + "ĠSkip": 32214, + "Ġ2050": 32215, + "Ġ1911": 32216, + "ĠPluto": 32217, + "ĠAmend": 32218, + "Ġmeats": 32219, + "387": 32220, + "Ġstomp": 32221, + "ĠZhou": 32222, + "ĠLeviathan": 32223, + "ĠHazard": 32224, + "adv": 32225, + "ĠOrwell": 32226, + "Ġaloud": 32227, + "Ġbumper": 32228, + "ĠAnarch": 32229, + "ubuntu": 32230, + "ĠSerious": 32231, + "fitting": 32232, + "ĠOptional": 32233, + "ĠCecil": 32234, + "REAM": 32235, + "Ġserotonin": 32236, + "Ġcultivate": 32237, + "agogue": 32238, + "}\\": 32239, + "Ġmosques": 32240, + "ĠSunny": 32241, + "Ġreactive": 32242, + "revolution": 32243, + "ĠLup": 32244, + "ĠFedora": 32245, + 
"Ġdefenseman": 32246, + "ĠVID": 32247, + "istine": 32248, + "Ġdrowning": 32249, + "ĠBroadcasting": 32250, + "Ġthriller": 32251, + "ĠScy": 32252, + "Ġaccelerating": 32253, + "Ġdirects": 32254, + "odied": 32255, + "bike": 32256, + "duration": 32257, + "Ġpainfully": 32258, + "Redd": 32259, + "Ġproductions": 32260, + "Ġgag": 32261, + "Ġwhist": 32262, + "Ġsock": 32263, + "Ġinfinitely": 32264, + "ĠConcern": 32265, + "ĠCitadel": 32266, + "Ġlieu": 32267, + "Ġcandles": 32268, + "ogeneous": 32269, + "arger": 32270, + "Ġheavenly": 32271, + "inflammatory": 32272, + "Performance": 32273, + "Cs": 32274, + "ructose": 32275, + "azaki": 32276, + "Ġpessim": 32277, + "Ġinference": 32278, + "Ġpowd": 32279, + "ĠZoe": 32280, + "Ġpaints": 32281, + "Ġdazz": 32282, + "pta": 32283, + "-----------": 32284, + "Ġinspir": 32285, + "ĠExperimental": 32286, + "ĠKnife": 32287, + "regor": 32288, + "bors": 32289, + "Ġshowers": 32290, + "romeda": 32291, + "Ġsaint": 32292, + "Ġbenign": 32293, + "ĠJiang": 32294, + "Ġenvisioned": 32295, + "Ġshroud": 32296, + "IFT": 32297, + "HO": 32298, + "Ġshuff": 32299, + "ĠICC": 32300, + "Ġsegreg": 32301, + "Ġrevisit": 32302, + "ighthouse": 32303, + "Li": 32304, + "Ġsubstrate": 32305, + "ĠSeas": 32306, + "ĠReward": 32307, + "ĠHep": 32308, + "ĠBrass": 32309, + "sbm": 32310, + "Ġeliminates": 32311, + "Ġstamina": 32312, + "ĠVAT": 32313, + "ĠLoan": 32314, + "Ġconstraint": 32315, + "Ġappropriated": 32316, + "Ġpes": 32317, + "ĠALE": 32318, + "ranging": 32319, + "Ġ404": 32320, + "392": 32321, + "Ġintellectuals": 32322, + "achu": 32323, + "Ġrestructuring": 32324, + "ĠLevin": 32325, + "Ġrunes": 32326, + "Ġdelightful": 32327, + "Ġcarbohydrates": 32328, + "ĠModels": 32329, + "ĠExpo": 32330, + "Ġtransporting": 32331, + "alloc": 32332, + "Ġringing": 32333, + "Samsung": 32334, + "Ġscarcely": 32335, + "ĠURLs": 32336, + "ĠMAS": 32337, + "Ġprototypes": 32338, + "Ġnarrator": 32339, + "ĠCPUs": 32340, + "cdn": 32341, + "ĠBarton": 32342, + "Ġdecidedly": 32343, + "ĠShu": 32344, + "ixir": 32345, + "ocious": 32346, + "ĠMyst": 32347, + "Nintendo": 32348, + "Ġreuse": 32349, + "Ġforgiven": 32350, + "Few": 32351, + "inical": 32352, + "nat": 32353, + "Ġseamless": 32354, + "ĠEva": 32355, + "ĠEVE": 32356, + "ĠJO": 32357, + "landers": 32358, + "Ġsofter": 32359, + "negie": 32360, + "Ġtransient": 32361, + "Ġorbital": 32362, + "Ġfulfil": 32363, + "ĠKom": 32364, + "Hopefully": 32365, + "Ġdynamically": 32366, + "ĠHunger": 32367, + "åĽ": 32368, + "ĠArmenia": 32369, + "elman": 32370, + "berto": 32371, + "Ġpige": 32372, + "ĠIDs": 32373, + "limit": 32374, + "Ġveins": 32375, + "Ġsoaring": 32376, + "packs": 32377, + "Golden": 32378, + "ĠCrab": 32379, + "istor": 32380, + "ĠRPM": 32381, + "Ġ$$": 32382, + "gression": 32383, + "Ġjihadist": 32384, + "Ġgamble": 32385, + "Ġcareg": 32386, + "Ġinflated": 32387, + "Face": 32388, + "ĠFirearms": 32389, + "ĠEmmanuel": 32390, + "âĿ": 32391, + "Ġshocks": 32392, + "grab": 32393, + "Ġsplend": 32394, + "ĠHPV": 32395, + "abortion": 32396, + "Above": 32397, + "Entity": 32398, + "players": 32399, + "Ġcommenced": 32400, + "ulence": 32401, + "Ġfulfillment": 32402, + "Ġembodiments": 32403, + "ĠWelfare": 32404, + "Ġhail": 32405, + "Ġ<@": 32406, + "tten": 32407, + "Ġcatcher": 32408, + "ĠJazeera": 32409, + "Ġvolcano": 32410, + "Ġstabilize": 32411, + "ĠHandler": 32412, + "Ġintensified": 32413, + "ĠAbrams": 32414, + "Ġhumiliation": 32415, + "paced": 32416, + "605": 32417, + "ĠCentOS": 32418, + "Specific": 32419, + "Ġheed": 32420, + "ĠCAM": 32421, + "ĠGalile": 32422, + "Die": 32423, + "Ġabolished": 32424, + 
"ĠThomson": 32425, + "ĠTeachers": 32426, + "ĠWass": 32427, + "jong": 32428, + "ĠISBN": 32429, + "ĠAllies": 32430, + "shake": 32431, + "å·": 32432, + "vict": 32433, + "Howard": 32434, + "Ġdeem": 32435, + "Ġexceedingly": 32436, + "ĠSmartstocks": 32437, + "ibe": 32438, + "Ġdoorway": 32439, + "Ġcompeted": 32440, + "igmat": 32441, + "Ġnationalists": 32442, + "Ġgroom": 32443, + "ĠKeen": 32444, + "Ġdisposable": 32445, + "decl": 32446, + "ĠTolkien": 32447, + "ĠScheme": 32448, + "Ġbiod": 32449, + "Ġavid": 32450, + "ĠElon": 32451, + "agar": 32452, + "ĠTSA": 32453, + "Roman": 32454, + "Ġartificially": 32455, + "Ġadvisors": 32456, + "XL": 32457, + "ĠInferno": 32458, + "366": 32459, + "Ġtedious": 32460, + "ĠPhotography": 32461, + "ĠCarrie": 32462, + "Ġtrope": 32463, + "ĠSandra": 32464, + "Ġdecimal": 32465, + "Queen": 32466, + "ĠGundam": 32467, + "ĠOM": 32468, + "otech": 32469, + "NBA": 32470, + "Ġ1932": 32471, + "Ġentrenched": 32472, + "ĠMarion": 32473, + "Ġfraternity": 32474, + "Labour": 32475, + "Henry": 32476, + "Ġlatitude": 32477, + "Either": 32478, + "Ġenhances": 32479, + "ĠPotential": 32480, + "Ġshines": 32481, + "idad": 32482, + "Ġbreadth": 32483, + "Ġcapacities": 32484, + "ĠðŁĻĤ": 32485, + "ĠBronx": 32486, + "Ġsexes": 32487, + "Ġdifferentiation": 32488, + "Ġheavyweight": 32489, + "ĠTaj": 32490, + "dra": 32491, + "Ġmigrate": 32492, + "Ġexhaustion": 32493, + "ĠRUN": 32494, + "elsius": 32495, + "ĠCuomo": 32496, + "Ġguitars": 32497, + "Ġclones": 32498, + "ĠSomew": 32499, + "ĠPry": 32500, + "-------------": 32501, + "Ġwarranted": 32502, + "cycles": 32503, + "Ġsalvage": 32504, + "Ġdisks": 32505, + "RANT": 32506, + "ĠNGOs": 32507, + "ĠMartian": 32508, + "\":[{\"": 32509, + "Ġaddicts": 32510, + "ojure": 32511, + "illet": 32512, + "Ġamazingly": 32513, + "artments": 32514, + "pixel": 32515, + "ĠGPUs": 32516, + "Layout": 32517, + "è£": 32518, + "ĠTamil": 32519, + "ĠBasil": 32520, + "Ġimpartial": 32521, + "ĠStructure": 32522, + "fork": 32523, + "bryce": 32524, + "Ġridge": 32525, + "ĠHamburg": 32526, + "rious": 32527, + "Ġblitz": 32528, + "cigarettes": 32529, + "Ġcanned": 32530, + "402": 32531, + "Ġironically": 32532, + "Ġcompassionate": 32533, + "ĠHawkins": 32534, + ".#": 32535, + "ĠCathedral": 32536, + "Ġrallied": 32537, + "internal": 32538, + "Ġquota": 32539, + "stakes": 32540, + "TEXT": 32541, + "mom": 32542, + "Ġcompletes": 32543, + "Ġ238": 32544, + "Ġshrug": 32545, + "ãĥij": 32546, + "ĠNinth": 32547, + "Ġrevise": 32548, + "ĠProvider": 32549, + "Ġtreacher": 32550, + "Ġquasi": 32551, + "ĠPRES": 32552, + "Ġdeposition": 32553, + "Ġconfidentiality": 32554, + "issors": 32555, + "Ġimbalance": 32556, + "Ġspanning": 32557, + "Ġangular": 32558, + "ĠCul": 32559, + "communication": 32560, + "ĠNora": 32561, + "ĠGenius": 32562, + "opter": 32563, + "Ġsacked": 32564, + "Spot": 32565, + "Ġfinely": 32566, + "ĠCHR": 32567, + "282": 32568, + "waves": 32569, + "Palest": 32570, + "ĠRohing": 32571, + "NL": 32572, + "è¿": 32573, + "Ġshitty": 32574, + "ĠScalia": 32575, + "475": 32576, + "Progress": 32577, + "Ġreferencing": 32578, + "Ġclassrooms": 32579, + "abee": 32580, + "Ġsod": 32581, + "hesion": 32582, + "708": 32583, + "ĠZuckerberg": 32584, + "ĠFinish": 32585, + "ĠScotia": 32586, + "ĠSavior": 32587, + "ĠInstallation": 32588, + "antha": 32589, + "(-": 32590, + "Ġ302": 32591, + "ĠPunk": 32592, + "Ġcrater": 32593, + "youtu": 32594, + "Ġroast": 32595, + "Ġinfluencing": 32596, + "Ġdup": 32597, + "ĠJR": 32598, + "ĠGrav": 32599, + "Ġstature": 32600, + "Ġbathrooms": 32601, + "Aside": 32602, + "Wiki": 32603, + "mean": 32604, + 
"ĠZak": 32605, + "ĠOnes": 32606, + "ĠNath": 32607, + "Ġhypert": 32608, + "Ġcommencement": 32609, + "Civil": 32610, + "Ġmoderately": 32611, + "Ġdistributors": 32612, + "Ġbreastfeeding": 32613, + "Ġ980": 32614, + "ĠSik": 32615, + "ĠCig": 32616, + "ĠAMER": 32617, + "RIP": 32618, + "ĠCareer": 32619, + "usting": 32620, + "Ġmessed": 32621, + "Ġeh": 32622, + "ĠJensen": 32623, + "/$": 32624, + "Ġblackmail": 32625, + "Ġconversions": 32626, + "Ġscientifically": 32627, + "Ġmantra": 32628, + "paying": 32629, + "Ġivory": 32630, + "ĠCourts": 32631, + "OUGH": 32632, + "auntlet": 32633, + "Serial": 32634, + "Brow": 32635, + "ĠHundreds": 32636, + "323": 32637, + "Ġpee": 32638, + "Ġlinux": 32639, + "Ġsubmer": 32640, + "ĠPrincipal": 32641, + "485": 32642, + "ĠDSL": 32643, + "ĠCousins": 32644, + "Ġdoctrines": 32645, + "ĠAthletics": 32646, + "Ġ315": 32647, + "ĠKarma": 32648, + "Ġattent": 32649, + "urger": 32650, + "Ġprescribe": 32651, + "Ġencaps": 32652, + "ĠCame": 32653, + "Ġsecretive": 32654, + "ĠCrimes": 32655, + "dn": 32656, + "Clean": 32657, + "ĠEgyptians": 32658, + "ĠCarpenter": 32659, + "Ġll": 32660, + "Hum": 32661, + "ĠMilo": 32662, + "Ġcapitalists": 32663, + "Ġbriefed": 32664, + "Twe": 32665, + "ĠBasin": 32666, + "elvet": 32667, + "Mos": 32668, + "Ġplunge": 32669, + "ĠKaiser": 32670, + "ĠFuj": 32671, + "illin": 32672, + "Ġsafeguards": 32673, + "Ġoste": 32674, + "ĠOpportunity": 32675, + "ĠMafia": 32676, + "ĠCalling": 32677, + "apa": 32678, + "urban": 32679, + "brush": 32680, + "illard": 32681, + "cé": 32682, + "intelligence": 32683, + "ĠLob": 32684, + "ĠDruid": 32685, + "Ġsmoother": 32686, + "Ġfooting": 32687, + "Ġmotorists": 32688, + "arcity": 32689, + "Ġmasculinity": 32690, + "Ġmism": 32691, + "Ġabdominal": 32692, + "ĠTavern": 32693, + "ĠRoh": 32694, + "Ġescapes": 32695, + "signed": 32696, + "Anthony": 32697, + "Ġsacrificing": 32698, + "Ġintimacy": 32699, + "Ġanterior": 32700, + "ĠKod": 32701, + "Ġmotif": 32702, + "Ġgraz": 32703, + "Ġvisualization": 32704, + "Ġguitarist": 32705, + "ĠTrotsky": 32706, + "magic": 32707, + "Dar": 32708, + "ĠMori": 32709, + "Ġwards": 32710, + "Ġtoilets": 32711, + "lest": 32712, + "Ġteleport": 32713, + "ĠSundays": 32714, + "ĠPlat": 32715, + "ETS": 32716, + "ĠeSports": 32717, + "Patrick": 32718, + "ĠKatherine": 32719, + "enko": 32720, + "Ġhassle": 32721, + "ĠMick": 32722, + "ggles": 32723, + "Ġhob": 32724, + "aintain": 32725, + "Ġairborne": 32726, + "Ġspans": 32727, + "Ġchili": 32728, + "Ġaperture": 32729, + "Ġvolunteered": 32730, + "ĠIncident": 32731, + "ĠFres": 32732, + "ĠVeteran": 32733, + "aughtered": 32734, + "ingo": 32735, + "Ġuninsured": 32736, + "CLOSE": 32737, + "Ġfuse": 32738, + "Ġerotic": 32739, + "Ġadvertise": 32740, + "raising": 32741, + "Texture": 32742, + "Ġattends": 32743, + "ĠREAL": 32744, + "uddled": 32745, + "Ġsmoot": 32746, + "Ġ305": 32747, + "ĠWillis": 32748, + "Ġblond": 32749, + "Analysis": 32750, + "ĠVT": 32751, + "onica": 32752, + "Ġstronghold": 32753, + "RF": 32754, + "NM": 32755, + ".>>": 32756, + "Ġprosperous": 32757, + "Ġboasted": 32758, + "292": 32759, + "ĠManufacturing": 32760, + "PRESS": 32761, + "gren": 32762, + "Ġpharmacy": 32763, + "ĠRockefeller": 32764, + "kai": 32765, + "Ġthumbs": 32766, + "ĠHut": 32767, + "Ġmotherboard": 32768, + "Ġguardians": 32769, + "ĠAlter": 32770, + "llular": 32771, + "Ġshack": 32772, + "Ġwisely": 32773, + "Ġbackbone": 32774, + "erva": 32775, + "Ġsuicides": 32776, + "ĠMcGregor": 32777, + "ijah": 32778, + "Emer": 32779, + "ĠBrav": 32780, + "Ġdesignate": 32781, + "POST": 32782, + "produced": 32783, + "Ġcleansing": 
32784, + "irlwind": 32785, + "existent": 32786, + "ĠHumph": 32787, + "ĠPayne": 32788, + "Ġvested": 32789, + "Å¡": 32790, + "Ġstringent": 32791, + "iona": 32792, + "Ġunsub": 32793, + "Ġsummed": 32794, + "ĠHercules": 32795, + "subject": 32796, + "ĠRagnar": 32797, + "ĠNos": 32798, + "Ġcharacterization": 32799, + "Ġsavvy": 32800, + "ĠDawson": 32801, + "ĠCasino": 32802, + "Ġfri": 32803, + "ĠBarrier": 32804, + "Ġmisinformation": 32805, + "Ġinsulation": 32806, + "Ġcorridors": 32807, + "Ġairplanes": 32808, + "ĠNoct": 32809, + "ahi": 32810, + "Ġ1916": 32811, + "kb": 32812, + "armac": 32813, + "Ġshun": 32814, + "Ġschema": 32815, + "Ġhorrified": 32816, + "Ġ239": 32817, + "aunders": 32818, + "NB": 32819, + "iates": 32820, + "erity": 32821, + "ĠShard": 32822, + "Ġrarity": 32823, + "Ġgrouped": 32824, + "ĠGhana": 32825, + "against": 32826, + "ĠBiological": 32827, + "ĠAware": 32828, + "owell": 32829, + "ÏĦ": 32830, + "ĠBeau": 32831, + "shaw": 32832, + "Hack": 32833, + "ĠJulius": 32834, + "USS": 32835, + "olson": 32836, + "auna": 32837, + "cru": 32838, + "ĠMaurice": 32839, + "ĠIk": 32840, + "Ġsequencing": 32841, + "Ġradicals": 32842, + "Ġ(?,": 32843, + "virtual": 32844, + "Ġanyways": 32845, + "Ġreperc": 32846, + "Ġhandlers": 32847, + "Ġhesitant": 32848, + "éĥ": 32849, + "ĠMF": 32850, + "plementation": 32851, + "associated": 32852, + "Ġcampaigned": 32853, + "ĠYue": 32854, + "utations": 32855, + "ĠYoga": 32856, + "Ġsimmer": 32857, + "Ġrods": 32858, + "Ġmelody": 32859, + "Ġconvoy": 32860, + "videos": 32861, + "Ġscreened": 32862, + "Neg": 32863, + "ochemical": 32864, + "Ġ())": 32865, + "Ġultras": 32866, + "Ġantip": 32867, + "ĠIslanders": 32868, + "704": 32869, + "Ġfetish": 32870, + "Ġridiculously": 32871, + "ĠKart": 32872, + "Ġmitochondrial": 32873, + "Ġinterfering": 32874, + "Builder": 32875, + "Ġoverfl": 32876, + "Ġacne": 32877, + "ĠMud": 32878, + "ĠKerr": 32879, + "flex": 32880, + "ĠPostal": 32881, + "ĠBaltic": 32882, + "477": 32883, + "ĠPersons": 32884, + "ourage": 32885, + "HB": 32886, + "ĠMuse": 32887, + "ĠImmortal": 32888, + "ĠDriving": 32889, + "Ġpetitions": 32890, + "Ġsubscript": 32891, + "Ġsorce": 32892, + "ĠProcessor": 32893, + "uton": 32894, + "Sony": 32895, + "Ġphon": 32896, + "Ġraced": 32897, + "ĠAnthrop": 32898, + "Ġdaytime": 32899, + "ĠExercise": 32900, + "Adding": 32901, + "Ġengages": 32902, + "ĠQualcomm": 32903, + "Ġmiracles": 32904, + "Ġmemes": 32905, + "ĠDrink": 32906, + "ĠOrioles": 32907, + "Ġhairs": 32908, + "ĠPolar": 32909, + "athom": 32910, + "Ġslippery": 32911, + "ĠRemy": 32912, + "Ġcaramel": 32913, + "ĠYEAR": 32914, + "Ġalk": 32915, + "Ign": 32916, + "aution": 32917, + "ĠMerlin": 32918, + "ĠCran": 32919, + "Ġapologies": 32920, + "Ġ410": 32921, + "Ġouting": 32922, + "ĠMemories": 32923, + "appointed": 32924, + "Ġcountered": 32925, + "uld": 32926, + "posing": 32927, + "Ġfirewall": 32928, + "ĠWast": 32929, + "ĠWet": 32930, + "worked": 32931, + "seller": 32932, + "Ġrepealed": 32933, + "ereo": 32934, + "assuming": 32935, + "BLIC": 32936, + "mite": 32937, + "ĠCEOs": 32938, + "ĠChapel": 32939, + "elligent": 32940, + "________________________": 32941, + "Dog": 32942, + "Ġwart": 32943, + "Ġsubscriber": 32944, + "sports": 32945, + "Ġbegged": 32946, + "ĠMV": 32947, + "Ġsemif": 32948, + "ethical": 32949, + "Ġpreach": 32950, + "Ġrevital": 32951, + "Ġpunitive": 32952, + "Ġshortcuts": 32953, + "Ġinstituted": 32954, + "ĠWarsaw": 32955, + "Ġabdomen": 32956, + "ĠKING": 32957, + "Ġsuperintendent": 32958, + "Ġfry": 32959, + "ĠGeo": 32960, + "TOR": 32961, + "Ġcontradictions": 32962, + "aptic": 32963, + 
"Ġlandscapes": 32964, + "bugs": 32965, + "Ġclust": 32966, + "Ġvolley": 32967, + "cribed": 32968, + "Ġtandem": 32969, + "Ġrobes": 32970, + "WHAT": 32971, + "Ġpromoter": 32972, + "Ġeloqu": 32973, + "reviewed": 32974, + "ĠDK": 32975, + "ĠPlato": 32976, + "Ġfps": 32977, + "Tank": 32978, + "ĠDerrick": 32979, + "Ġprioritize": 32980, + "asper": 32981, + "ĠHonduras": 32982, + "ĠCompleted": 32983, + "nec": 32984, + "Ġmog": 32985, + "nir": 32986, + "ĠMayo": 32987, + "DEF": 32988, + "stall": 32989, + "inness": 32990, + "ĠVolkswagen": 32991, + "Ġprecaution": 32992, + "ĠMell": 32993, + "iak": 32994, + "istries": 32995, + "Ġ248": 32996, + "Ġoverlapping": 32997, + "Senate": 32998, + "ĠEnhance": 32999, + "resy": 33000, + "racial": 33001, + "ORTS": 33002, + "ĠMormons": 33003, + "Strong": 33004, + "ĠCoch": 33005, + "Mexico": 33006, + "ĠMaduro": 33007, + "Ġjars": 33008, + "Ġcane": 33009, + "Wik": 33010, + "olla": 33011, + "ifference": 33012, + "Ġphysicist": 33013, + "ĠMaggie": 33014, + "Ġ285": 33015, + "Ġdepiction": 33016, + "ĠMcLaren": 33017, + "Ju": 33018, + "Ġslows": 33019, + "Ġcommissioners": 33020, + "ĠWillow": 33021, + "ĠExplos": 33022, + "hovah": 33023, + "Ġtechnician": 33024, + "Ġhomicides": 33025, + "ĠFlav": 33026, + "ĠTruman": 33027, + "Ġ10000": 33028, + "uctor": 33029, + "Ġshader": 33030, + "Newsletter": 33031, + "457": 33032, + "Ġrever": 33033, + "Ġhardened": 33034, + "Ġwhereabouts": 33035, + "Ġredevelop": 33036, + "Ġcarbs": 33037, + "Ġtravers": 33038, + "Ġsquirrel": 33039, + "Ġfollower": 33040, + "Ġsings": 33041, + "508": 33042, + "Ġrabbits": 33043, + "emonium": 33044, + "Ġdocumenting": 33045, + "Ġmisunderstood": 33046, + ")'": 33047, + "Rick": 33048, + "ggies": 33049, + "Ġpremie": 33050, + "Ġskating": 33051, + "Ġpassports": 33052, + "Ġfists": 33053, + "ageddon": 33054, + "Haw": 33055, + "ACP": 33056, + "080": 33057, + "ĠThoughts": 33058, + "ĠCarlson": 33059, + "Ġpriesthood": 33060, + "hua": 33061, + "Ġdungeons": 33062, + "ĠLoans": 33063, + "Ġantis": 33064, + "Ġfamiliarity": 33065, + "ĠSabb": 33066, + "opal": 33067, + "ĠInk": 33068, + "strike": 33069, + "Ġcram": 33070, + "Ġlegalized": 33071, + "Ġcuisine": 33072, + "Ġfibre": 33073, + "Travel": 33074, + "ĠMonument": 33075, + "ODY": 33076, + "ethy": 33077, + "Ġinterstate": 33078, + "ĠPUR": 33079, + "emporary": 33080, + "ĠArabian": 33081, + "developed": 33082, + "Ġsaddle": 33083, + "Ġgithub": 33084, + "ĠOffer": 33085, + "ĠISP": 33086, + "rolet": 33087, + "ĠSUPER": 33088, + "ĠDenis": 33089, + "Ġmultiplier": 33090, + "Ġstirred": 33091, + "Interestingly": 33092, + "Ġcustomary": 33093, + "Ġbilled": 33094, + "hex": 33095, + "Ġmultiplied": 33096, + "Ġflipping": 33097, + "ĠCrosby": 33098, + "Ġfundamentals": 33099, + "iae": 33100, + "ĠPlayed": 33101, + "ĠAtom": 33102, + "amazon": 33103, + "ĠFlam": 33104, + "eez": 33105, + "activated": 33106, + "Ġtablespoon": 33107, + "Ġliberalism": 33108, + "ĠPalin": 33109, + "ĠPatel": 33110, + "Num": 33111, + "ĠTAM": 33112, + "Ġsurn": 33113, + "ĠReloaded": 33114, + "Ġcoined": 33115, + "\"],": 33116, + "ĠClash": 33117, + "ĠAgu": 33118, + "Ġpragmatic": 33119, + "ĠActivate": 33120, + "Ġ802": 33121, + "Ġtrailers": 33122, + "Ġsilhou": 33123, + "Ġprobes": 33124, + "Ġcircus": 33125, + "ĠBain": 33126, + "ĠLindsay": 33127, + "ĠAbbey": 33128, + "Delivery": 33129, + "Ġconcession": 33130, + "Ġgastro": 33131, + "ĠSprite": 33132, + "ÄŁ": 33133, + "andel": 33134, + "Ġgimm": 33135, + "Ġautobi": 33136, + "ĠTurtle": 33137, + "Ġwonderfully": 33138, + "ĠHaram": 33139, + "ĠWorldwide": 33140, + "ĠHandle": 33141, + "Ġtheorists": 33142, + 
"Ġsleek": 33143, + "ĠZhu": 33144, + "ographically": 33145, + "EGA": 33146, + "ĠOwners": 33147, + "aths": 33148, + "ĠAntarctic": 33149, + "natal": 33150, + "=\"\"": 33151, + "flags": 33152, + "````": 33153, + "Ġsul": 33154, + "Kh": 33155, + "Ġpotassium": 33156, + "Ġlineman": 33157, + "Ġcereal": 33158, + "ĠSeasons": 33159, + "Ġ2022": 33160, + "Ġmathematic": 33161, + "Ġastronomers": 33162, + "professional": 33163, + "Ġfares": 33164, + "cknowled": 33165, + "Ġchi": 33166, + "Ġyoungsters": 33167, + "Ġmistakenly": 33168, + "Ġhemisphere": 33169, + "ĠDivinity": 33170, + "rone": 33171, + "Ġ\",": 33172, + "rings": 33173, + "Ġattracts": 33174, + "vana": 33175, + "å¹": 33176, + "CAP": 33177, + "Ġplaylist": 33178, + "Ġporch": 33179, + "ãģ£": 33180, + "Ġincorporates": 33181, + "Ġsoak": 33182, + "Ġasserting": 33183, + "ĠTerrorism": 33184, + "ĠPablo": 33185, + "Ja": 33186, + "cester": 33187, + "Ġfearing": 33188, + "ĠPrayer": 33189, + "Ġescalated": 33190, + "GW": 33191, + "Ġrobe": 33192, + "ĠBrighton": 33193, + "acists": 33194, + "ĠSymphony": 33195, + "ĠDwarf": 33196, + "ĠParade": 33197, + "ĠLego": 33198, + "Ġinexpl": 33199, + "Ġlords": 33200, + "leaf": 33201, + "RAG": 33202, + "liber": 33203, + "Ġcigars": 33204, + "ĠJehovah": 33205, + "606": 33206, + "WINDOWS": 33207, + "ĠLiberia": 33208, + "ebus": 33209, + "Heavy": 33210, + "Ġlubric": 33211, + "ĠRW": 33212, + "anguages": 33213, + "Ġnarrowed": 33214, + "computer": 33215, + "ĠEmber": 33216, + "Ġmurdering": 33217, + "Ġdownstream": 33218, + "ĠTuls": 33219, + "ĠTables": 33220, + "Topic": 33221, + "ĠAccuracy": 33222, + "=/": 33223, + "lost": 33224, + "ĠRei": 33225, + "Ġprogresses": 33226, + "bear": 33227, + "Ġestablishments": 33228, + "Justin": 33229, + "ĠPeach": 33230, + "ĠGomez": 33231, + "å¿": 33232, + "ĠTriangle": 33233, + "Ident": 33234, + "ĠHive": 33235, + "Resources": 33236, + "Ġmixes": 33237, + "ĠAssuming": 33238, + "Mu": 33239, + "Ġhypoc": 33240, + "Ġsane": 33241, + "ĠWan": 33242, + "idious": 33243, + "Success": 33244, + "Ġio": 33245, + "Angel": 33246, + "Ġdangerously": 33247, + "ĠCreature": 33248, + "WORK": 33249, + ":[": 33250, + "ĠKatrina": 33251, + "Listener": 33252, + "Miller": 33253, + "ĠIdlib": 33254, + "hang": 33255, + "Ġcircumvent": 33256, + "href": 33257, + "Ġcelestial": 33258, + "ĠWeeks": 33259, + "ĠPug": 33260, + "ĠDalton": 33261, + "Ġsubpoena": 33262, + "uku": 33263, + "Ġpersisted": 33264, + "pei": 33265, + "olding": 33266, + "ĠDocuments": 33267, + "ĠHast": 33268, + "ĠCENT": 33269, + "Ġprimer": 33270, + "Ġsynonymous": 33271, + "Ġnib": 33272, + "ombs": 33273, + "Ġnotation": 33274, + "ĠDish": 33275, + "ĠAtmosp": 33276, + "Ġforbid": 33277, + "ĠANG": 33278, + "pattern": 33279, + "los": 33280, + "Ġprojectiles": 33281, + "brown": 33282, + ".\",": 33283, + "ĠVenom": 33284, + "Ġfiercely": 33285, + "ublished": 33286, + "ĠUran": 33287, + "ĠNicarag": 33288, + "410": 33289, + "ĠCAL": 33290, + "OTOS": 33291, + "ĠMiracle": 33292, + "ĠEnchant": 33293, + "Ġguarding": 33294, + "append": 33295, + "Attach": 33296, + "Ġleveled": 33297, + "Ġcondoms": 33298, + "ihilation": 33299, + "649": 33300, + "Ġnightmares": 33301, + "ĠTHEY": 33302, + "ĠSTART": 33303, + "ĠKinn": 33304, + "Ġroommate": 33305, + "Ġhygiene": 33306, + "opping": 33307, + "Job": 33308, + "Ġlvl": 33309, + "ĠVER": 33310, + "ĠKeeping": 33311, + "abetic": 33312, + "Ġformatting": 33313, + "erala": 33314, + "Ġrevisions": 33315, + "Ġresurg": 33316, + "Tel": 33317, + "ĠGoodman": 33318, + "353": 33319, + "pod": 33320, + "Ġindisp": 33321, + "ĠTranslation": 33322, + "Ġgown": 33323, + "ĠMund": 33324, + 
"Ġcis": 33325, + "Ġbystand": 33326, + "collect": 33327, + "ĠPunjab": 33328, + "actively": 33329, + "ĠGamb": 33330, + "tell": 33331, + "Ġimporting": 33332, + "gencies": 33333, + "Ġlocom": 33334, + "ĠBrill": 33335, + "Holy": 33336, + "ĠBerger": 33337, + "Ġshowdown": 33338, + "Ġresponders": 33339, + "ILY": 33340, + "Ġtakedown": 33341, + "leted": 33342, + "Ġmattered": 33343, + "Ġpredictive": 33344, + "Ġoverlay": 33345, + "GPU": 33346, + "ĠVick": 33347, + "Ġconveyed": 33348, + "Tab": 33349, + "peer": 33350, + "Scan": 33351, + "Ġdefensively": 33352, + "vae": 33353, + "Ġapproving": 33354, + "Ġtiers": 33355, + "ĠVia": 33356, + "querade": 33357, + "ĠSaudis": 33358, + "Ġdemolished": 33359, + "ĠProphe": 33360, + "Ġmono": 33361, + "Ġhospitality": 33362, + "HAM": 33363, + "ĠAriel": 33364, + "MOD": 33365, + "ĠTorah": 33366, + "Ġblah": 33367, + "ĠBelarus": 33368, + "erential": 33369, + "ĠTuc": 33370, + "Ġbanker": 33371, + "397": 33372, + "Ġmosquit": 33373, + "ĠScientist": 33374, + "ĠMusical": 33375, + "Ġhust": 33376, + "Shift": 33377, + "Ġtorment": 33378, + "Ġstandoff": 33379, + "Educ": 33380, + "ĠFog": 33381, + "Ġamplifier": 33382, + "Shape": 33383, + "Instance": 33384, + "ĠCritics": 33385, + "Ġdaemon": 33386, + "Houston": 33387, + "Ġmattress": 33388, + "ĠIDF": 33389, + "Ġobscene": 33390, + "ĠAmer": 33391, + "hetti": 33392, + "Ġcompiling": 33393, + "352": 33394, + "verett": 33395, + "ĠReduction": 33396, + "istration": 33397, + "ĠBlessed": 33398, + "ĠBachelor": 33399, + "316": 33400, + "Ġprank": 33401, + "ĠVulcan": 33402, + "dding": 33403, + "Ġmourning": 33404, + "ĠQuint": 33405, + "ĠBlaster": 33406, + "testing": 33407, + "Ġsediment": 33408, + ">>>": 33409, + "ĠEternity": 33410, + "ĠWHERE": 33411, + "ĠMaze": 33412, + "Ġreacting": 33413, + "ĠAlv": 33414, + "omsday": 33415, + "ĠCRA": 33416, + "Ġtranslator": 33417, + "Ġbogus": 33418, + "atu": 33419, + "Website": 33420, + "olls": 33421, + "Ġbaptism": 33422, + "Ġsibling": 33423, + "ĠAutumn": 33424, + "vez": 33425, + "ãģ®é": 33426, + "guards": 33427, + "Georg": 33428, + "assadors": 33429, + "ĠFreud": 33430, + "Ġcontinents": 33431, + "ĠRegistry": 33432, + "Bernie": 33433, + "ĸļ士": 33434, + "Ġtolerant": 33435, + "ĠUW": 33436, + "Ġhorribly": 33437, + "995": 33438, + "ĠMIDI": 33439, + "Ġimpatient": 33440, + "ocado": 33441, + "eri": 33442, + "ĠWorst": 33443, + "ĠNorris": 33444, + "ĠTalking": 33445, + "Ġdefends": 33446, + "ensable": 33447, + "Ġ2021": 33448, + "Ġanatomy": 33449, + "Lew": 33450, + "Ġdrawer": 33451, + "ĠCanberra": 33452, + "Ġpatriotic": 33453, + "é¾įåĸļ士": 33454, + "ĠAvg": 33455, + "ARM": 33456, + "Ġundisclosed": 33457, + "Ġfarewell": 33458, + "459": 33459, + "bable": 33460, + "ĠAllison": 33461, + "OLOG": 33462, + "Ġconco": 33463, + "tight": 33464, + "ĠACPI": 33465, + "ĠMines": 33466, + "lich": 33467, + "ĠâĶľ": 33468, + "represented": 33469, + "200000": 33470, + "Ġenthusiast": 33471, + "OTS": 33472, + "bil": 33473, + "ĠIngredients": 33474, + "Ġinventor": 33475, + "ĠMySQL": 33476, + "³³³": 33477, + "ĠABOUT": 33478, + "within": 33479, + "Ġmk": 33480, + "Bul": 33481, + "ĠFake": 33482, + "Ġdraconian": 33483, + "Wa": 33484, + "helm": 33485, + "ĠTerran": 33486, + "erville": 33487, + "Ġcommonplace": 33488, + "SIZE": 33489, + "Ġ\"<": 33490, + "replace": 33491, + "ographs": 33492, + "ĠSELECT": 33493, + "incible": 33494, + "ĠMostly": 33495, + "ĠSheffield": 33496, + "ĠIDE": 33497, + "uggle": 33498, + "Ġcitations": 33499, + "hurst": 33500, + "ĠUnix": 33501, + "Ġunleash": 33502, + "ĠPiper": 33503, + "ĠNano": 33504, + "Ġsuccumb": 33505, + "Ġreluctance": 33506, + 
"Ġ2500": 33507, + "ĠMerchant": 33508, + "Ġwiret": 33509, + "Ġcombos": 33510, + "ĠBirthday": 33511, + "Ġcharcoal": 33512, + "ĠUPS": 33513, + "ĠFairfax": 33514, + "Ġdriveway": 33515, + "ĠTek": 33516, + "ĠPitch": 33517, + "overe": 33518, + "Ġtechnicians": 33519, + "ĠActual": 33520, + "flation": 33521, + "ĠFiscal": 33522, + "ĠEmpty": 33523, + "anamo": 33524, + "Ġmagnesium": 33525, + "Ġslut": 33526, + "Ġgrowers": 33527, + "Investigators": 33528, + "():": 33529, + "ĠSatellite": 33530, + "ĠKeynes": 33531, + "missive": 33532, + "lane": 33533, + "Ġborough": 33534, + "344": 33535, + "ĠTEAM": 33536, + "ĠBethesda": 33537, + "CV": 33538, + "hower": 33539, + "ĠRAD": 33540, + "Ġchant": 33541, + "ĠRiy": 33542, + "Ġcompositions": 33543, + "Ġmildly": 33544, + "Ġmeddling": 33545, + "Ġagility": 33546, + "aneers": 33547, + "501": 33548, + "Ġsynth": 33549, + "linger": 33550, + "291": 33551, + "Ġexclaimed": 33552, + "Party": 33553, + "Ġcontamin": 33554, + "ĠManor": 33555, + "ĠRespond": 33556, + "Ġpraising": 33557, + "Ġmanners": 33558, + "fleet": 33559, + "Summer": 33560, + "ĠLynd": 33561, + "ĠDefinitely": 33562, + "grim": 33563, + "Ġbowling": 33564, + "stri": 33565, + "çĽ": 33566, + "ynt": 33567, + "Ġmandates": 33568, + "DIV": 33569, + "Ġreconcile": 33570, + "views": 33571, + "ĠDamon": 33572, + "vette": 33573, + "Flo": 33574, + "ĠGreatest": 33575, + "ilon": 33576, + "icia": 33577, + "Ġportrayal": 33578, + "Ġcushion": 33579, + "504": 33580, + "1979": 33581, + "ossal": 33582, + "Applic": 33583, + "scription": 33584, + "Ġmitigation": 33585, + "ATS": 33586, + "pac": 33587, + "Ġerased": 33588, + "Ġdeficiencies": 33589, + "ĠHollande": 33590, + "ĠXu": 33591, + "Ġbred": 33592, + "Ġpregnancies": 33593, + "femin": 33594, + "Ġemph": 33595, + "Ġplanners": 33596, + "Ġoutper": 33597, + "uttering": 33598, + "Ġperpetrator": 33599, + "Ġmotto": 33600, + "ĠEllison": 33601, + "ĠNEVER": 33602, + "Ġadmittedly": 33603, + "ARI": 33604, + "ĠAzerbaijan": 33605, + "Ġmillisec": 33606, + "Ġcombustion": 33607, + "ĠBottle": 33608, + "ĠLund": 33609, + "ĠPs": 33610, + "ĠDress": 33611, + "Ġfabricated": 33612, + "Ġbattered": 33613, + "Ġsidel": 33614, + "ĠNotting": 33615, + "Foreign": 33616, + "ĠJerome": 33617, + "020": 33618, + "ĠArbit": 33619, + "Ġknots": 33620, + "ĠRIGHT": 33621, + "Moving": 33622, + "ãģĻ": 33623, + "Ġsurgeries": 33624, + "Ġcourthouse": 33625, + "Ġmastered": 33626, + "Ġhovering": 33627, + "ĠBran": 33628, + "ĠAlison": 33629, + "Ġsafest": 33630, + "military": 33631, + "Ġbullied": 33632, + "Ġbarrage": 33633, + "Reader": 33634, + "ESE": 33635, + "ĠGeographic": 33636, + "Tools": 33637, + "314": 33638, + "ĠGeek": 33639, + "roth": 33640, + "glers": 33641, + "ĠFIN": 33642, + "Ïģ": 33643, + "ĠAston": 33644, + "altern": 33645, + "488": 33646, + "Ġveterin": 33647, + "Gamer": 33648, + "Ġintel": 33649, + "renches": 33650, + "Shield": 33651, + "Ġamnesty": 33652, + "ĠBhar": 33653, + "Ġpiled": 33654, + "Ġhonorable": 33655, + "ĠInstitutes": 33656, + "Ġsoaked": 33657, + "Ġcoma": 33658, + "ĠEFF": 33659, + "341": 33660, + "bytes": 33661, + "ĠGmail": 33662, + "lein": 33663, + "ĠCanadiens": 33664, + "material": 33665, + "Il": 33666, + "Ġinstructors": 33667, + "ĠKY": 33668, + "Ġconceive": 33669, + "ubb": 33670, + "ĠPossible": 33671, + "Ġeasing": 33672, + "ĠChristina": 33673, + "Ġcaric": 33674, + "ĠHDR": 33675, + "ROM": 33676, + "Ġshovel": 33677, + "delete": 33678, + "Ġpuff": 33679, + "ĠChanging": 33680, + "Ġseamlessly": 33681, + "Attribute": 33682, + "Ġacquisitions": 33683, + "akery": 33684, + "ĠEF": 33685, + "Ġautistic": 33686, + "ĠTakes": 33687, + 
"ĠPowder": 33688, + "ĠStir": 33689, + "510": 33690, + "ĠBubble": 33691, + "settings": 33692, + "ĠFowler": 33693, + "Ġmustard": 33694, + "Ġmoreover": 33695, + "Ġcopyrighted": 33696, + "ĠLEDs": 33697, + "1500": 33698, + "æī": 33699, + "ĠHIS": 33700, + "enf": 33701, + "Ġcustod": 33702, + "ĠHuck": 33703, + "Gi": 33704, + "Ġimg": 33705, + "Answer": 33706, + "Ct": 33707, + "jay": 33708, + "ĠInfrastructure": 33709, + "Ġfederally": 33710, + "Loc": 33711, + "Ġmicrobes": 33712, + "Ġoverrun": 33713, + "dds": 33714, + "otent": 33715, + "adiator": 33716, + ">>>>>>>>": 33717, + "Ġtornado": 33718, + "Ġadjud": 33719, + "Ġintrigued": 33720, + "Ġsi": 33721, + "ĠRevelation": 33722, + "progress": 33723, + "Ġburglary": 33724, + "ĠSaiyan": 33725, + "ĠKathy": 33726, + "Ġserpent": 33727, + "ĠAndreas": 33728, + "Ġcompel": 33729, + "essler": 33730, + "ĠPlastic": 33731, + "ĠAdvent": 33732, + "ĠPositive": 33733, + "ĠQt": 33734, + "ĠHindus": 33735, + "registered": 33736, + "ularity": 33737, + "Ġrighteousness": 33738, + "Ġdemonic": 33739, + "uitive": 33740, + "ĠBDS": 33741, + "ĠGregg": 33742, + "cia": 33743, + "ĠCrusade": 33744, + "ĠSinai": 33745, + "WARE": 33746, + "+(": 33747, + "Ġmell": 33748, + "Ġderail": 33749, + "yards": 33750, + "Ast": 33751, + "Ġnoticeably": 33752, + "ĠOber": 33753, + "Ram": 33754, + "Ġunnoticed": 33755, + "Ġseq": 33756, + "avage": 33757, + "Ts": 33758, + "Ġ640": 33759, + "Ġconcede": 33760, + "Ġ])": 33761, + "Fill": 33762, + "Ġcaptivity": 33763, + "ĠImprovement": 33764, + "ĠCrusader": 33765, + "araoh": 33766, + "MAP": 33767, + "æĹ": 33768, + "Ġstride": 33769, + "always": 33770, + "Fly": 33771, + "Nit": 33772, + "Ġalgae": 33773, + "ĠCooking": 33774, + "ĠDoors": 33775, + "Malley": 33776, + "Ġpolicemen": 33777, + "ãģį": 33778, + "Ġastronaut": 33779, + "accessible": 33780, + "495": 33781, + "ĠRAW": 33782, + "cliffe": 33783, + "udicrous": 33784, + "Ġdepended": 33785, + "alach": 33786, + "Ġventures": 33787, + "rake": 33788, + "Ġtits": 33789, + "ĠHou": 33790, + "Ġcondom": 33791, + "ormonal": 33792, + "Ġindent": 33793, + "Ġuploading": 33794, + "Footnote": 33795, + "Important": 33796, + "Ġ271": 33797, + "Ġmindful": 33798, + "Ġcontends": 33799, + "Cra": 33800, + "Ġcalibr": 33801, + "ĠOECD": 33802, + "plugin": 33803, + "Fat": 33804, + "ĠISS": 33805, + "ĠDynamics": 33806, + "ansen": 33807, + "686": 33808, + "'),": 33809, + "Ġsprite": 33810, + "Ġhandheld": 33811, + "ĠHipp": 33812, + "=~=~": 33813, + "Trust": 33814, + "Ġsemantics": 33815, + "ĠBundes": 33816, + "ĠReno": 33817, + "ĠLiterature": 33818, + "sense": 33819, + "Gary": 33820, + "ĠAeg": 33821, + "ĠTrin": 33822, + "EEK": 33823, + "Ġcleric": 33824, + "ĠSSH": 33825, + "Ġchrist": 33826, + "Ġinvading": 33827, + "ibu": 33828, + "Ġenum": 33829, + "aura": 33830, + "Ġallege": 33831, + "ĠIncredible": 33832, + "BBC": 33833, + "Ġthru": 33834, + "Ġsailed": 33835, + "Ġemulate": 33836, + "Ġinsecurity": 33837, + "Ġcrou": 33838, + "Ġaccommodations": 33839, + "Ġincompetent": 33840, + "Ġslips": 33841, + "ĠEarthqu": 33842, + "sama": 33843, + "ILLE": 33844, + "ĠiPhones": 33845, + "asaki": 33846, + "Ġbye": 33847, + "Ġard": 33848, + "Ġextras": 33849, + "Ġslaughtered": 33850, + "Ġcrowdfunding": 33851, + "resso": 33852, + "Ġfilib": 33853, + "ĠERROR": 33854, + "ĠTLS": 33855, + "egg": 33856, + "ĠItal": 33857, + "Ġenlist": 33858, + "ĠCatalonia": 33859, + "ĠScots": 33860, + "Ġsergeant": 33861, + "Ġdissolve": 33862, + "NH": 33863, + "Ġstandings": 33864, + "rique": 33865, + "IQ": 33866, + "Ġbeneficiary": 33867, + "Ġaquarium": 33868, + "YouTube": 33869, + "ĠPowerShell": 33870, + 
"Ġbrightest": 33871, + "ĠWarrant": 33872, + "Sold": 33873, + "Writing": 33874, + "Ġbeginnings": 33875, + "ĠReserved": 33876, + "ĠLatinos": 33877, + "heading": 33878, + "Ġ440": 33879, + "Ġrooftop": 33880, + "ATING": 33881, + "Ġ390": 33882, + "VPN": 33883, + "Gs": 33884, + "kernel": 33885, + "turned": 33886, + "Ġpreferable": 33887, + "Ġturnovers": 33888, + "ĠHels": 33889, + "Sa": 33890, + "ĠShinji": 33891, + "veh": 33892, + "ĠMODULE": 33893, + "Viol": 33894, + "Ġexiting": 33895, + "Ġjab": 33896, + "ĠVanilla": 33897, + "Ġacron": 33898, + "ĠGap": 33899, + "bern": 33900, + "Ak": 33901, + "ĠMcGu": 33902, + "Ġendlessly": 33903, + "ĠFarage": 33904, + "ĠNoel": 33905, + "Va": 33906, + "MK": 33907, + "Ġbrute": 33908, + "ĠKru": 33909, + "ĠESV": 33910, + "ĠOlivia": 33911, + "âĢł": 33912, + "ĠKaf": 33913, + "Ġtrusting": 33914, + "Ġhots": 33915, + "324": 33916, + "Ġmalaria": 33917, + "Ġjson": 33918, + "Ġpounding": 33919, + "ortment": 33920, + "Country": 33921, + "Ġpostponed": 33922, + "Ġunequiv": 33923, + "?),": 33924, + "ĠRooney": 33925, + "udding": 33926, + "ĠLeap": 33927, + "urrence": 33928, + "shapeshifter": 33929, + "ĠHAS": 33930, + "osate": 33931, + "Ġcavern": 33932, + "Ġconservatism": 33933, + "ĠBAD": 33934, + "Ġmileage": 33935, + "Ġarresting": 33936, + "Vaults": 33937, + "Ġmixer": 33938, + "Democratic": 33939, + "ĠBenson": 33940, + "Ġauthored": 33941, + "8000": 33942, + "Ġproactive": 33943, + "ĠSpiritual": 33944, + "tre": 33945, + "Ġincarcerated": 33946, + "ĠSort": 33947, + "Ġpeaked": 33948, + "Ġwielding": 33949, + "reciation": 33950, + "×Ļ×": 33951, + "Patch": 33952, + "ĠEmmy": 33953, + "Ġexqu": 33954, + "tto": 33955, + "ĠRatio": 33956, + "ĠPicks": 33957, + "ĠGry": 33958, + "phant": 33959, + "Ġfret": 33960, + "Ġethn": 33961, + "Ġarchived": 33962, + "%-": 33963, + "cases": 33964, + "ĠBlaze": 33965, + "Ġimb": 33966, + "cv": 33967, + "yss": 33968, + "imony": 33969, + "Ġcountdown": 33970, + "Ġawakening": 33971, + "ĠTunisia": 33972, + "ĠRefer": 33973, + "ĠMJ": 33974, + "Ġunnatural": 33975, + "ĠCarnegie": 33976, + "izen": 33977, + "ĠNuggets": 33978, + "hess": 33979, + "Ġevils": 33980, + "647": 33981, + "Ġintroductory": 33982, + "loving": 33983, + "ĠMcMahon": 33984, + "Ġambiguity": 33985, + "Label": 33986, + "ĠAlmighty": 33987, + "Ġcoloring": 33988, + "ĠClaus": 33989, + "setting": 33990, + "NULL": 33991, + "ĠFavorite": 33992, + "ĠSIG": 33993, + ">(": 33994, + "ĠShiva": 33995, + "ĠMayer": 33996, + "Ġstormed": 33997, + "ĠCoverage": 33998, + "weapons": 33999, + "igham": 34000, + "Ġunanswered": 34001, + "Ġleve": 34002, + "Ġcoy": 34003, + "cas": 34004, + "bags": 34005, + "asured": 34006, + "Seattle": 34007, + "ĠSantorum": 34008, + "serious": 34009, + "Ġcourageous": 34010, + "ĠSoup": 34011, + "Ġconfiscated": 34012, + "Ġ///": 34013, + "Ġunconventional": 34014, + "Ġmoms": 34015, + "ĠRohingya": 34016, + "ĠOrchestra": 34017, + "ĠPotion": 34018, + "Ġdiscredit": 34019, + "ĠFIL": 34020, + "fixed": 34021, + "ĠDeer": 34022, + "doi": 34023, + "ĠDimension": 34024, + "Ġbureaucrats": 34025, + "eteen": 34026, + "ĠactionGroup": 34027, + "ohm": 34028, + "Ġbumps": 34029, + "ĠUtility": 34030, + "Ġsubmarines": 34031, + "renheit": 34032, + "research": 34033, + "ĠShapiro": 34034, + "Ġsketches": 34035, + "Ġdeceptive": 34036, + "ĠVil": 34037, + "esame": 34038, + "ĠEssentially": 34039, + "Ġrampage": 34040, + "isky": 34041, + "Ġmuttered": 34042, + "thritis": 34043, + "Ġ236": 34044, + "fet": 34045, + "bars": 34046, + "Ġpupil": 34047, + "ĠThou": 34048, + "oS": 34049, + "song": 34050, + "Ġfractured": 34051, + "Ġrevert": 34052, + 
"picture": 34053, + "Ġcriterion": 34054, + "usher": 34055, + "Ġrepercussions": 34056, + "ĠVintage": 34057, + "ĠSuperintendent": 34058, + "Officers": 34059, + "Ġflagged": 34060, + "Ġblames": 34061, + "Ġinverse": 34062, + "ographers": 34063, + "Ġmakeshift": 34064, + "Ġdevoid": 34065, + "Ġfossils": 34066, + "ĠAristotle": 34067, + "ĠFunds": 34068, + "Ġdepleted": 34069, + "ĠFlu": 34070, + "ĠYuan": 34071, + "Ġwoes": 34072, + "Ġlipid": 34073, + "Ġsitu": 34074, + "requisites": 34075, + "Ġfurnish": 34076, + "ĠSamar": 34077, + "Ġshameful": 34078, + "Ġadversely": 34079, + "Ġadept": 34080, + "Ġremorse": 34081, + "Ġmurderous": 34082, + "uckles": 34083, + "ĠESL": 34084, + "Ġ314": 34085, + "sent": 34086, + "Ġredef": 34087, + "ĠCache": 34088, + "ĠPurs": 34089, + "igans": 34090, + "Ġ460": 34091, + "Ġprescriptions": 34092, + "Ġfres": 34093, + "Fuck": 34094, + "ocrates": 34095, + "Twenty": 34096, + "ĠWeird": 34097, + "ĠToggle": 34098, + "ĠCalled": 34099, + "itizens": 34100, + "Ġpoultry": 34101, + "Ġharvesting": 34102, + "ãĤ¦ãĤ¹": 34103, + "Bottom": 34104, + "Ġcautioned": 34105, + "tn": 34106, + "396": 34107, + "ĠNikki": 34108, + "Ġevaluations": 34109, + "Ġharassing": 34110, + "Ġbindings": 34111, + "ĠMonetary": 34112, + "Ġhitters": 34113, + "Ġadversary": 34114, + "unts": 34115, + "Ġsetback": 34116, + "Ġencrypt": 34117, + "ĠCait": 34118, + "Ġlows": 34119, + "enges": 34120, + "ĠNorn": 34121, + "Ġbulbs": 34122, + "Ġbottled": 34123, + "ĠVoyager": 34124, + "317": 34125, + "Ġspheres": 34126, + "politics": 34127, + "Ġsubtract": 34128, + "Ġsensations": 34129, + "Ġappalling": 34130, + "Ġ316": 34131, + "Ġenvironmentally": 34132, + "ĠSTEM": 34133, + "Ġpublishes": 34134, + "560": 34135, + "Ġdiligence": 34136, + "484": 34137, + "Ġadvises": 34138, + "Ġpetrol": 34139, + "Ġimagining": 34140, + "Ġpatrols": 34141, + "ĠInteger": 34142, + "ĠAshes": 34143, + "actus": 34144, + "ĠRadiant": 34145, + "ĠLT": 34146, + "itability": 34147, + "htaking": 34148, + "Setting": 34149, + "Ġnuanced": 34150, + "ĠReef": 34151, + "ĠDevelopers": 34152, + "Ni": 34153, + "pieces": 34154, + "990": 34155, + "License": 34156, + "Ġlowers": 34157, + "ĠOttoman": 34158, + "327": 34159, + "ooo": 34160, + "Ġquitting": 34161, + "markets": 34162, + "Behind": 34163, + "Ġbasin": 34164, + "Ġdocs": 34165, + "anie": 34166, + "flash": 34167, + "ctl": 34168, + "Ġcivilized": 34169, + "ĠFukushima": 34170, + "\"],\"": 34171, + "ĠKS": 34172, + "ĠHonestly": 34173, + "arat": 34174, + "Ġconstructs": 34175, + "ĠLans": 34176, + "ĠDire": 34177, + "ĠLIKE": 34178, + "ĠTrouble": 34179, + "Ġwithholding": 34180, + "ĠOblivion": 34181, + "Ġsanity": 34182, + "anya": 34183, + "Const": 34184, + "Ġgrocer": 34185, + "ĠCelsius": 34186, + "Ġrecounted": 34187, + "ĠWife": 34188, + "Border": 34189, + "atered": 34190, + "happy": 34191, + "Ġspoiler": 34192, + "Ġlogically": 34193, + "Hall": 34194, + "Ġsucceeding": 34195, + "Ġpolymorph": 34196, + "Ġaxes": 34197, + "ĠShotgun": 34198, + "ĠSlim": 34199, + "ĠPrinciples": 34200, + "ĠLeth": 34201, + "arta": 34202, + "Ġscor": 34203, + "Screenshot": 34204, + "Ġrelaxation": 34205, + "#$#$": 34206, + "Ġdeterrent": 34207, + "iddy": 34208, + "Ġpowerless": 34209, + "Ġlesbians": 34210, + "Ġchords": 34211, + "ĠEdited": 34212, + "selected": 34213, + "Ġseparatists": 34214, + "0002": 34215, + "Ġairspace": 34216, + "Ġturnaround": 34217, + "Ġcunning": 34218, + "PATH": 34219, + "Poly": 34220, + "Ġbombed": 34221, + "Ġtion": 34222, + "xs": 34223, + "Ġwithhold": 34224, + "Ġwaged": 34225, + "ĠLiberties": 34226, + "Flag": 34227, + "Ġcomforting": 34228, + "454": 34229, + 
"ĠIris": 34230, + "arers": 34231, + "Ġrag": 34232, + "Ġrelocated": 34233, + "ĠGuarant": 34234, + "Ġstrategically": 34235, + "Ġgamma": 34236, + "uberty": 34237, + "ĠLockheed": 34238, + "gres": 34239, + "Ġgrilled": 34240, + "ĠLowe": 34241, + "stats": 34242, + "ĠRocks": 34243, + "Ġsensing": 34244, + "Ġrenting": 34245, + "ĠGeological": 34246, + "اØ": 34247, + "otrop": 34248, + "Ġsew": 34249, + "Ġimproperly": 34250, + "486": 34251, + "Ġâĸł": 34252, + "Ġstarving": 34253, + "ĠBj": 34254, + "Discussion": 34255, + "328": 34256, + "ĠCombo": 34257, + "ĠFixes": 34258, + "NAT": 34259, + "Ġstriving": 34260, + "thora": 34261, + "Ġharvested": 34262, + "ĠPing": 34263, + "Ġplayful": 34264, + "Ġavenues": 34265, + "Ġoccupational": 34266, + "Ġwakes": 34267, + "ĠCourier": 34268, + "Ġdrummer": 34269, + "ĠBrowser": 34270, + "ĠHouth": 34271, + "itu": 34272, + "Ġapparel": 34273, + "paste": 34274, + "Ġhunted": 34275, + "ĠSecondly": 34276, + "lain": 34277, + "XY": 34278, + "ĠPIN": 34279, + "icons": 34280, + "Ġcocktails": 34281, + "Ġsizable": 34282, + "Ġhurdles": 34283, + "estinal": 34284, + "ĠRecreation": 34285, + "Ġeco": 34286, + "648": 34287, + "ĠDied": 34288, + "mint": 34289, + "Ġfingerprints": 34290, + "Ġdispose": 34291, + "ĠBosnia": 34292, + "tsy": 34293, + "2200": 34294, + "Ġinspected": 34295, + "ĠFou": 34296, + "Ġfuss": 34297, + "Ġambush": 34298, + "ĠRak": 34299, + "Ġmanifested": 34300, + "Prosecut": 34301, + "Ġsuffice": 34302, + "rences": 34303, + "Ġcompensated": 34304, + "ĠCyrus": 34305, + "Ġgenus": 34306, + "ĠWolverine": 34307, + "ĠTrends": 34308, + "Ġhikes": 34309, + "ĠSeen": 34310, + "Ġenrol": 34311, + "Cold": 34312, + "Ġpolitely": 34313, + "ĠSlav": 34314, + "ĠRupert": 34315, + "Ġeyewitness": 34316, + "ĠAlto": 34317, + "Ġuncomp": 34318, + "Ġposterior": 34319, + "Must": 34320, + "ĠHerz": 34321, + "Ġprogressively": 34322, + "Ġ234": 34323, + "Ġindifference": 34324, + "ĠCunningham": 34325, + "Ġacademia": 34326, + "Ġsewer": 34327, + "Ġastounding": 34328, + "ĠAES": 34329, + "rather": 34330, + "Ġeldest": 34331, + "Ġclimbs": 34332, + "ĠAdds": 34333, + "Ġoutcry": 34334, + "Ġcontag": 34335, + "ĠHouses": 34336, + "Ġpept": 34337, + "ĠMelania": 34338, + "interested": 34339, + "ĠUCH": 34340, + "ĠRoots": 34341, + "ĠHubbard": 34342, + "ĠTBD": 34343, + "ĠRomanian": 34344, + "filename": 34345, + "Stone": 34346, + "ĠImpl": 34347, + "Ġchromosome": 34348, + "Cle": 34349, + "dx": 34350, + "Ġscrambled": 34351, + "ĠPt": 34352, + "Ġ242": 34353, + "OPLE": 34354, + "Ġtremendously": 34355, + "Street": 34356, + "Ġcraving": 34357, + "Ġbundled": 34358, + "ĠRG": 34359, + "pipe": 34360, + "Ġinjuring": 34361, + "Ġarcane": 34362, + "Particip": 34363, + "ĠHeroic": 34364, + "sty": 34365, + "Ġtopping": 34366, + "ĠTempest": 34367, + "rentices": 34368, + "bh": 34369, + "Ġparanoia": 34370, + "ĠUnicode": 34371, + "Ġegregious": 34372, + "Ġ\\'": 34373, + "ĠOswald": 34374, + "Ġgravel": 34375, + "ĠSimpsons": 34376, + "Ġbland": 34377, + "ĠGuantanamo": 34378, + "Writer": 34379, + "liners": 34380, + "ĠDice": 34381, + "JC": 34382, + "Ġparity": 34383, + "Ġsided": 34384, + "Ġ237": 34385, + "ĠPyrrha": 34386, + "atters": 34387, + "dk": 34388, + "Fine": 34389, + "compan": 34390, + "Ġformulated": 34391, + "ĠIdol": 34392, + "ilers": 34393, + "hemoth": 34394, + "ĠFav": 34395, + "Ġintrusion": 34396, + "Ġcarrots": 34397, + "ĠLayer": 34398, + "ĠHacker": 34399, + "Ġ----------------": 34400, + "Ġmoderation": 34401, + "éģ": 34402, + "ococ": 34403, + "Ġcharacterize": 34404, + "ĠTeresa": 34405, + "Ġsocioeconomic": 34406, + "Ġperk": 34407, + "ĠParticipation": 34408, + 
"training": 34409, + "ĠPaulo": 34410, + "phys": 34411, + "Ġtrustworthy": 34412, + "Ġembodied": 34413, + "ĠMerch": 34414, + "currency": 34415, + "ĠPriority": 34416, + "Ġteasing": 34417, + "Ġabsorbing": 34418, + "Ġunfinished": 34419, + "ĠComparison": 34420, + "Ġdisple": 34421, + "writers": 34422, + "Ġprofessions": 34423, + "ĠPenguin": 34424, + "Ġangrily": 34425, + "ĠLINK": 34426, + "688": 34427, + "ĠCorrespond": 34428, + "Ġprevailed": 34429, + "Ġcartel": 34430, + "lp": 34431, + "asms": 34432, + "ĠRedemption": 34433, + "ĠIslamists": 34434, + "effects": 34435, + "dose": 34436, + "ĠLatter": 34437, + "ĠHalifax": 34438, + "Ġvas": 34439, + "ĠTopics": 34440, + "ĠNamed": 34441, + "advertising": 34442, + "zza": 34443, + "ICES": 34444, + "Ġretarded": 34445, + "achable": 34446, + "ĠPuppet": 34447, + "ĠItemLevel": 34448, + "Ġretract": 34449, + "Ġidentifiable": 34450, + "Aaron": 34451, + "ĠBuster": 34452, + "sol": 34453, + "helle": 34454, + "assemb": 34455, + "Hope": 34456, + "ranged": 34457, + "Ba": 34458, + "ĠPurch": 34459, + "éĢ": 34460, + "ĠSiri": 34461, + "Ġarrivals": 34462, + "Ġ1912": 34463, + "Ġshortened": 34464, + "Ġ312": 34465, + "Ġdiscrepancy": 34466, + "ĠTemperature": 34467, + "ĠWalton": 34468, + "Ġkinderg": 34469, + "polit": 34470, + "Ġremix": 34471, + "Ġconnectors": 34472, + "ãĥĺãĥ©": 34473, + "ĠKazakhstan": 34474, + "dominated": 34475, + "Ġsugars": 34476, + "imble": 34477, + "ĠPanic": 34478, + "ĠDemand": 34479, + "ĠColony": 34480, + "onen": 34481, + "ĠMER": 34482, + "775": 34483, + "uria": 34484, + "azaar": 34485, + "ĠDegree": 34486, + "Pri": 34487, + "Ġsunshine": 34488, + "Ġ251": 34489, + "Ġpsychedelic": 34490, + "Ġdigitally": 34491, + "ĠBraun": 34492, + "Ġshimmer": 34493, + "Ġshave": 34494, + "ĠTelesc": 34495, + "ĠAstral": 34496, + "ĠVenezuelan": 34497, + "ĠOG": 34498, + "Ġcrawling": 34499, + "Integ": 34500, + "ĠFeather": 34501, + "Ġunfolding": 34502, + "Ġappropriation": 34503, + "Ġè£ıè": 34504, + "ĠMobility": 34505, + "ĠNey": 34506, + "-.": 34507, + "bilt": 34508, + "LIN": 34509, + "ĠTube": 34510, + "ĠConversely": 34511, + "Ġkeyboards": 34512, + "ĠCao": 34513, + "Ġoverth": 34514, + "Ġlaure": 34515, + ">>\\": 34516, + "ĠViper": 34517, + "acha": 34518, + "Offset": 34519, + "ĠRaleigh": 34520, + "ĠJae": 34521, + "Jordan": 34522, + "jp": 34523, + "Ġtotalitarian": 34524, + "Connector": 34525, + "Ġobserves": 34526, + "ĠSpartan": 34527, + "ĠImmediately": 34528, + "ĠScal": 34529, + "Cool": 34530, + "Ġtaps": 34531, + "Ġroar": 34532, + "Past": 34533, + "Ġchars": 34534, + "ĠBender": 34535, + "ĠSheldon": 34536, + "Ġpainter": 34537, + "Ġbeacon": 34538, + "ĠCreatures": 34539, + "Ġdownturn": 34540, + "Ġhinder": 34541, + "ĠAndromeda": 34542, + "ÃĽ": 34543, + "ccoli": 34544, + "ĠFitness": 34545, + "etrical": 34546, + "Ġutilizes": 34547, + "Ġsenate": 34548, + "Ġensemble": 34549, + "Ġcheers": 34550, + "TW": 34551, + "Ġaffluent": 34552, + "kil": 34553, + "rylic": 34554, + "ordering": 34555, + "Computer": 34556, + "Ġgruesome": 34557, + "ostics": 34558, + "ĠUbisoft": 34559, + "ĠKelley": 34560, + "Ġwrench": 34561, + "Ġbourgeoisie": 34562, + "IBLE": 34563, + "ĠPreston": 34564, + "worn": 34565, + "arist": 34566, + "reating": 34567, + "Ġstained": 34568, + "arine": 34569, + "Ġslime": 34570, + "ENN": 34571, + "Ġchests": 34572, + "Ġgroundwater": 34573, + "annot": 34574, + "ĠTray": 34575, + "ĠLocke": 34576, + "ĠCTR": 34577, + "Ġdudes": 34578, + "ĠExternal": 34579, + "ĠDecoder": 34580, + "Ġparamed": 34581, + "ĠMedline": 34582, + "809": 34583, + "ĠDinner": 34584, + "rupal": 34585, + "gz": 34586, + "ĠGum": 34587, + 
"ĠDemo": 34588, + "jee": 34589, + "Ġdh": 34590, + "berman": 34591, + "archs": 34592, + "Ġenqu": 34593, + "ĠEpstein": 34594, + "Ġdevastation": 34595, + "Ġfriendships": 34596, + "ĠArd": 34597, + "Ġ231": 34598, + "ĠRubin": 34599, + "ĠDistance": 34600, + "Ġspurred": 34601, + "Ġdossier": 34602, + "Ġoverlooking": 34603, + "\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\": 34604, + "Forest": 34605, + "ĠComes": 34606, + "\\\",": 34607, + "ĠIranians": 34608, + "Ġfixtures": 34609, + "Laughs": 34610, + "Ġcurry": 34611, + "ĠKingston": 34612, + "Ġsquash": 34613, + "Ġcatalogue": 34614, + "Ġabnormalities": 34615, + "Ġdigestive": 34616, + ".........": 34617, + "Ġsubordinate": 34618, + "ogly": 34619, + "Ġ249": 34620, + "Middle": 34621, + "Ġmassac": 34622, + "Ġburgers": 34623, + "Ġdownstairs": 34624, + "Ġ1931": 34625, + "394": 34626, + "ĠVG": 34627, + "Ġlasers": 34628, + "ĠSikh": 34629, + "ĠAlexa": 34630, + "derived": 34631, + "Ġcyclist": 34632, + "ãģ®éŃĶ": 34633, + "oneliness": 34634, + "!!!!!!!!": 34635, + "Ġbuffs": 34636, + "legate": 34637, + "Ġraping": 34638, + "Ġrecommending": 34639, + "rored": 34640, + "Ġmulticultural": 34641, + "unique": 34642, + "Ġbusinessmen": 34643, + "Ġuneasy": 34644, + "ĠMAP": 34645, + "Ġdispersed": 34646, + "cipline": 34647, + "Jess": 34648, + "ĠKerala": 34649, + "å§": 34650, + "Ġabstraction": 34651, + "Surv": 34652, + "Uh": 34653, + "Ġprinters": 34654, + "ija": 34655, + "owder": 34656, + "Ġanalogous": 34657, + "ĠASP": 34658, + "afer": 34659, + "Ġunfolded": 34660, + "Ġleveling": 34661, + "Ġbreached": 34662, + "ĠHearing": 34663, + "Ġnat": 34664, + "Ġtranslating": 34665, + "critical": 34666, + "Ġantagonist": 34667, + "ĠYesterday": 34668, + "Ġfuzzy": 34669, + "wash": 34670, + "mere": 34671, + "Ġbewild": 34672, + "ĠMae": 34673, + "Virgin": 34674, + "phrase": 34675, + "Ġsignaled": 34676, + "ĠHIGH": 34677, + "Ġprotester": 34678, + "Ġgarner": 34679, + "unknown": 34680, + "Ġkay": 34681, + "Ġabducted": 34682, + "Ġstalking": 34683, + "amn": 34684, + "Ġdeserving": 34685, + "ĠRiv": 34686, + "ĠJorge": 34687, + "Ġscratching": 34688, + "ĠSaving": 34689, + "iping": 34690, + "Ġtease": 34691, + "Ġmissionary": 34692, + "ĠMorrow": 34693, + "TIME": 34694, + "Present": 34695, + "Ġchemotherapy": 34696, + "terness": 34697, + "ĠHomes": 34698, + "ĠPurdue": 34699, + "Ġstaunch": 34700, + "ĠWhitney": 34701, + "ĠTHERE": 34702, + "μ": 34703, + "iatus": 34704, + "ĠErnest": 34705, + "ĠDeploy": 34706, + "Ġcoveted": 34707, + "FML": 34708, + "ĠDialogue": 34709, + "Ġexited": 34710, + "fruit": 34711, + "Ġnerd": 34712, + "\":\"\",\"": 34713, + "Ġvivo": 34714, + "ruly": 34715, + "460": 34716, + "ĠAmen": 34717, + "rehensible": 34718, + "Ġâĺ": 34719, + "DIR": 34720, + "Ġadherence": 34721, + "Ġchew": 34722, + "ĠCoke": 34723, + "ĠSergei": 34724, + "digital": 34725, + "ĠNeck": 34726, + "gently": 34727, + "enthal": 34728, + "/)": 34729, + "Ġweary": 34730, + "Ġguise": 34731, + "ĠConcord": 34732, + "ĠOnion": 34733, + "atcher": 34734, + "Ġbinge": 34735, + "ĠDirective": 34736, + "Ġmanned": 34737, + "ansk": 34738, + "Ġillusions": 34739, + "Ġbillionaires": 34740, + "383": 34741, + "olyn": 34742, + "odynamic": 34743, + "ĠWheat": 34744, + "ĠAlic": 34745, + "Ġcoloured": 34746, + "ĠNAFTA": 34747, + "abo": 34748, + "Ġmacros": 34749, + "independent": 34750, + "sweet": 34751, + "Ġspac": 34752, + "ĠKabul": 34753, + "ĠÄ": 34754, + "eme": 34755, + "Ġdictated": 34756, + "Ġshouts": 34757, + "={": 34758, + "Ġripping": 34759, + "ĠShay": 34760, + "ĠCricket": 34761, + "directed": 34762, + "Ġanalysed": 34763, + "ĠWARRANT": 34764, + "agons": 34765, + 
"ĠBlazers": 34766, + "Ġcheered": 34767, + "Ġarithmetic": 34768, + "ĠTanz": 34769, + "373": 34770, + "ĠFlags": 34771, + "Ġ295": 34772, + "Ġwitches": 34773, + "ĠIncluded": 34774, + "ĠGained": 34775, + "ĠBlades": 34776, + "Gam": 34777, + "ĠSamantha": 34778, + "ĠAtlantis": 34779, + "ĠPratt": 34780, + "Ġspoiled": 34781, + "ĠIB": 34782, + "ĠRamirez": 34783, + "Probably": 34784, + "rero": 34785, + "ĠNg": 34786, + "ĠWarlock": 34787, + "tp": 34788, + "Ġoverhe": 34789, + "Ġadministrations": 34790, + "Ġtint": 34791, + "Ġregiment": 34792, + "Ġpistols": 34793, + "Ġblankets": 34794, + "Ġepist": 34795, + "Ġbowls": 34796, + "Ġhydraulic": 34797, + "Ġdean": 34798, + "Ġjung": 34799, + "Ġascend": 34800, + "705": 34801, + "ĠSantiago": 34802, + "î": 34803, + "Ġunavoid": 34804, + "ĠShaman": 34805, + "reb": 34806, + "Ġstemming": 34807, + "998": 34808, + "ĠMG": 34809, + "sticks": 34810, + "esthesia": 34811, + "ERO": 34812, + "Ġmorbid": 34813, + "ĠGrill": 34814, + "ĠPoe": 34815, + "anyl": 34816, + "Ġdeleting": 34817, + "ĠSurveillance": 34818, + "Ġdirectives": 34819, + "Ġiterations": 34820, + "ĠRox": 34821, + "ĠMilky": 34822, + "Father": 34823, + "Ġpatented": 34824, + "447": 34825, + "Ġprecursor": 34826, + "Ġmaiden": 34827, + "ĠPhen": 34828, + "ĠVegan": 34829, + "ĠPatent": 34830, + "Kelly": 34831, + "Redditor": 34832, + "Ġnods": 34833, + "Ġventilation": 34834, + "ĠSchwarz": 34835, + "Ġwizards": 34836, + "Ġominous": 34837, + "ĠHeads": 34838, + "ĠBG": 34839, + "Ġlumber": 34840, + "ĠSpiel": 34841, + "ĠisEnabled": 34842, + "Ġancestral": 34843, + "ĠShips": 34844, + "Ġwrestler": 34845, + "phi": 34846, + "Ġyuan": 34847, + "ĠRebellion": 34848, + "Ġiceberg": 34849, + "Ġmagically": 34850, + "Ġdiversion": 34851, + "arro": 34852, + "ythm": 34853, + "ĠRiders": 34854, + "ĠRobbie": 34855, + "ĠKara": 34856, + "ĠMaintenance": 34857, + "ĠHerb": 34858, + "Ġharms": 34859, + "packed": 34860, + "ĠFeinstein": 34861, + "Ġmarrying": 34862, + "Ġblending": 34863, + "ĠRates": 34864, + "Ġ1880": 34865, + "Ġwrink": 34866, + "ĠUnch": 34867, + "ĠTorch": 34868, + "described": 34869, + "Ġhumanoid": 34870, + "ilitating": 34871, + "ĠConv": 34872, + "ĠFeld": 34873, + "IGHTS": 34874, + "Ġwhistleblower": 34875, + "ortmund": 34876, + "etsy": 34877, + "arrett": 34878, + "ĠMono": 34879, + "ĠIke": 34880, + "ĠCNBC": 34881, + "ĠWAY": 34882, + "ĠMDMA": 34883, + "ĠIndividuals": 34884, + "Ġsupplemental": 34885, + "Ġpowerhouse": 34886, + "ĠStru": 34887, + "Focus": 34888, + "aphael": 34889, + "ĠColleg": 34890, + "atti": 34891, + "ZA": 34892, + "Ġperenn": 34893, + "ĠSignature": 34894, + "ĠRodney": 34895, + "Ġcubes": 34896, + "iddled": 34897, + "ĠDante": 34898, + "ĠINV": 34899, + "ilingual": 34900, + "ĠCth": 34901, + "Ġsofa": 34902, + "Ġintimidate": 34903, + "ĠRoe": 34904, + "ĠDiplom": 34905, + "ĠCountries": 34906, + "ayson": 34907, + "Ġextradition": 34908, + "Ġdisabling": 34909, + "ĠCardiff": 34910, + "Ġmemorandum": 34911, + "ĠTrace": 34912, + "Ġ???": 34913, + "sector": 34914, + "ĠRouhani": 34915, + "ĠYates": 34916, + "ĠFreeze": 34917, + "Ġbladder": 34918, + "Motor": 34919, + "ĠPromise": 34920, + "antasy": 34921, + "Ġforeseeable": 34922, + "ĠCologne": 34923, + "container": 34924, + "ĠTrees": 34925, + "ĠGors": 34926, + "ĠSinclair": 34927, + "Ġbarring": 34928, + "keye": 34929, + "Ġslashed": 34930, + "ĠStatistical": 34931, + "éĩ": 34932, + "Ġâĸº": 34933, + "Allows": 34934, + "Ġhumility": 34935, + "Ġdrilled": 34936, + "ĠFurn": 34937, + "443": 34938, + "Ġsewage": 34939, + "Ġhomepage": 34940, + "Ġcourtyard": 34941, + "Ġvile": 34942, + "Ġsubsidiaries": 34943, + "ajo": 
34944, + "directory": 34945, + "Ġammon": 34946, + "Vers": 34947, + "charges": 34948, + "Ġ}}": 34949, + "ĠChains": 34950, + "Ġ246": 34951, + "nob": 34952, + "Ġpercept": 34953, + "Ġgrit": 34954, + "Ġfishermen": 34955, + "ĠIraqis": 34956, + "ĠDISTR": 34957, + "ĠFULL": 34958, + "ĠEvaluation": 34959, + "graph": 34960, + "atial": 34961, + "Ġcooperating": 34962, + "Ġmelan": 34963, + "Ġenlightened": 34964, + "Ġali": 34965, + "tailed": 34966, + "Ġsalute": 34967, + "Ġweakest": 34968, + "ĠBulldogs": 34969, + "UA": 34970, + "ĠAlloy": 34971, + "Ġsemen": 34972, + "ocene": 34973, + "ĠWilliamson": 34974, + "spr": 34975, + ",âĢĶ": 34976, + "ĠGF": 34977, + "ittens": 34978, + "Beat": 34979, + "ĠJunk": 34980, + "iphate": 34981, + "ĠFarmers": 34982, + "ĠBitcoins": 34983, + "igers": 34984, + "dh": 34985, + "ĠLoyal": 34986, + "payer": 34987, + "Ġentertained": 34988, + "Ġpenned": 34989, + "Ġcoupon": 34990, + "Queue": 34991, + "Ġweakening": 34992, + "carry": 34993, + "Ġunderestimate": 34994, + "Ġshootout": 34995, + "Ġcharismatic": 34996, + "ĠProcedure": 34997, + "Ġprudent": 34998, + "inances": 34999, + "Ġriches": 35000, + "Ġcortical": 35001, + "Ġstrides": 35002, + "Ġdrib": 35003, + "ĠOilers": 35004, + "540": 35005, + "ĠPerform": 35006, + "ĠBangkok": 35007, + "Ġeuth": 35008, + "SER": 35009, + "Ġsimplistic": 35010, + "tops": 35011, + "campaign": 35012, + "Quality": 35013, + "Ġimpoverished": 35014, + "ĠEisenhower": 35015, + "Ġaugment": 35016, + "ĠHarden": 35017, + "Ġintervened": 35018, + "Ġlistens": 35019, + "ĠKok": 35020, + "Ġsage": 35021, + "Ġrubbish": 35022, + "ĠDed": 35023, + "Ġmull": 35024, + "pelling": 35025, + "Ġvideot": 35026, + "Production": 35027, + "DJ": 35028, + "miah": 35029, + "Ġadaptations": 35030, + "Ġmedically": 35031, + "Ġboarded": 35032, + "Ġarrogance": 35033, + "Ġscrapped": 35034, + "Ġoppress": 35035, + "FORMATION": 35036, + "Ġjunction": 35037, + "415": 35038, + "EEEE": 35039, + "Skill": 35040, + "Ġsubdu": 35041, + "ĠSuggest": 35042, + "ĠPett": 35043, + "Ġlett": 35044, + "ĠManip": 35045, + "ĠCaf": 35046, + "ĠCooperation": 35047, + "Ther": 35048, + "Ġregained": 35049, + "¶æ": 35050, + "reflect": 35051, + "Ġthugs": 35052, + "ĠShelby": 35053, + "Ġdictates": 35054, + "ĠWeiner": 35055, + "ĠHale": 35056, + "Ġbattleground": 35057, + "schild": 35058, + "Ġcondol": 35059, + "hunt": 35060, + "ositories": 35061, + "Ġaccuses": 35062, + "Filename": 35063, + "Ġshri": 35064, + "Ġmotivate": 35065, + "Ġreflections": 35066, + "Null": 35067, + "ĠLobby": 35068, + "¥µ": 35069, + "ĠSATA": 35070, + "ĠBackup": 35071, + "Ñĥ": 35072, + "nin": 35073, + "ĠCorrection": 35074, + "Ġjuicy": 35075, + "utra": 35076, + "ĠPric": 35077, + "Ġrestraining": 35078, + "ĠAirbnb": 35079, + "ĠArrest": 35080, + "Ġappropriations": 35081, + "Ġslopes": 35082, + "Ġmanslaughter": 35083, + "Ġworkings": 35084, + "ĠHuss": 35085, + "ĠFrey": 35086, + "Leave": 35087, + "ĠHarmony": 35088, + "ĠFeder": 35089, + "Ġ430": 35090, + "Ġtrench": 35091, + "Ġgladly": 35092, + "Ġbullpen": 35093, + "ĠGau": 35094, + "bones": 35095, + "Ġgroove": 35096, + "Ġpretext": 35097, + "ãħĭ": 35098, + "Ġtransmitter": 35099, + "ĠComponent": 35100, + "Ġunderage": 35101, + "ĠEmpires": 35102, + "Tile": 35103, + "Ġoy": 35104, + "ĠMarvin": 35105, + "ĠCAS": 35106, + "Ġbloss": 35107, + "Ġreplicated": 35108, + "ĠMariners": 35109, + "Marcus": 35110, + "ĠBlocks": 35111, + "Ġliberated": 35112, + "Ġbutterfly": 35113, + "Feel": 35114, + "Ġfermentation": 35115, + "Ġyoutube": 35116, + "Ġoffend": 35117, + "ĠTerm": 35118, + "resist": 35119, + "Ġcessation": 35120, + "Ġinsurgency": 35121, + "Ġbir": 
35122, + "ĠRaise": 35123, + "595": 35124, + "Ġhypotheses": 35125, + "502": 35126, + "Ġplaque": 35127, + "ocrat": 35128, + "Ġjackets": 35129, + "ĠHuffPost": 35130, + "among": 35131, + "Ġconfer": 35132, + "487": 35133, + "ĠLilly": 35134, + "Ġadapting": 35135, + "ĠFay": 35136, + "Ġshoved": 35137, + "vec": 35138, + "Ġrefine": 35139, + "Ġgon": 35140, + "Ġgunmen": 35141, + "zai": 35142, + "ĠShuttle": 35143, + "ĠIzan": 35144, + "Ġ1913": 35145, + "Ġplethora": 35146, + "··": 35147, + "Ġ510": 35148, + "Ġpuberty": 35149, + "Ġ241": 35150, + "ĠWealth": 35151, + "ĠAlma": 35152, + "ĠMEM": 35153, + "ĠAdults": 35154, + "Cas": 35155, + "prison": 35156, + "Race": 35157, + "Ġwaterproof": 35158, + "Ġathleticism": 35159, + "Ġcapitalize": 35160, + "ĠJuice": 35161, + "Ġilluminated": 35162, + "ĠPascal": 35163, + "Ġirritation": 35164, + "ĠWitnesses": 35165, + "adle": 35166, + "ĠAstro": 35167, + "Ġfax": 35168, + "ĠElvis": 35169, + "Primary": 35170, + "ĠLich": 35171, + "ĠElves": 35172, + "Ġresiding": 35173, + "Ġstumble": 35174, + "319": 35175, + "ĠPKK": 35176, + "Ġadversaries": 35177, + "DOS": 35178, + "ĠRitual": 35179, + "Ġsmear": 35180, + "Ġarson": 35181, + "idental": 35182, + "Ġscant": 35183, + "Ġmonarchy": 35184, + "Ġhalftime": 35185, + "Ġresidue": 35186, + "Ġindign": 35187, + "ĠShaun": 35188, + "ĠElm": 35189, + "auri": 35190, + "Aff": 35191, + "WATCH": 35192, + "ĠLyon": 35193, + "helps": 35194, + "361": 35195, + "Ġlobbyist": 35196, + "Ġdiminishing": 35197, + "Ġoutbreaks": 35198, + "Ġgoats": 35199, + "favorite": 35200, + "ĠNah": 35201, + "sonian": 35202, + "ĠBooster": 35203, + "Ġsandbox": 35204, + "ĠFare": 35205, + "ĠMalta": 35206, + "ĠattRot": 35207, + "ĠMOR": 35208, + "lde": 35209, + "Ġnavigating": 35210, + "Touch": 35211, + "Ġuntrue": 35212, + "ĠDisaster": 35213, + "Ġludicrous": 35214, + "Password": 35215, + "ĠJFK": 35216, + "blogspot": 35217, + "416": 35218, + "ĠUNDER": 35219, + "ernal": 35220, + "Ġdelaying": 35221, + "TOP": 35222, + "Ġimplants": 35223, + "ĠAVG": 35224, + "ĠHuge": 35225, + "attr": 35226, + "Ġjournalistic": 35227, + "ĠPeyton": 35228, + "ĠIA": 35229, + "Rap": 35230, + "goal": 35231, + "ĠProgramme": 35232, + "Ġsmashing": 35233, + "wives": 35234, + "println": 35235, + "ĠPlague": 35236, + "inus": 35237, + "EEP": 35238, + "Ġcruiser": 35239, + "ĠParish": 35240, + "uminium": 35241, + "Ġoccupants": 35242, + "ĠJihad": 35243, + "mop": 35244, + "Ġpint": 35245, + "Ġhect": 35246, + "ĠMecca": 35247, + "director": 35248, + "ĠFunding": 35249, + "ĠMixed": 35250, + "Ġstag": 35251, + "Tier": 35252, + "Ġgust": 35253, + "Ġbrightly": 35254, + "orsi": 35255, + "Ġuphill": 35256, + "RD": 35257, + "Ġlesions": 35258, + "ĠBundy": 35259, + "livious": 35260, + "Ġbiologist": 35261, + "ĠFaculty": 35262, + "ĠAuthorization": 35263, + "Ġ244": 35264, + "Allow": 35265, + "ï¸": 35266, + "ĠGiul": 35267, + "Ġpertinent": 35268, + "otaur": 35269, + "esse": 35270, + "ĠRoof": 35271, + "Ġunmanned": 35272, + "351": 35273, + "ĠShak": 35274, + "ĠOrient": 35275, + "Ġendanger": 35276, + "Dir": 35277, + "Ġreplen": 35278, + "edient": 35279, + "Ġtailor": 35280, + "Ġgadgets": 35281, + "Ġaudible": 35282, + "âĺĨ": 35283, + "Nice": 35284, + "Ġbombard": 35285, + "ĠRape": 35286, + "Ġdefiance": 35287, + "ĠTWO": 35288, + "ĠFilipino": 35289, + "Ġunaffected": 35290, + "ervatives": 35291, + "Ġsoared": 35292, + "ĠBolton": 35293, + "Ġcompromising": 35294, + "ĠBrewers": 35295, + "RAL": 35296, + "ĠAHL": 35297, + "icycle": 35298, + "Ġvampires": 35299, + "Ġdipped": 35300, + "oyer": 35301, + "ĠXIII": 35302, + "Ġsideways": 35303, + "ĠWaste": 35304, + "ĠDiss": 
35305, + "ĠâĶľâĶĢâĶĢ": 35306, + "$.": 35307, + "Ġhabitats": 35308, + "ĠBeef": 35309, + "truth": 35310, + "trained": 35311, + "split": 35312, + "Rus": 35313, + "Andy": 35314, + "ĠBram": 35315, + "REP": 35316, + "pid": 35317, + "è£ħ": 35318, + "ĠMutant": 35319, + "Anim": 35320, + "ĠMarina": 35321, + "Ġfutile": 35322, + "highest": 35323, + "frequency": 35324, + "Ġepilepsy": 35325, + "Ġcoping": 35326, + "Ġconcise": 35327, + "Ġtracing": 35328, + "ĠSUN": 35329, + "panel": 35330, + "ĠSophie": 35331, + "ĠCrowley": 35332, + "ĠAdolf": 35333, + "ĠShooter": 35334, + "Ġshaky": 35335, + "ĠIG": 35336, + "ĠLies": 35337, + "ĠBarber": 35338, + "pkg": 35339, + "Ġuptake": 35340, + "Ġpredatory": 35341, + "ULTS": 35342, + "/**": 35343, + "Ġintoxicated": 35344, + "ĠWestbrook": 35345, + "odder": 35346, + "hement": 35347, + "Ġbaseman": 35348, + "APD": 35349, + "storage": 35350, + "ĠFifty": 35351, + "editor": 35352, + "GEN": 35353, + "UTION": 35354, + "irting": 35355, + "Ġsewing": 35356, + "rift": 35357, + "Ġagony": 35358, + "ĠSands": 35359, + "Ġ254": 35360, + "Cash": 35361, + "Ġlodge": 35362, + "Ġpunt": 35363, + "Natural": 35364, + "ĠIdeas": 35365, + "Ġerroneous": 35366, + "ĠSensor": 35367, + "ĠHannity": 35368, + "Ġ1921": 35369, + "Ġmould": 35370, + "ĠGon": 35371, + "kaya": 35372, + "Ġanonymously": 35373, + "ĠKEY": 35374, + "Ġsimulator": 35375, + "Winter": 35376, + "Ġstreamed": 35377, + "507": 35378, + "?\",": 35379, + "Ġteased": 35380, + "Ġcoefficient": 35381, + "Ġwartime": 35382, + "ĠTHR": 35383, + "''.": 35384, + "ĠBanking": 35385, + "mpire": 35386, + "Ġfandom": 35387, + "Ġlia": 35388, + "Ga": 35389, + "Ġdownhill": 35390, + "Ġinterpreting": 35391, + "Individual": 35392, + "Norm": 35393, + "Ġjealousy": 35394, + "bitcoin": 35395, + "Ġpleasures": 35396, + "ĠToys": 35397, + "ĠChevrolet": 35398, + "ĠAdvisor": 35399, + "IZE": 35400, + "Ġreceptions": 35401, + "706": 35402, + "Cro": 35403, + "Ġ262": 35404, + "Ġcitrus": 35405, + "iru": 35406, + "Reviewer": 35407, + "jected": 35408, + "UES": 35409, + "anz": 35410, + "1981": 35411, + "ĠWorker": 35412, + "Ġcomplied": 35413, + "orescent": 35414, + "continental": 35415, + "Ton": 35416, + "ĠPrism": 35417, + "ĠSheep": 35418, + "Ġ288": 35419, + "nox": 35420, + "ĠVog": 35421, + "Ord": 35422, + "Ġrealms": 35423, + "tek": 35424, + "Ġirrigation": 35425, + "Ġbicycles": 35426, + "Ġelectronically": 35427, + "poly": 35428, + "tall": 35429, + "());": 35430, + "Ġaesthetics": 35431, + "ĠIntegrated": 35432, + "Explore": 35433, + "Ġdunk": 35434, + "476": 35435, + "pain": 35436, + "ĠJacques": 35437, + "ĠDmit": 35438, + "Frames": 35439, + "Ġreunited": 35440, + "Ġhumid": 35441, + "Dro": 35442, + "Political": 35443, + "Ġyouthful": 35444, + "Ġentails": 35445, + "Ġmosquito": 35446, + "363": 35447, + "species": 35448, + "Ġcoordinating": 35449, + "ĠMayhem": 35450, + "ĠMagnus": 35451, + "Mount": 35452, + "Improved": 35453, + "ĠSTATE": 35454, + "ATTLE": 35455, + "Ġflowed": 35456, + "Ġtackled": 35457, + "Ġfashioned": 35458, + "Ġreorgan": 35459, + "ivari": 35460, + "finger": 35461, + "Ġreluctantly": 35462, + "etting": 35463, + "ĠVand": 35464, + "young": 35465, + "ĠGarland": 35466, + "Ġpresumption": 35467, + "Ġamenities": 35468, + "ĠPleasant": 35469, + "onential": 35470, + "ĠOxy": 35471, + "Ġmorals": 35472, + "ĠYah": 35473, + "Ready": 35474, + "Simon": 35475, + "Enh": 35476, + "Demon": 35477, + "Ġclich": 35478, + "Monitor": 35479, + "ĠDU": 35480, + "Ġwelcomes": 35481, + "Ġstandout": 35482, + "Ġdreadful": 35483, + "Ġbananas": 35484, + "Ġballoons": 35485, + "hooting": 35486, + "basic": 35487, + "Ġsuffix": 
35488, + "Ġduly": 35489, + "cano": 35490, + "Chain": 35491, + "atos": 35492, + "Ġgeopolitical": 35493, + "Ġ(&": 35494, + "ĠGemini": 35495, + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ": 35496, + "Ġacquitted": 35497, + "Luck": 35498, + "protect": 35499, + "1024": 35500, + "Ġscarcity": 35501, + "Ġmindfulness": 35502, + "ecided": 35503, + "DN": 35504, + "prime": 35505, + "ĠPresidents": 35506, + "ĠVIDEO": 35507, + "Ġ(âĪĴ": 35508, + "addock": 35509, + "NOR": 35510, + "ĠPru": 35511, + "pun": 35512, + "ĠLOL": 35513, + "))))": 35514, + "ĠLiqu": 35515, + "ĠSAS": 35516, + "Ġstyling": 35517, + "Ġpunishments": 35518, + "Ġnumb": 35519, + "Ġascertain": 35520, + "ĠRockies": 35521, + "flu": 35522, + "Thumbnail": 35523, + "Ġperpetrated": 35524, + "ĠSemi": 35525, + "Ġdisarm": 35526, + "ĠOlder": 35527, + "ĠException": 35528, + "Ġexponentially": 35529, + "ĠCommunities": 35530, + "Ġabolish": 35531, + "ĠPartner": 35532, + "ptoms": 35533, + "Ġ777": 35534, + "ĠFoley": 35535, + "ĠCases": 35536, + "Ġgrease": 35537, + "ĠRebirth": 35538, + "Ground": 35539, + "Ġ;)": 35540, + "ĠDoctrine": 35541, + "ikini": 35542, + "Ye": 35543, + "ĠBlossom": 35544, + "Ġpersists": 35545, + "bill": 35546, + "Ġinfusion": 35547, + "Ġbuddies": 35548, + "911": 35549, + "ĠPatient": 35550, + "Ġdemos": 35551, + "Ġacquaintance": 35552, + "ĠPaw": 35553, + "atari": 35554, + "Ġxml": 35555, + "Ġfascination": 35556, + "ĠServe": 35557, + "ÏĤ": 35558, + "branded": 35559, + "Ġaz": 35560, + "Returns": 35561, + "Ġovershadow": 35562, + "Ġroam": 35563, + "Ġspeedy": 35564, + "numbered": 35565, + "helial": 35566, + "Ġdisciple": 35567, + "Ġassurances": 35568, + "given": 35569, + "pecting": 35570, + "ĠNatalie": 35571, + "çĶ°": 35572, + "Ġmosquitoes": 35573, + "rotein": 35574, + "Ġnumeric": 35575, + "Ġindependents": 35576, + "Ġtransitional": 35577, + "Ġreactionary": 35578, + "ĠMechdragon": 35579, + "doctor": 35580, + "Ġshortest": 35581, + "Ġsequential": 35582, + "ĠBac": 35583, + "ĠAccounts": 35584, + "ãģĮ": 35585, + "achy": 35586, + "ractive": 35587, + "ĠRegiment": 35588, + "Ġbreathtaking": 35589, + "fficiency": 35590, + "ĠBates": 35591, + "Ġ311": 35592, + "Ġwardrobe": 35593, + "fts": 35594, + "ĠBerk": 35595, + "Simply": 35596, + "ĠRiverside": 35597, + "ivering": 35598, + "idential": 35599, + "lucent": 35600, + "Ġenriched": 35601, + "ĠConver": 35602, + "ĠGiving": 35603, + "ãĥĻ": 35604, + "Ġlegalize": 35605, + "ĠFTC": 35606, + "Ġfreaking": 35607, + "Mix": 35608, + "Ġterrestrial": 35609, + "esian": 35610, + "cients": 35611, + "Wing": 35612, + "LOAD": 35613, + "Ġledge": 35614, + "ĠViolent": 35615, + "ĠMetall": 35616, + "Ġ308": 35617, + "Ġsoutheastern": 35618, + "hetto": 35619, + "Meat": 35620, + "Ġslowdown": 35621, + "Ġretreated": 35622, + "Jeremy": 35623, + "endas": 35624, + "*****": 35625, + "eric": 35626, + "Ġreins": 35627, + "oppable": 35628, + "ĠHumanity": 35629, + "earances": 35630, + "rigan": 35631, + "Camera": 35632, + "Ġwaivers": 35633, + "soc": 35634, + "Ġalteration": 35635, + "transform": 35636, + "ĠCemetery": 35637, + "506": 35638, + "Ġindefinite": 35639, + "Ġstimulating": 35640, + "yg": 35641, + "603": 35642, + "ĠSop": 35643, + "Ġdescriptive": 35644, + "Phase": 35645, + "ĠEdmund": 35646, + "Ġpneumonia": 35647, + "ventus": 35648, + "Amb": 35649, + "Ġlaboratories": 35650, + "ĠExclusive": 35651, + "ugar": 35652, + "Were": 35653, + "Ġmalfunction": 35654, + "Ġhomosexuals": 35655, + "Ġ-------": 35656, + "uni": 35657, + "Ġturbines": 35658, + "ĠEquity": 35659, + "Du": 35660, 
+ "Ġminded": 35661, + "ĠRH": 35662, + "ĠBlackhawks": 35663, + "Ġfeats": 35664, + "Ġ1700": 35665, + "repl": 35666, + "362": 35667, + "laden": 35668, + "Ġindispensable": 35669, + "lyss": 35670, + "tti": 35671, + "Ġreel": 35672, + "Ġdiverted": 35673, + "Ġlikeness": 35674, + "Ġsubscriptions": 35675, + "Ġfingert": 35676, + "Ġfilthy": 35677, + "destruct": 35678, + "draft": 35679, + "ĠBernardino": 35680, + "launch": 35681, + "Ġperplex": 35682, + "ĠSUM": 35683, + "carb": 35684, + "Ġsweater": 35685, + "ĠVenture": 35686, + "ĠJag": 35687, + "ĠCeleb": 35688, + "ĠVoters": 35689, + "Ġsteadfast": 35690, + "Ġathletics": 35691, + "ĠHanson": 35692, + "ĠDrac": 35693, + "Tracker": 35694, + "Ġcommend": 35695, + "ĠPresidency": 35696, + "ĠDID": 35697, + "informed": 35698, + "Ġwebpage": 35699, + "Pretty": 35700, + "Ġforcefully": 35701, + "ãĥĥãĤ¯": 35702, + "Ġrelocation": 35703, + "Ġsatire": 35704, + "âī": 35705, + "ĠSunderland": 35706, + "æĦ": 35707, + "Voice": 35708, + "????????": 35709, + "Ġinformant": 35710, + "Ġbowel": 35711, + "ĠUniform": 35712, + "Ġ...\"": 35713, + "Ġpurge": 35714, + "Ġpicnic": 35715, + "ĠUmb": 35716, + "ĠUPDATE": 35717, + "ĠSapphire": 35718, + "ĠStall": 35719, + "learn": 35720, + "Ġobjectively": 35721, + "Ġobliter": 35722, + "Ġloophole": 35723, + "Ġjourneys": 35724, + "Ġomission": 35725, + "Pros": 35726, + "ĠSidney": 35727, + "ploma": 35728, + "Ġsprayed": 35729, + "Ġguru": 35730, + "Ġtraitor": 35731, + "Ġtimet": 35732, + "Ġsnapping": 35733, + "ĠSevent": 35734, + "urnal": 35735, + "ĠUkip": 35736, + "Ġbowed": 35737, + "poral": 35738, + "liberal": 35739, + "Ros": 35740, + "Questions": 35741, + "iOS": 35742, + "Ġsummarize": 35743, + "STAT": 35744, + "Ġ1850": 35745, + "apest": 35746, + "Ġlender": 35747, + "ĠVariable": 35748, + "bringing": 35749, + "ĠLORD": 35750, + ",)": 35751, + "Ġcollapses": 35752, + "xiety": 35753, + "ĠNed": 35754, + "YD": 35755, + "ĠScha": 35756, + "Ġantibody": 35757, + "Ġdisband": 35758, + "yre": 35759, + "illusion": 35760, + "Ġrover": 35761, + "shed": 35762, + "ĠHirosh": 35763, + "cci": 35764, + "Ġcalam": 35765, + "ĠMorton": 35766, + "Pinterest": 35767, + "Ġ1928": 35768, + "ĠEuras": 35769, + "ordes": 35770, + "Ġfences": 35771, + "ĠInventory": 35772, + "ĠValencia": 35773, + "ĠUd": 35774, + "ĠTiff": 35775, + "Ġsque": 35776, + "Ġquotation": 35777, + "Ġtroublesome": 35778, + "erker": 35779, + "QUEST": 35780, + "ĠKingdoms": 35781, + "south": 35782, + "Ġlevy": 35783, + "Prince": 35784, + "ĠSting": 35785, + "Ġnicknamed": 35786, + "Ġappe": 35787, + "Ġphotographic": 35788, + "Ġcorpus": 35789, + "reference": 35790, + "ĠTrog": 35791, + "Unt": 35792, + ")=(": 35793, + "ĠLatvia": 35794, + "Ġactivating": 35795, + "Ġlicensee": 35796, + "Ġdisparities": 35797, + "ĠNewsletter": 35798, + "ãĥĥãĥĪ": 35799, + "Ġfreeing": 35800, + "ĠJeep": 35801, + "ĠPerception": 35802, + "insk": 35803, + "Ġsilicone": 35804, + "ĠHayden": 35805, + "Lean": 35806, + "ĠSuzuki": 35807, + "ibrarian": 35808, + "668": 35809, + "Ġspor": 35810, + "Ġcorrelations": 35811, + "aghetti": 35812, + "Ġtuber": 35813, + "ĠIPCC": 35814, + "ilus": 35815, + "ĠVu": 35816, + "Ġwealthiest": 35817, + "ĠCarbuncle": 35818, + "anza": 35819, + "Ġfooled": 35820, + "ĠZur": 35821, + "Ġdaddy": 35822, + "rano": 35823, + "ilian": 35824, + "Ġknockout": 35825, + "fman": 35826, + "required": 35827, + "ĠWikileaks": 35828, + "ĠDuffy": 35829, + "ONT": 35830, + "Ġinsol": 35831, + "ĠObjects": 35832, + "Ġbou": 35833, + "ĠNordic": 35834, + "ĠInsert": 35835, + "scan": 35836, + "Ġdancers": 35837, + "Ġidiots": 35838, + "majority": 35839, + "ĠNeville": 35840, + 
"ĠFreeBSD": 35841, + "Ġtart": 35842, + "panic": 35843, + "690": 35844, + "Ġcocoa": 35845, + "Ġsampled": 35846, + "Ġlookup": 35847, + "Indust": 35848, + "Ġinjections": 35849, + "genre": 35850, + "Ġau": 35851, + "Ġroadway": 35852, + "Ġgenitals": 35853, + "Kind": 35854, + "ĠExaminer": 35855, + "ĠYaz": 35856, + "Fresh": 35857, + "Ġparalysis": 35858, + "ĠAluminum": 35859, + "Ġreap": 35860, + "oké": 35861, + "Ġsloppy": 35862, + "ĠTunnel": 35863, + "posium": 35864, + "nery": 35865, + "enic": 35866, + "Ġherbal": 35867, + "ĠOuter": 35868, + "ĠBuilder": 35869, + "Ġincur": 35870, + "Ġideologies": 35871, + "Ġbackups": 35872, + "consuming": 35873, + "ĠDetect": 35874, + "deck": 35875, + "ĠKNOW": 35876, + "ĠGret": 35877, + "ĠMIC": 35878, + "Ġtoughness": 35879, + "ĠExhibit": 35880, + "Ġhive": 35881, + "Les": 35882, + "ĠSCHOOL": 35883, + "ĠAtari": 35884, + "alde": 35885, + "ĠNull": 35886, + "andestine": 35887, + "mouse": 35888, + "Ġbrigade": 35889, + "489": 35890, + "Ġrevol": 35891, + "ĠLawson": 35892, + "ĠWah": 35893, + "opoly": 35894, + "ebted": 35895, + "ĠSaunders": 35896, + "Ġ313": 35897, + "ĠWinc": 35898, + "Ġtaboo": 35899, + "ĠHelmet": 35900, + "Ġwedge": 35901, + "chip": 35902, + "ĠTina": 35903, + "bg": 35904, + "Ġinfuri": 35905, + "rn": 35906, + "Ġanomalies": 35907, + "ĠSync": 35908, + "ĠExam": 35909, + "ĠCommit": 35910, + "ĠDiary": 35911, + "ĠALSO": 35912, + "ĠDebor": 35913, + "omedical": 35914, + "Ġcomprehension": 35915, + "655": 35916, + "Ġempowering": 35917, + "Ġire": 35918, + "Ġjuices": 35919, + "ĠETH": 35920, + "ĠBoxing": 35921, + "=\"/": 35922, + "Ġfacilitated": 35923, + "poke": 35924, + "ĠParsons": 35925, + "ĠModer": 35926, + "travel": 35927, + "Ġcivilizations": 35928, + "Ġlibertarians": 35929, + "Ġrune": 35930, + "ĠClarks": 35931, + "athed": 35932, + "Ġcampaigners": 35933, + "ĠDispatch": 35934, + "ĠFahrenheit": 35935, + "ĠCapcom": 35936, + "----------": 35937, + "Ġlace": 35938, + "Ġdraining": 35939, + "Ġliner": 35940, + "ĠArtificial": 35941, + "én": 35942, + "task": 35943, + "]).": 35944, + "ĠGMO": 35945, + "ĠOperator": 35946, + "ordinary": 35947, + "ĠInfluence": 35948, + "ĠUps": 35949, + "Ġpotency": 35950, + "ussen": 35951, + "ospons": 35952, + "ĠSwim": 35953, + "ĠDeadline": 35954, + "Unity": 35955, + "Ġculinary": 35956, + "Ġenlightenment": 35957, + "Ġwearer": 35958, + "Ġmined": 35959, + "Ġply": 35960, + "Ġincest": 35961, + "ĠDVDs": 35962, + "Walk": 35963, + "BTC": 35964, + "Trade": 35965, + "Ġdeval": 35966, + "iband": 35967, + "ĠOversight": 35968, + "Palestinian": 35969, + "Ġdart": 35970, + "Ġmul": 35971, + "LR": 35972, + "Ġremovable": 35973, + "ĠRealms": 35974, + "ìĿ": 35975, + "Ġmiscar": 35976, + "ĠVulkan": 35977, + "685": 35978, + "ère": 35979, + "ĠSap": 35980, + "Ġmerging": 35981, + "ĠCarly": 35982, + "chester": 35983, + "Ġbrisk": 35984, + "Ġluxurious": 35985, + "ĠGenerator": 35986, + "Ġbitterness": 35987, + "Ġedible": 35988, + "Ġ243": 35989, + "TG": 35990, + "Ġrectangle": 35991, + "WithNo": 35992, + "below": 35993, + "Jenn": 35994, + "Ġdarkest": 35995, + "Ġhitch": 35996, + "Ġdosage": 35997, + "Ġscaven": 35998, + "ĠKeller": 35999, + "ĠIllustrated": 36000, + "Certainly": 36001, + "ĠMavericks": 36002, + "Marginal": 36003, + "Ġdiarrhea": 36004, + "Ġenormously": 36005, + "Ġ999": 36006, + "shr": 36007, + "quart": 36008, + "Ġadamant": 36009, + "ĠMew": 36010, + "Ġrenovation": 36011, + "Ġcervical": 36012, + "ĠPercentage": 36013, + "eners": 36014, + "ĠKimber": 36015, + "Ġfloats": 36016, + "Ġdex": 36017, + "ĠWitcher": 36018, + "ĠSwansea": 36019, + "dm": 36020, + "Ġsalty": 36021, + "yellow": 
36022, + "Ġcape": 36023, + "ĠDrain": 36024, + "ĠPaula": 36025, + "ĠToledo": 36026, + "lesi": 36027, + "Magazine": 36028, + "ĠWick": 36029, + "ĠMn": 36030, + "ĠAck": 36031, + "ĠRiding": 36032, + "ASON": 36033, + "Ġhomophobic": 36034, + "ARP": 36035, + "Ġwandered": 36036, + "CPU": 36037, + "oodoo": 36038, + "ĠPipe": 36039, + "Ġtightening": 36040, + "ĠButt": 36041, + "318": 36042, + "Ġdeserted": 36043, + "Session": 36044, + "Ġfacilitating": 36045, + "Jump": 36046, + "Ġemergencies": 36047, + "OWER": 36048, + "Ġexhaustive": 36049, + "ĠAFTER": 36050, + "Ġheartbeat": 36051, + "ĠLabel": 36052, + "acky": 36053, + "ĠCertified": 36054, + "iltration": 36055, + "Ze": 36056, + "ĠUtt": 36057, + "Ġ1300": 36058, + "Ġpresume": 36059, + "ĠDisp": 36060, + "Ġsurged": 36061, + "Ġdolls": 36062, + "Columb": 36063, + "Ġchimpan": 36064, + "ĠRazor": 36065, + "Ġticks": 36066, + "Ġcouncillor": 36067, + "Ġpilgrimage": 36068, + "ĠRebels": 36069, + "ĠQC": 36070, + "ĠAuction": 36071, + "xia": 36072, + "ikk": 36073, + "bred": 36074, + "Ġinsertion": 36075, + "Ġcoarse": 36076, + "dB": 36077, + "SEE": 36078, + "ĠZap": 36079, + "ĠFoo": 36080, + "Ġcontempor": 36081, + "ĠQuarterly": 36082, + "otions": 36083, + "ĠAlchemist": 36084, + "ĠTrey": 36085, + "ĠDuo": 36086, + "Sweet": 36087, + "804": 36088, + "ĠGiov": 36089, + "Ġfunn": 36090, + "Nin": 36091, + "hoff": 36092, + "Ġramifications": 36093, + "Ġ1922": 36094, + "ĠExperts": 36095, + "azes": 36096, + "Ġgarments": 36097, + "arial": 36098, + "ĠNab": 36099, + "Ġ257": 36100, + "ĠVed": 36101, + "Ġhumorous": 36102, + "ĠPompe": 36103, + "Ġnylon": 36104, + "Ġlurking": 36105, + "ĠSergey": 36106, + "ĠMattis": 36107, + "Ġmisogyny": 36108, + "ĠComponents": 36109, + "ĠWatching": 36110, + "ĠFolk": 36111, + "ractical": 36112, + "Bush": 36113, + "Ġtaped": 36114, + "Ġgrouping": 36115, + "Ġbeads": 36116, + "Ġ2048": 36117, + "Ġcondu": 36118, + "querque": 36119, + "Reading": 36120, + "Ġgrievances": 36121, + "Ultra": 36122, + "Ġendpoint": 36123, + "Hig": 36124, + "ĠStatic": 36125, + "ĠScarborough": 36126, + "Lua": 36127, + "ĠMessi": 36128, + "aqu": 36129, + "ĠPsyNet": 36130, + "ĠRudd": 36131, + "Ġavenue": 36132, + "vp": 36133, + "Jer": 36134, + "Ġshady": 36135, + "ĠResist": 36136, + "ĠArtemis": 36137, + "Ġcareless": 36138, + "Ġbrokers": 36139, + "Ġtemperament": 36140, + "Ġ520": 36141, + "Tags": 36142, + "ĠTurning": 36143, + "Ġuttered": 36144, + "Ġpedd": 36145, + "Ġimprovised": 36146, + "Ġ:(": 36147, + "Ġtabl": 36148, + "Ġplains": 36149, + "1600": 36150, + "pressure": 36151, + "ĠEssence": 36152, + "margin": 36153, + "friends": 36154, + "ĠRestoration": 36155, + "Ġpollut": 36156, + "ĠPoker": 36157, + "ĠAugustine": 36158, + "ĠCIS": 36159, + "ĠSEAL": 36160, + "orama": 36161, + "Ġthwart": 36162, + "seek": 36163, + "Ġpagan": 36164, + "º": 36165, + "cpu": 36166, + "Ġgarn": 36167, + "Ġassortment": 36168, + "ĠILCS": 36169, + "tower": 36170, + "Recommended": 36171, + "Ġunborn": 36172, + "ĠRandomRedditor": 36173, + "ĠRandomRedditorWithNo": 36174, + "Ġparalyzed": 36175, + "Ġeruption": 36176, + "Ġintersect": 36177, + "ĠStoke": 36178, + "ĠSco": 36179, + "Bind": 36180, + "å¾": 36181, + "ĠPNG": 36182, + "ĠNegative": 36183, + "ĠNOAA": 36184, + "Leon": 36185, + "Ġalloy": 36186, + "ĠLama": 36187, + "ĠDiversity": 36188, + "575": 36189, + "Ġunderestimated": 36190, + "ĠScor": 36191, + "Ġmural": 36192, + "Ġbusted": 36193, + "soon": 36194, + "lif": 36195, + "Ġnonex": 36196, + "Ġallergy": 36197, + "ĠUnderworld": 36198, + "ĠRays": 36199, + "ĠBlasio": 36200, + "Ġhrs": 36201, + "ĠDir": 36202, + "Ġ327": 36203, + "byter": 36204, + 
"Ġreplacements": 36205, + "Ġactivates": 36206, + "rived": 36207, + "MH": 36208, + "Ġpans": 36209, + "ĠHI": 36210, + "Ġlongitudinal": 36211, + "Ġnuisance": 36212, + "aler": 36213, + "Ġswell": 36214, + "ĠSigned": 36215, + "sci": 36216, + "ĠIsles": 36217, + "ĠAGA": 36218, + "Ġdefiant": 36219, + "Ġsonic": 36220, + "ocon": 36221, + "KC": 36222, + "ĠAim": 36223, + "tie": 36224, + "ahah": 36225, + "ĠmL": 36226, + "DX": 36227, + "Ġbisc": 36228, + "ĠBillboard": 36229, + "ĠSYSTEM": 36230, + "NEY": 36231, + "gaard": 36232, + "Ġdistressed": 36233, + "formerly": 36234, + "Alan": 36235, + "Ġchefs": 36236, + "Ġoptics": 36237, + "ĠComet": 36238, + "ĠAMC": 36239, + "Ġredesigned": 36240, + "irmation": 36241, + "Ġsightings": 36242, + "382": 36243, + "311": 36244, + "ĠWB": 36245, + "Ġcontraction": 36246, + "ĠTOTAL": 36247, + "Dual": 36248, + "Ġstartled": 36249, + "Ġunderstandably": 36250, + "Ġsunglasses": 36251, + "ETHOD": 36252, + "Ġdocker": 36253, + "Ġsurfing": 36254, + "ĠHEL": 36255, + "ĠSlack": 36256, + "tones": 36257, + "Ġshalt": 36258, + "Visual": 36259, + "498": 36260, + "Department": 36261, + "cussion": 36262, + "Ġunrestricted": 36263, + "Ġtad": 36264, + "Ġrename": 36265, + "employed": 36266, + "Ġeducating": 36267, + "Ġgrinned": 36268, + "bedroom": 36269, + "ĠActivities": 36270, + "ĠVelvet": 36271, + "ĠSWAT": 36272, + "Ġshuffle": 36273, + "igor": 36274, + "Ġsaturation": 36275, + "Finding": 36276, + "cream": 36277, + "icter": 36278, + "Ġvodka": 36279, + "tracking": 36280, + "tec": 36281, + "Ġforeground": 36282, + "iesta": 36283, + "Ġvehement": 36284, + "ĠECB": 36285, + "ĠTie": 36286, + "Ey": 36287, + "Ġturtles": 36288, + "ĠRailroad": 36289, + "ĠKatz": 36290, + "ĠFrames": 36291, + "Ġmenace": 36292, + "ĠFellowship": 36293, + "ĠEssential": 36294, + "uggish": 36295, + "Ġdrip": 36296, + "chwitz": 36297, + "ĠKyoto": 36298, + "sb": 36299, + "ĠNina": 36300, + "Parameter": 36301, + "Ġalarms": 36302, + "ĠClaud": 36303, + "Ġpioneering": 36304, + "Ġchiefly": 36305, + "ĠScream": 36306, + "Collection": 36307, + "Ġthankfully": 36308, + "ĠRonaldo": 36309, + "åŃIJ": 36310, + "strip": 36311, + "ĠDisneyland": 36312, + "commercial": 36313, + "Seeing": 36314, + "Soul": 36315, + "Ġevacuate": 36316, + "Ġciv": 36317, + "ĠAshe": 36318, + "Ġdivides": 36319, + "ĠDagger": 36320, + "rehensive": 36321, + "Ġberries": 36322, + "ĠDF": 36323, + "Ġsushi": 36324, + "Ġplurality": 36325, + "WI": 36326, + "Ġdisadvantaged": 36327, + "Ġbattalion": 36328, + "obiles": 36329, + "451": 36330, + "Ġcling": 36331, + "Ġundeniable": 36332, + "ĠLounge": 36333, + "Ġhaunt": 36334, + "phe": 36335, + "Ġquantify": 36336, + "Ġdiffered": 36337, + "Ġ[*]": 36338, + "ĠViz": 36339, + "cum": 36340, + "slave": 36341, + "Ġvideog": 36342, + "Ġquar": 36343, + "Ġbundles": 36344, + "ĠAlonso": 36345, + "tackle": 36346, + "Ġneuronal": 36347, + "Ġlandslide": 36348, + "confirmed": 36349, + "ĠDepth": 36350, + "Ġrenewables": 36351, + "Bear": 36352, + "ĠMacedonia": 36353, + "Ġjerseys": 36354, + "Ġbunk": 36355, + "ĠSpawn": 36356, + "ĠControls": 36357, + "ĠBuchanan": 36358, + "Ġrobotics": 36359, + "Ġemphasizing": 36360, + "ĠTutorial": 36361, + "hyp": 36362, + "iston": 36363, + "Ġmonumental": 36364, + "æ°": 36365, + "ĠCarry": 36366, + "Ġtbsp": 36367, + "enance": 36368, + "Hill": 36369, + "arthed": 36370, + "Ġrotten": 36371, + "Dean": 36372, + "Ġtwisting": 36373, + "Ġgoodwill": 36374, + "Ġimmersion": 36375, + "Living": 36376, + "Ġbrushes": 36377, + "ĠCGI": 36378, + "ĠAtk": 36379, + "traditional": 36380, + "Ġphantom": 36381, + "ĠStamina": 36382, + "Ġexpansions": 36383, + "ĠMarin": 
36384, + "Ġembarked": 36385, + "ĠEg": 36386, + "intestinal": 36387, + "ĠPEOPLE": 36388, + "ĠBooth": 36389, + "ĠAppalach": 36390, + "Ġrelegated": 36391, + "VT": 36392, + "MIT": 36393, + "Ġmuster": 36394, + "Ġwithdrawing": 36395, + "Ġmicroscope": 36396, + "ĠGathering": 36397, + "ĠCrescent": 36398, + "ĠArgentine": 36399, + "ĠDecre": 36400, + "ĠDominic": 36401, + "Ġbuds": 36402, + "antage": 36403, + "ĠIon": 36404, + "Ġwidened": 36405, + "ONSORED": 36406, + "ĠGloves": 36407, + "iannopoulos": 36408, + "razen": 36409, + "feel": 36410, + "Ġrepayment": 36411, + "Ġhindsight": 36412, + "ĠREALLY": 36413, + "ĠPistol": 36414, + "ĠBrah": 36415, + "Ġwatts": 36416, + "Ġsurvives": 36417, + "Ġflurry": 36418, + "issy": 36419, + "Alert": 36420, + "ĠUruguay": 36421, + "Phoenix": 36422, + "Slow": 36423, + "ĠGrave": 36424, + "ĠFir": 36425, + "Ġmanageable": 36426, + "Ġtariff": 36427, + "ĠUDP": 36428, + "ĠPistons": 36429, + "ĠNigerian": 36430, + "Ġstrikeouts": 36431, + "Ġcosmetics": 36432, + "whelming": 36433, + "fab": 36434, + "cape": 36435, + "proxy": 36436, + "Ġrethink": 36437, + "Ġovercoming": 36438, + "simple": 36439, + "Ġwoo": 36440, + "Ġdistracting": 36441, + "ĠStanton": 36442, + "ĠTulsa": 36443, + "ĠDock": 36444, + "659": 36445, + "Ġdiscord": 36446, + "ĠEmacs": 36447, + "ĠVes": 36448, + "ĠROB": 36449, + "Ġreassuring": 36450, + "Ġconsortium": 36451, + "Muslims": 36452, + "321": 36453, + "Ġprompts": 36454, + "sei": 36455, + "ĠHitch": 36456, + "imposed": 36457, + "ĠFool": 36458, + "Ġindiscrim": 36459, + "wrong": 36460, + "buquerque": 36461, + "Davis": 36462, + "!]": 36463, + "Ġtimeless": 36464, + "ĠNEED": 36465, + "Ġpesticide": 36466, + "Ġrallying": 36467, + "ĠCalder": 36468, + "Ġå¤": 36469, + "Ġxp": 36470, + "ĠUnle": 36471, + "ĠExport": 36472, + "luaj": 36473, + "Buff": 36474, + ")[": 36937, + "Ġsqor": 36938, + "Saudi": 36939, + "Ġistg": 36940, + "Ġindulge": 36941, + "proc": 36942, + "Ġdisgusted": 36943, + "Ġcompounded": 36944, + "Ġnem": 36945, + "Ġschooling": 36946, + "ĠCure": 36947, + "processing": 36948, + "Sol": 36949, + "Ġproverb": 36950, + "itized": 36951, + "ĠAlvarez": 36952, + "Ġscarf": 36953, + "Ġrectangular": 36954, + "reve": 36955, + "Ġhormonal": 36956, + "ĠStress": 36957, + "itizen": 36958, + "Ġ425": 36959, + "girls": 36960, + "ĠNoir": 36961, + "ĠRapp": 36962, + "Ġmarches": 36963, + "church": 36964, + "ĠUses": 36965, + "Ġ405": 36966, + "ĠBerm": 36967, + "Ġordinances": 36968, + "ĠJudgment": 36969, + "Charges": 36970, + "ĠZin": 36971, + "Ġdusty": 36972, + "Ġstrawberries": 36973, + "Ġperce": 36974, + "ĠThur": 36975, + "ĠDeborah": 36976, + "netflix": 36977, + "ĠLambert": 36978, + "Ġamused": 36979, + "ĠGuang": 36980, + "YOU": 36981, + "RGB": 36982, + "ĠCCTV": 36983, + "Ġfiat": 36984, + "rang": 36985, + "Ġfederation": 36986, + "ĠMant": 36987, + "ĠBust": 36988, + "ĠMare": 36989, + "respective": 36990, + "ĠMigration": 36991, + "ĠBIT": 36992, + "590": 36993, + "Ġpatriotism": 36994, + "Ġoutlining": 36995, + "region": 36996, + "ĠJosé": 36997, + "Ġblasting": 36998, + "ĠEzra": 36999, + "Bs": 37000, + "Ġundermines": 37001, + "ĠSmooth": 37002, + "Ġclashed": 37003, + "radio": 37004, + "Ġtransitioning": 37005, + "ĠBuccaneers": 37006, + "ĠOwl": 37007, + "Ġplugs": 37008, + "Ġhiatus": 37009, + "ĠPinball": 37010, + "Ġmig": 37011, + "ĠNutr": 37012, + "ĠWolfe": 37013, + "Ġintegers": 37014, + "Ġorbits": 37015, + "ĠEdwin": 37016, + "ĠDirectX": 37017, + "bite": 37018, + "Ġblazing": 37019, + "vr": 37020, + "Edge": 37021, + "ĠPID": 37022, + "exit": 37023, + "ĠComed": 37024, + "ĠPathfinder": 37025, + "ĠGuid": 37026, + 
"ĠSigns": 37027, + "ĠZer": 37028, + "ĠAgenda": 37029, + "Ġreimbursement": 37030, + "Mesh": 37031, + "iPhone": 37032, + "ĠMarcos": 37033, + "ĠSites": 37034, + "hate": 37035, + "enburg": 37036, + "Ġsockets": 37037, + "pend": 37038, + "Batman": 37039, + "vir": 37040, + "ĠSHOW": 37041, + "Ġprovisional": 37042, + "conn": 37043, + "ĠDeaths": 37044, + "ATIVE": 37045, + "Profile": 37046, + "sym": 37047, + "JA": 37048, + "Ġninja": 37049, + "installed": 37050, + "idates": 37051, + "ebra": 37052, + "ĠOmaha": 37053, + "Ġseizing": 37054, + "ĠBeasts": 37055, + "Ġsalts": 37056, + "Mission": 37057, + "Generally": 37058, + "ĠTrilogy": 37059, + "heon": 37060, + "legates": 37061, + "Ġdime": 37062, + "Ġfaire": 37063, + "parable": 37064, + "Graph": 37065, + "Ġtotaling": 37066, + "Ġdiagrams": 37067, + "ĠYanuk": 37068, + "plet": 37069, + "ĠMeh": 37070, + "Ġmythical": 37071, + "ĠStephens": 37072, + "autical": 37073, + "ochemistry": 37074, + "Ġkilograms": 37075, + "Ġelbows": 37076, + "ancock": 37077, + "ĠBCE": 37078, + "ĠPrague": 37079, + "Ġimprov": 37080, + "ĠDevin": 37081, + "Ġ\"\\": 37082, + "paralle": 37083, + "Ġsupremacists": 37084, + "ĠBillion": 37085, + "Ġregimen": 37086, + "innacle": 37087, + "Ġrequisite": 37088, + "angan": 37089, + "ĠBurlington": 37090, + "ainment": 37091, + "ĠObjective": 37092, + "omsky": 37093, + "GV": 37094, + "Ġunilateral": 37095, + "Ġtc": 37096, + "Ġhires": 37097, + "mental": 37098, + "Ġinvoluntary": 37099, + "Ġtranspl": 37100, + "ĠASCII": 37101, + "¨": 37102, + "Events": 37103, + "Ġdoubted": 37104, + "ĠKaplan": 37105, + "ĠCourage": 37106, + "igon": 37107, + "ĠManaging": 37108, + "ĠTart": 37109, + "Ġfalsehood": 37110, + "ĠViolet": 37111, + "Ġairs": 37112, + "Ġfertilizer": 37113, + "Britain": 37114, + "Ġaquatic": 37115, + "ouf": 37116, + "Words": 37117, + "ĠHartford": 37118, + "Ġevenings": 37119, + "ĠVengeance": 37120, + "quite": 37121, + "Gall": 37122, + "ĠPret": 37123, + "Ġpdf": 37124, + "ĠLM": 37125, + "ĠSochi": 37126, + "ĠIntercept": 37127, + "920": 37128, + "Ġprofitability": 37129, + "ĠIdle": 37130, + "ĠMacDonald": 37131, + "ĠEstablishment": 37132, + "umsy": 37133, + "Ġgatherings": 37134, + "ĠNaj": 37135, + "Charlie": 37136, + "Ġascent": 37137, + "ĠProtector": 37138, + "Ġalgebra": 37139, + "Ġbios": 37140, + "forums": 37141, + "ELS": 37142, + "Introduced": 37143, + "Ġ335": 37144, + "Ġastronomy": 37145, + "Contribut": 37146, + "ĠPolic": 37147, + "Platform": 37148, + "Ġcontainment": 37149, + "wrap": 37150, + "Ġcoronary": 37151, + "ĠJelly": 37152, + "manager": 37153, + "Ġheartbreaking": 37154, + "cair": 37155, + "ĠChero": 37156, + "cgi": 37157, + "Medical": 37158, + "ĠAccountability": 37159, + "!!\"": 37160, + "ophile": 37161, + "Ġpsychotic": 37162, + "ĠRestrict": 37163, + "Ġequitable": 37164, + "issues": 37165, + "Ġ1905": 37166, + "ĠNek": 37167, + "cised": 37168, + "ĠTracking": 37169, + "Ġozone": 37170, + "Ġcooker": 37171, + "rosis": 37172, + "Ġreopen": 37173, + "Ġinfinity": 37174, + "ĠPharmaceutical": 37175, + "ensional": 37176, + "Attempt": 37177, + "ĠRory": 37178, + "Marco": 37179, + "Ġawaits": 37180, + "HOW": 37181, + "treated": 37182, + "Ġbolst": 37183, + "Ġrevered": 37184, + "Ġpods": 37185, + "oppers": 37186, + "0010": 37187, + "Ġamplitude": 37188, + "rican": 37189, + "SPONSORED": 37190, + "Ġtrousers": 37191, + "Ġhalves": 37192, + "ĠKaine": 37193, + "ĠCutler": 37194, + "ĠAUTH": 37195, + "Ġsplendid": 37196, + "Ġpreventive": 37197, + "ĠDudley": 37198, + "ifacts": 37199, + "uminati": 37200, + "ĠYin": 37201, + "Ġadmon": 37202, + "ĠVag": 37203, + "Ġinverted": 37204, + "Ġhastily": 
37205, + "ĠHague": 37206, + "Lyn": 37207, + "Ġledger": 37208, + "Ġastronomical": 37209, + "getting": 37210, + "Ġcirca": 37211, + "ĠCic": 37212, + "ĠTennis": 37213, + "Limited": 37214, + "Ġdru": 37215, + "ĠBYU": 37216, + "Ġtravellers": 37217, + "Ġpane": 37218, + "ĠIntro": 37219, + "Ġpatiently": 37220, + "Ġaiding": 37221, + "Ġloos": 37222, + "ĠTough": 37223, + "Ġ293": 37224, + "Ġconsumes": 37225, + "SourceFile": 37226, + "Ġ\"\"\"": 37227, + "Ġbonding": 37228, + "Ġtilted": 37229, + "Ġmenstrual": 37230, + "ĠCelestial": 37231, + "ULAR": 37232, + "Plugin": 37233, + "Ġrisking": 37234, + "Naz": 37235, + "ĠRiyadh": 37236, + "Ġaccredited": 37237, + "Ġskirm": 37238, + "éĽ": 37239, + "Ġexaminer": 37240, + "Ġmessing": 37241, + "Ġnearing": 37242, + "ĠChern": 37243, + "ĠBeckham": 37244, + "Ġswapped": 37245, + "Ġgoose": 37246, + "Kay": 37247, + "Ġlofty": 37248, + "ĠWallet": 37249, + "Ġ['": 37250, + "Ġapocalypse": 37251, + "Ġbamboo": 37252, + "ĠSPACE": 37253, + "ĠElena": 37254, + "Ġ306": 37255, + "acons": 37256, + "Ġtightened": 37257, + "Ġadolescence": 37258, + "Ġrainy": 37259, + "Ġvandalism": 37260, + "ĠNewtown": 37261, + "Ġconject": 37262, + "cakes": 37263, + "Ġcheated": 37264, + "Ġmoderators": 37265, + "params": 37266, + "EFF": 37267, + "Ġdeceit": 37268, + "ĠSTL": 37269, + "ĠTanzania": 37270, + "ĠRI": 37271, + "Ġ1923": 37272, + "ĠExile": 37273, + "thel": 37274, + "Ġtheolog": 37275, + "Ġquirky": 37276, + "ĠIrvine": 37277, + "Ġneedy": 37278, + "oris": 37279, + "Um": 37280, + "Ka": 37281, + "Ġmailbox": 37282, + "322": 37283, + "Ġbos": 37284, + "ĠPetra": 37285, + "KING": 37286, + "Ġenlarged": 37287, + "Often": 37288, + "Ġbadass": 37289, + "Ġ343": 37290, + "ĠPlaces": 37291, + "ĠCAD": 37292, + "Ġpristine": 37293, + "Ġintervening": 37294, + "direction": 37295, + "Ġlaz": 37296, + "ĠDSM": 37297, + "Ġprojecting": 37298, + "ĠFunk": 37299, + "agog": 37300, + "payment": 37301, + "nov": 37302, + "Ġchatter": 37303, + "ARB": 37304, + "Ġexaminations": 37305, + "ĠHousehold": 37306, + "ĠGus": 37307, + "Ford": 37308, + "414": 37309, + "Boss": 37310, + "Ġmystic": 37311, + "Ġleaps": 37312, + "ĠBav": 37313, + "ulz": 37314, + "budget": 37315, + "Football": 37316, + "Ġsubsidized": 37317, + "Ġfirsthand": 37318, + "Ġcoincide": 37319, + "ocular": 37320, + "Conn": 37321, + "ĠCollabor": 37322, + "Ġfools": 37323, + "amura": 37324, + "ahar": 37325, + "rists": 37326, + "Ġswollen": 37327, + "Ġexpended": 37328, + "ĠPau": 37329, + "sup": 37330, + "Ġspar": 37331, + "Ġkeynote": 37332, + "suff": 37333, + "Ġunequal": 37334, + "Ġprogressing": 37335, + "strings": 37336, + "ĠGamergate": 37337, + "Disney": 37338, + "ĠEleven": 37339, + "omnia": 37340, + "Ġscripted": 37341, + "Ġearners": 37342, + "brother": 37343, + "ĠEnabled": 37344, + "æ³": 37345, + "Ġlarvae": 37346, + "ĠLOC": 37347, + "mess": 37348, + "Wilson": 37349, + "ĠTemplate": 37350, + "successfully": 37351, + "Ġparamount": 37352, + "Ġcamouflage": 37353, + "Ġbinds": 37354, + "ĠQuiet": 37355, + "ĠShutterstock": 37356, + "rush": 37357, + "Ġmascot": 37358, + "fortune": 37359, + "ĠColt": 37360, + "ĠBeyon": 37361, + "habi": 37362, + "Ġhairc": 37363, + "Ġ267": 37364, + "ĠDeus": 37365, + "Ġtwitch": 37366, + "Ġconcentrating": 37367, + "Ġnipples": 37368, + "cible": 37369, + "Ġgir": 37370, + "NZ": 37371, + "Math": 37372, + "nih": 37373, + "Required": 37374, + "Ġponder": 37375, + "ĠSAN": 37376, + "Ġweddings": 37377, + "Ġloneliness": 37378, + "NES": 37379, + "ĠMahjong": 37380, + "695": 37381, + "addle": 37382, + "ĠGarner": 37383, + "ĠCOUR": 37384, + "Bridge": 37385, + "Ġspree": 37386, + "ĠCaldwell": 
37387, + "Ġbribery": 37388, + "Ġ��������": 37389, + "plugins": 37390, + "Ġracket": 37391, + "Ġchampagne": 37392, + "versible": 37393, + "Vote": 37394, + "Ġmodifiers": 37395, + "Mayor": 37396, + "680": 37397, + "Ġassemblies": 37398, + "ĠSultan": 37399, + "ĠNing": 37400, + "ĠLadies": 37401, + "Ġsulfur": 37402, + "Ġorbs": 37403, + "Ġ-----": 37404, + "_______": 37405, + "ĠJournalism": 37406, + "Ġesports": 37407, + "Ġlush": 37408, + "Ġhue": 37409, + "Ġspectral": 37410, + "Honest": 37411, + "ãĥı": 37412, + "Ġbushes": 37413, + "Ġreinforcement": 37414, + "Ġreopened": 37415, + "ĠWheels": 37416, + "ĠMorg": 37417, + "rieving": 37418, + "Ġauxiliary": 37419, + "ĠjQuery": 37420, + "ĠBAT": 37421, + "tesque": 37422, + "Ġvertex": 37423, + "pure": 37424, + "frey": 37425, + "ãĤº": 37426, + "dos": 37427, + "Ġtyph": 37428, + "Ġcull": 37429, + "Ġeq": 37430, + "Ġdecon": 37431, + "Ġtossing": 37432, + "Ġdisparate": 37433, + "ĠBrigham": 37434, + "printf": 37435, + "ledged": 37436, + "Ġsund": 37437, + "Ġcozy": 37438, + "Ġhepatitis": 37439, + "performing": 37440, + "Ġaval": 37441, + "ĠGG": 37442, + "future": 37443, + "Ġpetertodd": 37444, + "ĠKosovo": 37445, + "Ġmagnets": 37446, + "Already": 37447, + "ĠEdison": 37448, + "ĠCeres": 37449, + "ĠRAID": 37450, + "Ġbrilliance": 37451, + "576": 37452, + "Ġderives": 37453, + "Ġhypertension": 37454, + "ĠÎĶ": 37455, + "Ġlambda": 37456, + "Ġflair": 37457, + "Ġmissionaries": 37458, + "Ġrapes": 37459, + "ĠStarter": 37460, + "ĠMonths": 37461, + "Ġdefy": 37462, + "Ġseismic": 37463, + "ĠRaphael": 37464, + "Ġeurozone": 37465, + "656": 37466, + "zsche": 37467, + "Ġscratched": 37468, + "Ġbows": 37469, + "ĠLennon": 37470, + "ĠGaia": 37471, + "Ġdripping": 37472, + "facts": 37473, + "Ale": 37474, + "Ġfrogs": 37475, + "ĠBreast": 37476, + "ogeneity": 37477, + "ĠProsecutor": 37478, + "Ġamplified": 37479, + "ĠHodg": 37480, + "ĠFn": 37481, + "Thousands": 37482, + "ĠNIH": 37483, + "ĠMonitoring": 37484, + "FTWARE": 37485, + "ĠPriebus": 37486, + "ĠGrowing": 37487, + "hunter": 37488, + "Ġdiagnose": 37489, + "ĠMald": 37490, + "ĠLR": 37491, + "Ġcrowned": 37492, + "Ġbursting": 37493, + "Ġdissolution": 37494, + "javascript": 37495, + "Ġusefulness": 37496, + "ĠExecution": 37497, + ":(": 37498, + "ĠIvory": 37499, + "aah": 37500, + "Ġpersecuted": 37501, + "violence": 37502, + "istas": 37503, + "ĠCrate": 37504, + "Ġimpulses": 37505, + "ĠSpani": 37506, + "edes": 37507, + "Handle": 37508, + "ĠZerg": 37509, + "thinkable": 37510, + "Lastly": 37511, + "Ġspontaneously": 37512, + "Ġinconvenient": 37513, + "Ġdismissing": 37514, + "Ġplotted": 37515, + "Ġeighty": 37516, + "Ġ737": 37517, + "rish": 37518, + "ĠThornton": 37519, + "atham": 37520, + "Ġsitcom": 37521, + "Ven": 37522, + "Recipe": 37523, + "tel": 37524, + "lund": 37525, + "Ġclears": 37526, + "ĠSasuke": 37527, + "Ġ258": 37528, + "Ġopting": 37529, + "Ġenraged": 37530, + "esthetic": 37531, + "ĠAe": 37532, + "uchs": 37533, + "Prep": 37534, + "Flow": 37535, + "Ġrunoff": 37536, + "ĠEating": 37537, + "ĠGiles": 37538, + "ĠActing": 37539, + "resources": 37540, + "ibaba": 37541, + "Ġrpm": 37542, + "Ġskewed": 37543, + "ĠBlanc": 37544, + "ĠSakuya": 37545, + "Ġhotter": 37546, + "Ġ1924": 37547, + "opian": 37548, + "cko": 37549, + "Ġcrumbling": 37550, + "Ġcaptains": 37551, + "ĠAppropriations": 37552, + "leaders": 37553, + "dropping": 37554, + "anuts": 37555, + "Ġreversing": 37556, + "ĠPose": 37557, + "ĠSek": 37558, + "Scot": 37559, + "ĠIdea": 37560, + "cise": 37561, + "ĠSlovenia": 37562, + "Ġ317": 37563, + "Doctor": 37564, + "Ġcrocod": 37565, + "aldi": 37566, + "Sea": 
37567, + "ĠFarrell": 37568, + "Ġmercenaries": 37569, + "ĠRNC": 37570, + "ĠGuess": 37571, + "Ġpacing": 37572, + "Machine": 37573, + "StreamerBot": 37574, + "ĠCharity": 37575, + "Ġ298": 37576, + "Ġcannons": 37577, + "ĠToby": 37578, + "TPPStreamerBot": 37579, + "ĠPassion": 37580, + "cfg": 37581, + "Thom": 37582, + "Ġbadges": 37583, + "ĠBernstein": 37584, + ".âĢĵ": 37585, + "ĠPOP": 37586, + "ĠConj": 37587, + "Ġinitialization": 37588, + "Ġbiodiversity": 37589, + "Dub": 37590, + "Ġfeudal": 37591, + "Ġdisclaimer": 37592, + "Ġcrow": 37593, + "Ġignition": 37594, + "arf": 37595, + "SHA": 37596, + "ĠkHz": 37597, + "hazard": 37598, + "ĠArtists": 37599, + "oeuv": 37600, + "679": 37601, + "ĠRudy": 37602, + "Nine": 37603, + "ĠRamadan": 37604, + "å½": 37605, + "itto": 37606, + "Ġadrenaline": 37607, + "Cert": 37608, + "Ġsmelled": 37609, + "Ġimpunity": 37610, + "Ġagendas": 37611, + "ĠReborn": 37612, + "ĠConcent": 37613, + "ĠSeems": 37614, + "Ġomega": 37615, + "ĠDustin": 37616, + "Ġbacker": 37617, + "ĠSauce": 37618, + "ĠBoyle": 37619, + "WIN": 37620, + "Ġspins": 37621, + "Ġpauses": 37622, + "upt": 37623, + "Ġshredded": 37624, + "Ġstrapped": 37625, + "ĠCorruption": 37626, + "Ġscratches": 37627, + "Ġni": 37628, + "Ġattire": 37629, + "ĠSAF": 37630, + "FactoryReloaded": 37631, + "ĠIPS": 37632, + "Ġ(%": 37633, + "Ġseminar": 37634, + "focus": 37635, + "civil": 37636, + "Ġ1860": 37637, + "intosh": 37638, + "Ġcontinual": 37639, + "Ġabbrevi": 37640, + "ĠSok": 37641, + "ocobo": 37642, + "XM": 37643, + "Ġfrantic": 37644, + "Ġunavoidable": 37645, + "Ġartery": 37646, + "Ġannotations": 37647, + "bath": 37648, + "Climate": 37649, + "Ġdors": 37650, + "ĠSlide": 37651, + "coord": 37652, + "ĠReload": 37653, + "ĠLDL": 37654, + "ĠLovecraft": 37655, + "Ġunimagin": 37656, + "Ġresembled": 37657, + "Ġbarracks": 37658, + "np": 37659, + "Ġsurrogate": 37660, + "Ġcategorized": 37661, + "ãĤ©": 37662, + "Ġvaccinated": 37663, + "Ġdrainage": 37664, + "Ġindist": 37665, + "ĠWhatsApp": 37666, + "Ġ1870": 37667, + "olerance": 37668, + "invoke": 37669, + "amorph": 37670, + "Ġreconnect": 37671, + "Ġemanc": 37672, + "Ġblindness": 37673, + "Ġ1280": 37674, + "internet": 37675, + "collar": 37676, + "Ġaltru": 37677, + "Ġabyss": 37678, + "ĠTRI": 37679, + "657": 37680, + "Ġinfused": 37681, + "HEAD": 37682, + "Ġforestry": 37683, + "ĠWoody": 37684, + "ĠCi": 37685, + "wi": 37686, + "sam": 37687, + "784": 37688, + "holiday": 37689, + "Ġmogul": 37690, + "ĠFees": 37691, + "ĠDEN": 37692, + "Internal": 37693, + "urbed": 37694, + "fusc": 37695, + "atom": 37696, + "ĠIllusion": 37697, + "Ġpolled": 37698, + "Ġflap": 37699, + "Ġcoax": 37700, + "LGBT": 37701, + "Analy": 37702, + "ĠSections": 37703, + "ĠCaliforn": 37704, + "emn": 37705, + "Ġhither": 37706, + "ĠNIGHT": 37707, + "Ġnailed": 37708, + "ĠPipeline": 37709, + "391": 37710, + "oof": 37711, + "ĠPrimal": 37712, + "verend": 37713, + "Ġslashing": 37714, + "Ġretri": 37715, + "aviour": 37716, + "Ġdeparting": 37717, + "gil": 37718, + "ISC": 37719, + "Ġmidway": 37720, + "Ġultrasound": 37721, + "Ġbehaving": 37722, + "ĠTara": 37723, + "classes": 37724, + "Virtual": 37725, + "ĠColonial": 37726, + "Ġstripping": 37727, + "Ġorchestrated": 37728, + "ĠGraves": 37729, + "452": 37730, + "ĠIronically": 37731, + "ĠWriters": 37732, + "Ġlends": 37733, + "ĠManz": 37734, + "Ġraven": 37735, + "Ġoxidative": 37736, + "Ġ266": 37737, + "ELF": 37738, + "actually": 37739, + "ascar": 37740, + "Draft": 37741, + "Ġfavourable": 37742, + "Ġhumiliating": 37743, + "Ġfidelity": 37744, + "ĠHof": 37745, + "ĠXuan": 37746, + "496": 37747, + "Ġlayered": 
37748, + "atis": 37749, + "790": 37750, + "Ġpaycheck": 37751, + "iton": 37752, + "Kar": 37753, + "ĠVMware": 37754, + "ĠFarmer": 37755, + "Ġservic": 37756, + "glomer": 37757, + "Ġslump": 37758, + "ĠFabric": 37759, + "ĠDOC": 37760, + "esting": 37761, + "Ġreassure": 37762, + "Ġphyl": 37763, + "volt": 37764, + "itory": 37765, + "Rules": 37766, + "Ġoxidation": 37767, + "Ġprized": 37768, + "Ġmistress": 37769, + "ĠDjango": 37770, + "WARN": 37771, + "åij": 37772, + "Ġencode": 37773, + "ĠFeedback": 37774, + "Ġstupidity": 37775, + "Ian": 37776, + "ĠYugoslavia": 37777, + "ר": 37778, + "acl": 37779, + "UTE": 37780, + "1977": 37781, + "Ġqualifies": 37782, + "Ġpulses": 37783, + "pretty": 37784, + "Ġfroze": 37785, + "Ġss": 37786, + "Iterator": 37787, + "Ġurgently": 37788, + "Ġmailed": 37789, + "ĠCham": 37790, + "Ġsustaining": 37791, + "Ġbasil": 37792, + "Ġpuppies": 37793, + "ilant": 37794, + "ĠPLEASE": 37795, + "lap": 37796, + "aceous": 37797, + "Fear": 37798, + "ĠMastery": 37799, + "automatic": 37800, + "ĠTAG": 37801, + "Ġantim": 37802, + "agles": 37803, + "473": 37804, + "frames": 37805, + "Ġwhispers": 37806, + "ĠWhoever": 37807, + "Ġbravery": 37808, + "ĠUKIP": 37809, + "ractions": 37810, + "\"\"\"": 37811, + "Ġtame": 37812, + "Ġparted": 37813, + "everything": 37814, + "CONT": 37815, + "Ġindebted": 37816, + "Ġaddr": 37817, + "rek": 37818, + "IRED": 37819, + "Ġeminent": 37820, + "clinton": 37821, + "Ġousted": 37822, + "Ġreviewer": 37823, + "Ġmeltdown": 37824, + "Ġrearr": 37825, + "ĠYao": 37826, + "thereal": 37827, + "abyte": 37828, + "Ġstumbling": 37829, + "Ġbatches": 37830, + "Ġ259": 37831, + "Ġcontraceptive": 37832, + "Ġprostitute": 37833, + "ensis": 37834, + "Decl": 37835, + "ĠStrikes": 37836, + "Military": 37837, + "ĠOath": 37838, + "vacc": 37839, + "ppings": 37840, + "052": 37841, + "ĠpartName": 37842, + "amping": 37843, + "Reports": 37844, + "KI": 37845, + "CHR": 37846, + "Ġsubtly": 37847, + "swers": 37848, + "Blake": 37849, + "usual": 37850, + "Ġcontestants": 37851, + "Ġcartridges": 37852, + "ĠGREAT": 37853, + "Ġblush": 37854, + "ĠâĢº": 37855, + "472": 37856, + "Ġreasoned": 37857, + "ãĥ¤": 37858, + "paralleled": 37859, + "Ġdyn": 37860, + "agate": 37861, + "Ġnightly": 37862, + "åĨ": 37863, + "556": 37864, + "Ġsemantic": 37865, + "ĠAdvoc": 37866, + "Ġ!!": 37867, + "Ġdisagrees": 37868, + "ĠBW": 37869, + "Veh": 37870, + "Ġharming": 37871, + "Ġembraces": 37872, + "Ġstrives": 37873, + "Ġinland": 37874, + "ĠKard": 37875, + "Ġheats": 37876, + "ĠGinny": 37877, + "utan": 37878, + "ernaut": 37879, + "ylene": 37880, + "ĠElev": 37881, + "JD": 37882, + "Ġhars": 37883, + "ĠStarr": 37884, + "Ġskysc": 37885, + "Ġcollaborators": 37886, + "Usually": 37887, + "Ġrevolutions": 37888, + "ĠSTATS": 37889, + "Ġdismantle": 37890, + "Ġconfidently": 37891, + "Ġkinetic": 37892, + "Ali": 37893, + "Ġpercentile": 37894, + "Ġextracting": 37895, + "illian": 37896, + "estead": 37897, + "Ġphysicists": 37898, + "ĠMarshal": 37899, + "Ġfellowship": 37900, + "Ġdashed": 37901, + "ĠUR": 37902, + "ĠSioux": 37903, + "ĠCompact": 37904, + "amide": 37905, + "Python": 37906, + "ĠLeigh": 37907, + "ĠPharmac": 37908, + "istrates": 37909, + "herical": 37910, + "Ġfue": 37911, + "ĠEmin": 37912, + "Ġ({": 37913, + "ĠNeighborhood": 37914, + "Ġdisrupting": 37915, + "ĠDup": 37916, + "Ġgland": 37917, + "ĠSev": 37918, + "ĠMarian": 37919, + "argon": 37920, + "ĠDund": 37921, + "Ġ": 46904, + "ĠPhilips": 46905, + "ĠKafka": 46906, + "Ġupheaval": 46907, + "Ġsentimental": 46908, + "Ġsax": 46909, + "ĠAkira": 46910, + "serial": 46911, + "Matrix": 46912, + 
"Ġelecting": 46913, + "Ġcommenter": 46914, + "ĠNebula": 46915, + "plets": 46916, + "ĠNadu": 46917, + "ĠAdren": 46918, + "Ġenshr": 46919, + "ĠRAND": 46920, + "financial": 46921, + "ĠClyde": 46922, + "utherford": 46923, + "Ġsignage": 46924, + "Ġdeline": 46925, + "Ġphosphate": 46926, + "roversial": 46927, + "fascist": 46928, + "ĠVall": 46929, + "ĠBethlehem": 46930, + "Ġfors": 46931, + "Ġenglish": 46932, + "Solid": 46933, + "Nature": 46934, + "Ġva": 46935, + "ĠGuests": 46936, + "Ġtantal": 46937, + "Ġautoimmune": 46938, + ";;;;;;;;;;;;": 46939, + "ĠTotally": 46940, + "ĠOv": 46941, + "Ġdefences": 46942, + "ĠCoconut": 46943, + "Ġtranquil": 46944, + "Ġploy": 46945, + "Ġflavours": 46946, + "ĠFlask": 46947, + "ãĤ¨ãĥ«": 46948, + "ĠWeston": 46949, + "ĠVolvo": 46950, + "870": 46951, + "Ġmicrophones": 46952, + "verbal": 46953, + "RPG": 46954, + "Ġiii": 46955, + ";}": 46956, + "028": 46957, + "Ġheadlined": 46958, + "Ġprimed": 46959, + "Ġhoard": 46960, + "ĠShad": 46961, + "ĠENTER": 46962, + "Ġtriangular": 46963, + "Ġcapit": 46964, + "lik": 46965, + "ĠAncients": 46966, + "Ġlash": 46967, + "Ġconvol": 46968, + "Ġcolonel": 46969, + "enemy": 46970, + "Gra": 46971, + "Ġpubs": 46972, + "utters": 46973, + "Ġassigns": 46974, + "ĠPenet": 46975, + "ĠMonstrous": 46976, + "ĠBowen": 46977, + "ilver": 46978, + "Haunted": 46979, + "ĠDing": 46980, + "started": 46981, + "plin": 46982, + "Ġcontaminants": 46983, + "ĠDOE": 46984, + "ffen": 46985, + "ĠTechnician": 46986, + "Ry": 46987, + "Ġrobbers": 46988, + "Ġhotline": 46989, + "ĠGuardiola": 46990, + "ĠKaufman": 46991, + "rower": 46992, + "ĠDresden": 46993, + "ĠAlpine": 46994, + "Elf": 46995, + "Ġfmt": 46996, + "ĠSard": 46997, + "urses": 46998, + "gpu": 46999, + "Unix": 47000, + "Ġunequivocally": 47001, + "ĠCitizenship": 47002, + "quad": 47003, + "mire": 47004, + "ĠSweeney": 47005, + "Battery": 47006, + "615": 47007, + "Ġpancakes": 47008, + "Ġoats": 47009, + "Maps": 47010, + "ĠContrast": 47011, + "mbudsman": 47012, + "ĠEPS": 47013, + "Ġsubcommittee": 47014, + "Ġsourcing": 47015, + "Ġsizing": 47016, + "ĠBuffer": 47017, + "ĠMandatory": 47018, + "Ġmoderates": 47019, + "ĠPatterns": 47020, + "ĠChocobo": 47021, + "ĠZan": 47022, + "ĠSTATES": 47023, + "ĠJudging": 47024, + "ĠInher": 47025, + "*:": 47026, + "Ġbil": 47027, + "ĠYen": 47028, + "Ġexhilar": 47029, + "ollower": 47030, + "zers": 47031, + "Ġsnug": 47032, + "maximum": 47033, + "Ġdespicable": 47034, + "ĠPACK": 47035, + "ĠAnnex": 47036, + "Ġsarcastic": 47037, + "Ġlatex": 47038, + "Ġtamp": 47039, + "ĠSao": 47040, + "bah": 47041, + "ĠReverend": 47042, + "ĠChinatown": 47043, + "ĠAUT": 47044, + "documented": 47045, + "ĠGABA": 47046, + "ĠCanaan": 47047, + "ĠÙħ": 47048, + "Ġgoverns": 47049, + "prev": 47050, + "Esc": 47051, + "ĠEstimates": 47052, + "OSP": 47053, + "Ġendeavour": 47054, + "ĠClosing": 47055, + "ometime": 47056, + "everyone": 47057, + "Ġworsen": 47058, + "Ġscanners": 47059, + "Ġdeviations": 47060, + "ĠRobotics": 47061, + "ĠCompton": 47062, + "Ġsorcerer": 47063, + "Ġendogenous": 47064, + "Ġemulation": 47065, + "ĠPiercing": 47066, + "ĠAph": 47067, + "ĠSocket": 47068, + "Ġbould": 47069, + "ĠOU": 47070, + "ĠBorderlands": 47071, + "Ġ1863": 47072, + "Gordon": 47073, + "ĠWTO": 47074, + "Ġrestricts": 47075, + "Ġmosaic": 47076, + "Ġmelodies": 47077, + "çĦ": 47078, + "Tar": 47079, + "Ġdisson": 47080, + "ĠProvides": 47081, + "Ġ......": 47082, + "bek": 47083, + "FIX": 47084, + "Ġbroom": 47085, + "anship": 47086, + "Doctors": 47087, + "Ġnerds": 47088, + "ĠRegions": 47089, + "naissance": 47090, + "Ġmete": 47091, + "Ġcrept": 47092, + 
"plings": 47093, + "Ġgirlfriends": 47094, + "knit": 47095, + "igent": 47096, + "owe": 47097, + "Ġushered": 47098, + "ĠBaz": 47099, + "Mobil": 47100, + "434": 47101, + "ĠPresents": 47102, + "origin": 47103, + "Ġinsomnia": 47104, + "ĠAux": 47105, + "439": 47106, + "ĠChili": 47107, + "irsch": 47108, + "GAME": 47109, + "Ġgestation": 47110, + "algia": 47111, + "romising": 47112, + "$,": 47113, + "crow": 47114, + "ĠInspection": 47115, + "atomic": 47116, + "Relations": 47117, + "JOHN": 47118, + "roman": 47119, + "ĠClockwork": 47120, + "ĠBakr": 47121, + "mone": 47122, + "MET": 47123, + "Ġthirsty": 47124, + "Ġbc": 47125, + "Ġfaculties": 47126, + "Rum": 47127, + "Ġnuance": 47128, + "ĠDarius": 47129, + "pleting": 47130, + "fters": 47131, + "etchup": 47132, + "Registration": 47133, + "ĠKE": 47134, + "Rah": 47135, + "Ġpreferential": 47136, + "ĠLash": 47137, + "ĠHH": 47138, + "Valid": 47139, + "ĠNAV": 47140, + "Ġstarve": 47141, + "ĠGong": 47142, + "zynski": 47143, + "ĠActress": 47144, + "Ġwik": 47145, + "Ġunaccompanied": 47146, + "lvl": 47147, + "Bride": 47148, + "ADS": 47149, + "ĠCommando": 47150, + "ĠVaughn": 47151, + "Wallet": 47152, + "Ġhopping": 47153, + "ĠVie": 47154, + "Ġcaveats": 47155, + "Ġalas": 47156, + "ifled": 47157, + "abuse": 47158, + "661": 47159, + "Ġibn": 47160, + "Ġgul": 47161, + "Ġrobbing": 47162, + "til": 47163, + "ILA": 47164, + "Ġmitigating": 47165, + "Ġaptly": 47166, + "Ġtyrant": 47167, + "Ġmidday": 47168, + "ĠGilmore": 47169, + "ĠDecker": 47170, + "Ġ§§": 47171, + "partial": 47172, + "Exactly": 47173, + "Ġphenotype": 47174, + "Ġ[+]": 47175, + "ĠPlex": 47176, + "ĠIps": 47177, + "versions": 47178, + "Ġebook": 47179, + "Ġchic": 47180, + "gross": 47181, + "\":\"\"},{\"": 47182, + "ĠSurprisingly": 47183, + "Morgan": 47184, + "Ġresidues": 47185, + "ĠConfederation": 47186, + "infeld": 47187, + "Ġlyr": 47188, + "moderate": 47189, + "Ġperpendicular": 47190, + "VK": 47191, + "Ġsynchronized": 47192, + "Ġrefreshed": 47193, + "Ġadore": 47194, + "ĠTorment": 47195, + "olina": 47196, + "Ġ2600": 47197, + "ItemTracker": 47198, + "Ġpies": 47199, + "ĠFAT": 47200, + "ĠRHP": 47201, + "048": 47202, + "ĠRESP": 47203, + "ĠBJ": 47204, + "allows": 47205, + "Pand": 47206, + "Ġunwelcome": 47207, + "ĠVoc": 47208, + "ĠBastard": 47209, + "ĠOW": 47210, + "ĠLAR": 47211, + "ĠHealer": 47212, + "Environmental": 47213, + "ĠKenyan": 47214, + "ĠTrance": 47215, + "ĠPats": 47216, + "Ġaliases": 47217, + "ĠGarfield": 47218, + "Ġcampaigner": 47219, + "Ġadvancements": 47220, + "ĠOkinawa": 47221, + "ĠCoh": 47222, + "owsky": 47223, + "Ġstarved": 47224, + "Ġsizeable": 47225, + "Ġ:-)": 47226, + "ĠmRNA": 47227, + "Ġsuspensions": 47228, + "istar": 47229, + "Scotland": 47230, + "Prin": 47231, + "------------------------------------------------": 47232, + "Ġ502": 47233, + "Ġteaspoons": 47234, + "Ġ1050": 47235, + "Ġcoercive": 47236, + "ĠMasonic": 47237, + "edded": 47238, + "ĠPassenger": 47239, + "Ġlatt": 47240, + "Ġbraces": 47241, + "ĠSteal": 47242, + "ĠNYT": 47243, + "ĠKats": 47244, + "ĠCelest": 47245, + "aez": 47246, + "Tu": 47247, + "ĠCoulter": 47248, + "ðŁĺ": 47249, + "Flickr": 47250, + "ĠWilmington": 47251, + "iths": 47252, + "++;": 47253, + "Ġvending": 47254, + "Ġnegro": 47255, + "ĠPhi": 47256, + "ĠYellowstone": 47257, + "Callback": 47258, + "Ġshampoo": 47259, + "ĠShades": 47260, + "wat": 47261, + "Ġsuperhuman": 47262, + "Ġridiculed": 47263, + "Ġholiest": 47264, + "ombo": 47265, + "Ġinterns": 47266, + "Ġhone": 47267, + "ĠParagu": 47268, + "URI": 47269, + "Ġdangling": 47270, + "ãĤ»": 47271, + "sov": 47272, + "ictional": 47273, + 
"availability": 47274, + "Ġrevocation": 47275, + "Ġdow": 47276, + "inic": 47277, + "ĠTHEIR": 47278, + "Ġiso": 47279, + "Ġoutings": 47280, + "ĠLethal": 47281, + "Ġ)))": 47282, + "Ġinaccur": 47283, + "Ġoutlandish": 47284, + "Ġanus": 47285, + "letico": 47286, + "idon": 47287, + "lol": 47288, + "Ġunregulated": 47289, + "Ġsuccumbed": 47290, + "Ġcuff": 47291, + "ĠWasteland": 47292, + "letal": 47293, + "Ġsubstr": 47294, + "Ġcoffers": 47295, + "Ġautomakers": 47296, + "ovi": 47297, + "ĠXue": 47298, + "ĠDaytona": 47299, + "Ġjarring": 47300, + "Ġfumes": 47301, + "Ġdisbanded": 47302, + "zik": 47303, + "itton": 47304, + "Ġstrikingly": 47305, + "Ġspores": 47306, + "Adapter": 47307, + ".):": 47308, + "ĠLyndon": 47309, + "ivalry": 47310, + "Ġorally": 47311, + "Ġtumultuous": 47312, + "Ġdispleasure": 47313, + "Ġcones": 47314, + "orrect": 47315, + "Ġappease": 47316, + "Ġderby": 47317, + "ĠTripoli": 47318, + "ĠAless": 47319, + "Ġpoked": 47320, + "ĠGuilty": 47321, + "vP": 47322, + "Enough": 47323, + "Ġoriginals": 47324, + "699": 47325, + "Ġrabbi": 47326, + "Ġproverbial": 47327, + "Ġpostpone": 47328, + "elope": 47329, + "ĠMisty": 47330, + "Ġstaffed": 47331, + "ĠUnemployment": 47332, + "reditary": 47333, + "Ġdiligent": 47334, + "recomm": 47335, + "measures": 47336, + "asin": 47337, + "825": 47338, + "Ġponds": 47339, + "Ġmmol": 47340, + "ĠSAR": 47341, + "ĠCARE": 47342, + "Ġ371": 47343, + "Ġclenched": 47344, + "ĠCorsair": 47345, + "Ġcaricature": 47346, + "zn": 47347, + "attach": 47348, + "ĠSchro": 47349, + "speak": 47350, + "painted": 47351, + "ĠSuc": 47352, + "ĠENT": 47353, + "Ġcellul": 47354, + "ĠPaid": 47355, + "diagn": 47356, + "WHERE": 47357, + "Ġtexted": 47358, + "Barn": 47359, + "Ġretracted": 47360, + "ĠReferred": 47361, + "Sav": 47362, + "Ġupkeep": 47363, + "Ġworkplaces": 47364, + "ĠTokens": 47365, + "Ġamplify": 47366, + "clinical": 47367, + "Ġmultic": 47368, + "mberg": 47369, + "Ġconvoluted": 47370, + "Region": 47371, + "565": 47372, + "ĠTopic": 47373, + "Ġsnail": 47374, + "Ġsaline": 47375, + "Ġinsurrection": 47376, + "ĠPetr": 47377, + "forts": 47378, + "BAT": 47379, + "ĠNavajo": 47380, + "Ġrudimentary": 47381, + "ĠLaksh": 47382, + "ONDON": 47383, + "Measure": 47384, + "Ġtransformer": 47385, + "ĠGoddard": 47386, + "Ġcoincides": 47387, + "irin": 47388, + "Rex": 47389, + "ĠBok": 47390, + "quit": 47391, + "Ġshotguns": 47392, + "Ġproletarian": 47393, + "Ġscorp": 47394, + "ĠAda": 47395, + "514": 47396, + "Ġslander": 47397, + "recorded": 47398, + "Ġembell": 47399, + "risome": 47400, + "Ġapologizing": 47401, + "ĠMulcair": 47402, + "ĠGibraltar": 47403, + "Cla": 47404, + "Ġallot": 47405, + "ĠAttention": 47406, + "Ġ433": 47407, + "leave": 47408, + "Ġwhine": 47409, + "ĠIssa": 47410, + "ĠFaust": 47411, + "ĠBarron": 47412, + "heny": 47413, + "Ġvictimized": 47414, + "Jews": 47415, + "Ġnurturing": 47416, + "ettel": 47417, + "Winged": 47418, + "ĠSubtle": 47419, + "Ġflavorful": 47420, + "ĠReps": 47421, + "enged": 47422, + "callback": 47423, + "Ġdirectional": 47424, + "Ġclasp": 47425, + "ĠDirections": 47426, + "planet": 47427, + "iculture": 47428, + "Helper": 47429, + "icion": 47430, + "acia": 47431, + "Ġç¥ŀ": 47432, + "Ġsurges": 47433, + "Ġcanoe": 47434, + "ĠPremiership": 47435, + "been": 47436, + "Ġdefied": 47437, + "ĠTrooper": 47438, + "Ġtripod": 47439, + "Ġgasp": 47440, + "ĠEuph": 47441, + "ĠAds": 47442, + "vernight": 47443, + "highly": 47444, + "Role": 47445, + "Ġentangled": 47446, + "ĠZeit": 47447, + "618": 47448, + "ĠRusty": 47449, + "Ġhavens": 47450, + "ĠVaughan": 47451, + "HAEL": 47452, + "ĠSERVICE": 47453, + 
"/,": 47454, + "Ġstricken": 47455, + "Ġdelusions": 47456, + "Ġbis": 47457, + "ĠHaf": 47458, + "Ġgratification": 47459, + "Ġenticing": 47460, + "UNCH": 47461, + "Adams": 47462, + "ĠOLED": 47463, + "ĠBeetle": 47464, + "Ġ1899": 47465, + "ĠSOFTWARE": 47466, + "ategor": 47467, + "VL": 47468, + "ĠTotem": 47469, + "ĠGators": 47470, + "ATURES": 47471, + "Ġimpedance": 47472, + "Registered": 47473, + "ĠCary": 47474, + "ĠAerial": 47475, + "onne": 47476, + "enium": 47477, + "Ġdred": 47478, + "ĠBeg": 47479, + "Ġconcurrently": 47480, + "Ġsuperpower": 47481, + "ĠXan": 47482, + "jew": 47483, + "imester": 47484, + "ĠDickinson": 47485, + "âĶģ": 47486, + "Fla": 47487, + "Ġpree": 47488, + "ĠRollins": 47489, + "©¶æ": 47490, + "Ġdenomination": 47491, + "ĠLana": 47492, + "516": 47493, + "Ġinciting": 47494, + "scribed": 47495, + "juries": 47496, + "ĠWonders": 47497, + "approximately": 47498, + "Ġsuspending": 47499, + "Ġmountainous": 47500, + "ĠLaugh": 47501, + "oidal": 47502, + "Ns": 47503, + "Detect": 47504, + ")=": 47505, + "ĠLuthor": 47506, + "ĠSchwarzenegger": 47507, + "ĠMuller": 47508, + "ĠDevi": 47509, + "ecycle": 47510, + "Jar": 47511, + "613": 47512, + "ĠLongh": 47513, + "Bah": 47514, + "ĠSPORTS": 47515, + "nw": 47516, + "Ġrefinement": 47517, + "Ġwaterways": 47518, + "Ġdiner": 47519, + "Blade": 47520, + "683": 47521, + "Fac": 47522, + "Ġinitials": 47523, + "Ġrog": 47524, + "Ġparanormal": 47525, + "BUT": 47526, + "Ġ[(": 47527, + "ĠSwanson": 47528, + "ĠMesh": 47529, + "âĸ¬": 47530, + "Improve": 47531, + "ĠRadiation": 47532, + "ĠEsther": 47533, + "ĠEsk": 47534, + "ĠAly": 47535, + "iky": 47536, + "Ġirrad": 47537, + "ĠBuckingham": 47538, + "Ġrefill": 47539, + "Ġ._": 47540, + "Repe": 47541, + "CONCLUS": 47542, + "Ġdifferentiated": 47543, + "Ġchirop": 47544, + "ĠAtkins": 47545, + "Pattern": 47546, + "Ġexcise": 47547, + "Ġcabal": 47548, + "NSA": 47549, + "ĠSTA": 47550, + "ĠSIL": 47551, + "ĠParaly": 47552, + "Ġrye": 47553, + "ĠHowell": 47554, + "ĠCountdown": 47555, + "nesses": 47556, + "alysed": 47557, + "Ġresize": 47558, + "ãĤ½": 47559, + "Ġbudgetary": 47560, + "ĠStras": 47561, + "wang": 47562, + "Ġapiece": 47563, + "Ġprecincts": 47564, + "Ġpeach": 47565, + "Ġskyline": 47566, + "Ġ353": 47567, + "popular": 47568, + "Appearances": 47569, + "ĠMechanics": 47570, + "ĠDevOnline": 47571, + "Sullivan": 47572, + "Zen": 47573, + "Ġpu": 47574, + "opolis": 47575, + "544": 47576, + "Ġdeform": 47577, + "Ġcounteract": 47578, + "ĠLange": 47579, + "Ġ417": 47580, + "Console": 47581, + "774": 47582, + "Ġnodding": 47583, + "Ġpopulism": 47584, + "Ġhep": 47585, + "Ġcounselling": 47586, + "compliance": 47587, + "UFF": 47588, + "Ġundeniably": 47589, + "Ġrailing": 47590, + "ĠHorowitz": 47591, + "ĠSimone": 47592, + "ĠBungie": 47593, + "Ġak": 47594, + "ĠTalks": 47595, + "xff": 47596, + "flake": 47597, + "Crash": 47598, + "Ġsweaty": 47599, + "Ġbanquet": 47600, + "ĠOFFIC": 47601, + "Ġinventive": 47602, + "Ġastronomer": 47603, + "ĠStamford": 47604, + "ĠScare": 47605, + "ĠGREEN": 47606, + "olicited": 47607, + "Ġrusher": 47608, + "Ġcentrist": 47609, + "ighting": 47610, + "Ġsubclass": 47611, + "Ġdisav": 47612, + "Ġdefund": 47613, + "ĠNanto": 47614, + "ociate": 47615, + "mast": 47616, + "Ġpacif": 47617, + "Ġmend": 47618, + "eers": 47619, + "immigration": 47620, + "ESSION": 47621, + "Ġnumbering": 47622, + "Ġlaughable": 47623, + "ĠEnded": 47624, + "viation": 47625, + "emark": 47626, + "Pitt": 47627, + "Ġmeticulous": 47628, + "ĠLF": 47629, + "Ġcongratulated": 47630, + "ĠBirch": 47631, + "Ġswayed": 47632, + "Ġsemifinals": 47633, + "Ġhumankind": 
47634, + "matter": 47635, + "ĠEquip": 47636, + "opausal": 47637, + "Said": 47638, + "ĠLayout": 47639, + "Ġvoicing": 47640, + "Ġthug": 47641, + "Ġpornographic": 47642, + "IPS": 47643, + "Ġmoaning": 47644, + "Ġgrievance": 47645, + "Ġconfessions": 47646, + "escal": 47647, + "TEXTURE": 47648, + "Authent": 47649, + "osaurus": 47650, + "Purchase": 47651, + "Ġrelegation": 47652, + "alter": 47653, + "Ġ³³": 47654, + "Ġriddled": 47655, + "Ġogre": 47656, + "ĠLowell": 47657, + "Occup": 47658, + "Eat": 47659, + "ĠHyder": 47660, + "ĠAdviser": 47661, + "Commerce": 47662, + "Hunt": 47663, + "ĠOrth": 47664, + "ĠCompetitive": 47665, + "ĠCLA": 47666, + "CDC": 47667, + "Ġsalads": 47668, + "Fle": 47669, + "Ġindustrialized": 47670, + "`,": 47671, + "ĠOWN": 47672, + "Ġbeck": 47673, + "ĠParticularly": 47674, + "oubt": 47675, + "ĠmM": 47676, + "ĠHussain": 47677, + "ĠChennai": 47678, + "Ġ920": 47679, + "Ġappointing": 47680, + "ĠCullen": 47681, + ",,,,,,,,": 47682, + "Ġpores": 47683, + "verified": 47684, + "Ġbiochemical": 47685, + "emate": 47686, + "Ġcowardly": 47687, + "ĠHelsinki": 47688, + "ĠEthiopian": 47689, + "SOURCE": 47690, + "ERC": 47691, + "estro": 47692, + "Ġbiotech": 47693, + "ĠSour": 47694, + "Ġbrewer": 47695, + "Bloomberg": 47696, + "Ġintensify": 47697, + "Glass": 47698, + "anco": 47699, + "ĠFDR": 47700, + "greSQL": 47701, + "ĠFires": 47702, + "©¶æ¥µ": 47703, + "eco": 47704, + "1001": 47705, + "ĠHomeless": 47706, + "Ġinstantaneous": 47707, + "ĠHaste": 47708, + "igel": 47709, + "Diamond": 47710, + "Ġpaving": 47711, + "Ġlandfill": 47712, + "Ġdads": 47713, + "houn": 47714, + ":]": 47715, + "Ġincendiary": 47716, + "ĠLivingston": 47717, + "ĠHilbert": 47718, + "ĠChecks": 47719, + "styles": 47720, + "inators": 47721, + "ĠClive": 47722, + "phrine": 47723, + "Ġchimpanzees": 47724, + "Ġpall": 47725, + "ĠJM": 47726, + "ĠAadhaar": 47727, + "ðĿ": 47728, + "Ġachievable": 47729, + "disabled": 47730, + "PET": 47731, + "OOOOOOOO": 47732, + "Mot": 47733, + "Ġintangible": 47734, + "Ġballet": 47735, + "ĠWebs": 47736, + "ĠEstimated": 47737, + "Effects": 47738, + "Ġbailed": 47739, + "Joshua": 47740, + "Ġturbulence": 47741, + "Ġoccupant": 47742, + "ĠDaylight": 47743, + "Ġ361": 47744, + "meet": 47745, + "Ġstatically": 47746, + "Ġonlook": 47747, + "Ġki": 47748, + "illegal": 47749, + "Ġvelvet": 47750, + "Ġdehydration": 47751, + "Ġacquies": 47752, + "ĠRez": 47753, + "akura": 47754, + "ĠUpton": 47755, + "atro": 47756, + "Ġincomprehensible": 47757, + "Ġbackdoor": 47758, + "ĠRhino": 47759, + "727": 47760, + "Ġmaths": 47761, + ")+": 47762, + "Ġheresy": 47763, + "Ġdf": 47764, + "ĠRoche": 47765, + "ĠLydia": 47766, + "Ġpancreat": 47767, + "reply": 47768, + "arrell": 47769, + "Ġsolicitation": 47770, + "Ġcircadian": 47771, + "BIP": 47772, + "Ġforay": 47773, + "Ġcryptic": 47774, + "izu": 47775, + "imeo": 47776, + "ĠTomato": 47777, + "ĠHoms": 47778, + "examination": 47779, + "Ġquarry": 47780, + "ĠValiant": 47781, + "ĠJericho": 47782, + "ĠINCLUD": 47783, + "Ġ1840": 47784, + "519": 47785, + "Ġresists": 47786, + "Ġsnapshots": 47787, + "ĠSpur": 47788, + "ĠAntiqu": 47789, + "Login": 47790, + "Ġbestselling": 47791, + "Ġantic": 47792, + "ĠSutherland": 47793, + "ãĤ¢ãĥ«": 47794, + "Ġ~/": 47795, + "ĠParm": 47796, + "èĥ": 47797, + "Pages": 47798, + "intensity": 47799, + "Ġimmobil": 47800, + "Ġ1865": 47801, + "zzo": 47802, + "Ġnifty": 47803, + "Ġfentanyl": 47804, + "ĠPreservation": 47805, + "ophen": 47806, + "Ġdarts": 47807, + "ĠDinosaur": 47808, + "pointers": 47809, + "ĠRite": 47810, + "suggest": 47811, + "awareness": 47812, + "ĠSheridan": 47813, + 
"Ġstances": 47814, + "Ġsorcery": 47815, + "Ġperjury": 47816, + "ĠNikola": 47817, + "iever": 47818, + "Ġfiance": 47819, + "ĠJordanian": 47820, + "ĠBalloon": 47821, + "Ġnab": 47822, + "Ġkb": 47823, + "Ġhumanities": 47824, + "ĠTanaka": 47825, + "hillary": 47826, + "Ġconsultancy": 47827, + "ĠZub": 47828, + "Ġremission": 47829, + "Ġconfid": 47830, + "CHQ": 47831, + "ĠFug": 47832, + "Ġimprovis": 47833, + "Yep": 47834, + "/_": 47835, + "Ġunwillingness": 47836, + "Ġportfolios": 47837, + "055": 47838, + "ĠInstructor": 47839, + "aiman": 47840, + "Ġclaimants": 47841, + "Mbps": 47842, + "ĠBye": 47843, + "received": 47844, + "Tweet": 47845, + "Ġindemn": 47846, + "riz": 47847, + "amara": 47848, + "Nat": 47849, + "Ġevaluates": 47850, + "ĠLur": 47851, + "epad": 47852, + "FOX": 47853, + "ĠThro": 47854, + "Ġrusty": 47855, + "Ġbedrock": 47856, + "ĠOprah": 47857, + "JB": 47858, + "Ġmanipulative": 47859, + "Ġwillful": 47860, + "Ġrelapse": 47861, + "Ġextant": 47862, + "Theme": 47863, + "Sensor": 47864, + "ĠStability": 47865, + "govern": 47866, + "Ġpoppy": 47867, + "Ġknack": 47868, + "Ġinsulated": 47869, + "ĠTile": 47870, + "ĠExtrem": 47871, + "Ġuntold": 47872, + "Ġconverge": 47873, + "Ġrefuel": 47874, + "igroup": 47875, + "Ġdistortions": 47876, + "Ġravaged": 47877, + "Ġmechanically": 47878, + "ĠReilly": 47879, + "ĠNose": 47880, + "ĠIncarnation": 47881, + "ĠBecky": 47882, + "abbling": 47883, + "Ġtaco": 47884, + "Ġrake": 47885, + "Ġmelancholy": 47886, + "Ġillustrious": 47887, + "ĠDartmouth": 47888, + "Guide": 47889, + "ĠRazer": 47890, + "ĠBenz": 47891, + "Ultimate": 47892, + "ĠSurprise": 47893, + "Ġpageant": 47894, + "offer": 47895, + "Whoever": 47896, + "Ġwiser": 47897, + "Ġchemist": 47898, + "ĠHELL": 47899, + "ĠBulk": 47900, + "Ġplutonium": 47901, + "ĠCOVER": 47902, + "Ö¼": 47903, + "failed": 47904, + "Ġtirelessly": 47905, + "Ġinfertility": 47906, + "ĠTrident": 47907, + "ĠShowtime": 47908, + "ĠCiv": 47909, + "Vice": 47910, + "requires": 47911, + "ittance": 47912, + "Ġuncontrolled": 47913, + "interesting": 47914, + "561": 47915, + "Ġinnovate": 47916, + "ategic": 47917, + "Lie": 47918, + "ĠSelling": 47919, + "Ul": 47920, + "Ġsavior": 47921, + "ĠTosh": 47922, + "Ġswast": 47923, + "PASS": 47924, + "Ġrink": 47925, + "Ġcardio": 47926, + "ĠIro": 47927, + "udi": 47928, + "Ġvantage": 47929, + "Ġvans": 47930, + "ĠNiño": 47931, + "+=": 47932, + "Ġpropagate": 47933, + "": 49029, + "Ġleukemia": 49030, + "Ġeluc": 49031, + "Ġannouncer": 49032, + "ĠLithuan": 49033, + "ĠArmageddon": 49034, + "åĩ": 49035, + "Lenin": 49036, + "ĠRuk": 49037, + "Ġpepp": 49038, + "ĠRomantic": 49039, + "ĠPIT": 49040, + "ĠInterstellar": 49041, + "ĠAtkinson": 49042, + "Raid": 49043, + "Js": 49044, + "Goal": 49045, + "Course": 49046, + "Ġvanishing": 49047, + "esley": 49048, + "ĠRounds": 49049, + "Elsa": 49050, + "593": 49051, + "Ġredundancy": 49052, + "ĠSTAND": 49053, + "Ġprophetic": 49054, + "Ġhabitable": 49055, + "ryu": 49056, + "Ġfaintly": 49057, + "MODE": 49058, + "Ġflanked": 49059, + "IRC": 49060, + "Awesome": 49061, + "Ġspurious": 49062, + "ĠZah": 49063, + "ĠMSG": 49064, + "Ġshading": 49065, + "Ġmotivational": 49066, + "ĠSantana": 49067, + "ĠSPR": 49068, + "Ġexcruciating": 49069, + "omial": 49070, + "ĠMiko": 49071, + "ĠLeopard": 49072, + "Abyss": 49073, + "Ġ[|": 49074, + "dirty": 49075, + "Ġbaths": 49076, + "Ġdemoral": 49077, + "andre": 49078, + "PB": 49079, + "Ġunification": 49080, + "Ġsacrament": 49081, + "Ġ[&": 49082, + "Ġpriceless": 49083, + "Ġgelatin": 49084, + "Ġemanating": 49085, + "ĠAllaah": 49086, + "986": 49087, + "Ġoutburst": 49088, + 
"Ġeras": 49089, + "ĠXVI": 49090, + "ĠSPI": 49091, + "Ott": 49092, + "ĠLazarus": 49093, + "PLIED": 49094, + "Flying": 49095, + "blogs": 49096, + "Wisconsin": 49097, + "Raven": 49098, + "Ġrebate": 49099, + "Ġcreeps": 49100, + "ĠSpan": 49101, + "ĠPainter": 49102, + "ĠKira": 49103, + "ĠAmos": 49104, + "ĠCorvette": 49105, + "Consumer": 49106, + "ĠRecover": 49107, + "cki": 49108, + "Ġpesky": 49109, + "ĠInvention": 49110, + "Companies": 49111, + "Ġchallengers": 49112, + "ademic": 49113, + "ĠUkrainians": 49114, + "ĠNeurolog": 49115, + "ĠForsaken": 49116, + "Ġentrants": 49117, + "Ġembattled": 49118, + "Ġdefunct": 49119, + "ĠGlacier": 49120, + "Ġpoisons": 49121, + "ĠHorses": 49122, + "makes": 49123, + "ĠDirt": 49124, + "Ġ423": 49125, + "hhh": 49126, + "ĠTransformation": 49127, + "QUIRE": 49128, + "..................": 49129, + "Ġtraveller": 49130, + "ĠSexy": 49131, + "ĠKern": 49132, + "ipolar": 49133, + "Ġransomware": 49134, + "oooooooooooooooo": 49135, + "Ec": 49136, + "ruby": 49137, + "Professional": 49138, + "ĠOutbreak": 49139, + "argument": 49140, + "Grey": 49141, + "ĠFifa": 49142, + "ĠCHO": 49143, + "ĠFORM": 49144, + "ĠAmtrak": 49145, + "-[": 49146, + "Ġcradle": 49147, + "Ġantioxidants": 49148, + "ãģ®å®": 49149, + "736": 49150, + "ĠNASL": 49151, + "ĠContributions": 49152, + "Indiana": 49153, + "ĠSTEP": 49154, + "CSS": 49155, + "Ġsalient": 49156, + "Ġallocations": 49157, + "yrights": 49158, + "Ġmashed": 49159, + "ĠCutter": 49160, + "Sexual": 49161, + "Ġpounded": 49162, + "Ġfanbase": 49163, + "Ġcasc": 49164, + "ĠTransparency": 49165, + "Ġanalytic": 49166, + "ĠSummoner": 49167, + "×ŀ": 49168, + "ĠADC": 49169, + "detail": 49170, + "Ġvanquished": 49171, + "Ġcrabs": 49172, + "arie": 49173, + "Destroy": 49174, + "ĠSack": 49175, + "Ġtransistor": 49176, + "Alabama": 49177, + "ĠKoen": 49178, + "ĠFisheries": 49179, + "cone": 49180, + "Ġannexed": 49181, + "ĠMGM": 49182, + "esa": 49183, + "Ġfaked": 49184, + "ĠCongratulations": 49185, + "Ġhindered": 49186, + "Ġcorrectional": 49187, + "ĠITV": 49188, + "leeve": 49189, + "Ġinappropriately": 49190, + "licks": 49191, + "Ġtrespass": 49192, + "Ġpaws": 49193, + "Ġnegotiator": 49194, + "ĠChristensen": 49195, + "limits": 49196, + "ĠDianne": 49197, + "Ġelegance": 49198, + "ĠContracts": 49199, + "anke": 49200, + "Obj": 49201, + "Ġvigilance": 49202, + "Ġcastles": 49203, + "ĠNAD": 49204, + "ĠHolo": 49205, + "Ġemphatically": 49206, + "ĠTitus": 49207, + "ĠServing": 49208, + "ĠRichie": 49209, + "ĠPigs": 49210, + "568": 49211, + "Ġanimosity": 49212, + "ĠAttributes": 49213, + "ĠUriel": 49214, + "MQ": 49215, + "myra": 49216, + "ĠApplicant": 49217, + "Ġpsychiatrists": 49218, + "ĠVij": 49219, + "ĠAbby": 49220, + "agree": 49221, + "Push": 49222, + "ĠkWh": 49223, + "hiba": 49224, + "Ġincite": 49225, + "ĠWeasley": 49226, + "ĠTaxi": 49227, + "ministic": 49228, + "hyper": 49229, + "ĠFarn": 49230, + "Ġ601": 49231, + "ĠNationwide": 49232, + "Fake": 49233, + "952": 49234, + "Ġmaize": 49235, + "Ġinteracted": 49236, + "Ġtransitioned": 49237, + "Ġparasitic": 49238, + "Ġharmonic": 49239, + "Ġdecaying": 49240, + "Ġbaseless": 49241, + "nsics": 49242, + "Ġtranspired": 49243, + "Ġabundantly": 49244, + "ĠForensic": 49245, + "Ġtreadmill": 49246, + "ĠJav": 49247, + "aband": 49248, + "Ġsshd": 49249, + "Ġfrontman": 49250, + "ĠJakarta": 49251, + "oller": 49252, + "drops": 49253, + "ĠSERVICES": 49254, + "romptu": 49255, + "ophical": 49256, + "hospital": 49257, + "bledon": 49258, + "645": 49259, + "Ġmidrange": 49260, + "ĠEVENT": 49261, + "culated": 49262, + "rawled": 49263, + "Ġperched": 49264, + 
"Ġoverboard": 49265, + "ĠPeel": 49266, + "ĠPwr": 49267, + "ĠCarth": 49268, + "ĠCOMPLE": 49269, + "coe": 49270, + "shall": 49271, + "Ġdeterrence": 49272, + "METHOD": 49273, + "ĠAbsent": 49274, + "MEN": 49275, + "Ġsill": 49276, + "ĠLEVEL": 49277, + "York": 49278, + "Ġsinners": 49279, + "ĠOPEC": 49280, + "ĠNur": 49281, + "ĠDesigns": 49282, + "selection": 49283, + "Ġunworthy": 49284, + "CHA": 49285, + "Ġstrengthens": 49286, + "883": 49287, + "edly": 49288, + "Ġslicing": 49289, + "Ġmalnutrition": 49290, + "Ġfilmmaking": 49291, + "ĠPolk": 49292, + "urated": 49293, + "Ġ421": 49294, + "breakers": 49295, + "!'\"": 49296, + "Ġwetlands": 49297, + "ĠDiscrimination": 49298, + "Ġallowable": 49299, + "Ġsteered": 49300, + "ĠSicily": 49301, + "SAM": 49302, + "Ġmustache": 49303, + "Ġmids": 49304, + "Ġclipped": 49305, + "Ġcirculate": 49306, + "Ġbrittle": 49307, + "ĠBuildings": 49308, + "raised": 49309, + "ĠRoundup": 49310, + "Ġwealthier": 49311, + "Ġoverwrite": 49312, + "Ġoverpowered": 49313, + "ĠGerrard": 49314, + "sites": 49315, + "PDATED": 49316, + "Ġacutely": 49317, + "ĠGamble": 49318, + "Ġpim": 49319, + "ĠKus": 49320, + "Typically": 49321, + "Deploy": 49322, + "ĠMoroccan": 49323, + "potion": 49324, + "combe": 49325, + "Ġvigilante": 49326, + "Ġ363": 49327, + "Stew": 49328, + "ĠBagg": 49329, + "Ġresided": 49330, + "ĠSpo": 49331, + "Ġremnant": 49332, + "Ġemptiness": 49333, + "brainer": 49334, + "Ġoutpatient": 49335, + "priority": 49336, + "Ġleptin": 49337, + "ĠPayton": 49338, + "ĠGleaming": 49339, + "ĠShed": 49340, + "ĠPolo": 49341, + "ĠMormonism": 49342, + "restricted": 49343, + "arlane": 49344, + "wx": 49345, + "Ġcreatine": 49346, + "ĠAnon": 49347, + "ĠSTUD": 49348, + "ĠJUL": 49349, + "ĠTee": 49350, + "528": 49351, + "089": 49352, + "Ġhatched": 49353, + "Dispatch": 49354, + "ĠComposite": 49355, + "Ġ451": 49356, + "puff": 49357, + "ĠXCOM": 49358, + "ĠOrn": 49359, + "ĠTHANK": 49360, + "ENDED": 49361, + "ĠAsheville": 49362, + "ĠÃľ": 49363, + "Ġmango": 49364, + "ĠSlightly": 49365, + "worldly": 49366, + "ĠWander": 49367, + "ĠExpand": 49368, + "ĠChr": 49369, + "Mist": 49370, + "Ġorthodoxy": 49371, + "ĠUNESCO": 49372, + "regate": 49373, + "Elsewhere": 49374, + "kie": 49375, + "irled": 49376, + "Ġtopple": 49377, + "Ġadoptive": 49378, + "ĠLegs": 49379, + "dress": 49380, + "ĠSagan": 49381, + "bare": 49382, + "ĠGlou": 49383, + "Crunch": 49384, + "Ġhelpers": 49385, + "Ġchronically": 49386, + "ĠHuma": 49387, + "10000": 49388, + "Ġaccommodating": 49389, + "äºĶ": 49390, + "Ġwrinkles": 49391, + "Ġdodged": 49392, + "fourth": 49393, + "Ġprecon": 49394, + "Ġcompressor": 49395, + "ĠKare": 49396, + "Ġevict": 49397, + "ĠWarwick": 49398, + "imar": 49399, + "Ġmodernization": 49400, + "Ġbandwagon": 49401, + "Ġrefuted": 49402, + "Ġnetted": 49403, + "ĠNaples": 49404, + "ĠGenie": 49405, + "perors": 49406, + "Ġfielded": 49407, + "Ġdere": 49408, + "ĠParables": 49409, + "lees": 49410, + "Ġtrout": 49411, + "aspers": 49412, + "Ġnihil": 49413, + "Ġhappiest": 49414, + "Ġfloppy": 49415, + "ĠLoft": 49416, + "ĠHeard": 49417, + "Ġunison": 49418, + "Ġlug": 49419, + "ĠRedmond": 49420, + "classic": 49421, + "Supporters": 49422, + "SHIP": 49423, + "GMT": 49424, + "Ġfuelled": 49425, + "çIJ": 49426, + "Ġdd": 49427, + "ĠEminem": 49428, + "Ġ1897": 49429, + "NYSE": 49430, + "Ġsecretaries": 49431, + "ĠFIA": 49432, + "ĠCanaveral": 49433, + "Favorite": 49434, + "Ġpomp": 49435, + "Ġdetainee": 49436, + "ership": 49437, + "aimon": 49438, + "iour": 49439, + "ĠApex": 49440, + "Ġplantations": 49441, + "amia": 49442, + "acion": 49443, + "Rust": 49444, + 
"Ġtowed": 49445, + "ĠTruly": 49446, + "577": 49447, + "Ġsheltered": 49448, + "rider": 49449, + "Wo": 49450, + "Ġlair": 49451, + "ĠIntelligent": 49452, + "improve": 49453, + "matically": 49454, + "Ġetiquette": 49455, + "adra": 49456, + "allo": 49457, + "ĠJuno": 49458, + "anything": 49459, + "ĠStruggle": 49460, + "ĠPredict": 49461, + "ĠGrimes": 49462, + "ĠAMERICA": 49463, + "ctx": 49464, + "ĠSituation": 49465, + "WOOD": 49466, + "Ġsoluble": 49467, + "meier": 49468, + "Ġintolerable": 49469, + "angering": 49470, + "Ġuninterrupted": 49471, + "Ġtooltip": 49472, + "Ġinterrogated": 49473, + "Ġgunned": 49474, + "ĠSneak": 49475, + "æѦ": 49476, + "Ġtether": 49477, + "Ġcrumble": 49478, + "Lens": 49479, + "Ġclustered": 49480, + "ĠSyl": 49481, + "ĠHasan": 49482, + "Ġdystopian": 49483, + "wana": 49484, + "Ġjoystick": 49485, + "ĠThib": 49486, + "ammu": 49487, + "Tomorrow": 49488, + "546": 49489, + "Ġovercame": 49490, + "Ġminimized": 49491, + "ceptor": 49492, + "Runner": 49493, + "ENGTH": 49494, + "ĠBrenda": 49495, + "ĠAchievements": 49496, + "Ġtorches": 49497, + "Ġrapport": 49498, + "ĠInvestigator": 49499, + "ĠHandling": 49500, + "relation": 49501, + "grey": 49502, + "815": 49503, + "Ġkcal": 49504, + "ĠCommands": 49505, + "dq": 49506, + "Ġcurls": 49507, + "Ġbearer": 49508, + "Ġcynicism": 49509, + "itri": 49510, + "ĠUseful": 49511, + "Bee": 49512, + "DCS": 49513, + "Ġabras": 49514, + "Pract": 49515, + "BILITIES": 49516, + "712": 49517, + "Ġdebugger": 49518, + "Ġdebtor": 49519, + "ĠLia": 49520, + "ĠKers": 49521, + "Ġexacerbate": 49522, + "ĠStacy": 49523, + "ĠBland": 49524, + "ĠScenes": 49525, + "Ġbranching": 49526, + "âĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪâĸĪ": 49527, + "apeake": 49528, + "Ġsalsa": 49529, + "Ġmishand": 49530, + "ĠKonami": 49531, + "ĠNib": 49532, + "Ġanecdote": 49533, + "Ġagreeable": 49534, + "Ïī": 49535, + "ĠNathaniel": 49536, + "ĠHeisman": 49537, + "ĠBeware": 49538, + "Ġ1886": 49539, + "spective": 49540, + "691": 49541, + "522": 49542, + "Ġinhibits": 49543, + "Ġhashing": 49544, + "Ġ1889": 49545, + "å°Ĩ": 49546, + "vich": 49547, + "Pure": 49548, + "Ġsolidly": 49549, + "Ġaspirin": 49550, + "imaru": 49551, + "Ġstreetcar": 49552, + "ĠUCS": 49553, + "ĠJudd": 49554, + "Ġflashbacks": 49555, + "pins": 49556, + "Ġ1440": 49557, + "ĠUNHCR": 49558, + "ĠSymptoms": 49559, + "TIT": 49560, + "538": 49561, + "Fra": 49562, + "%);": 49563, + "Ġooz": 49564, + "Ġcurfew": 49565, + "Ġcalmed": 49566, + "Ġparticipates": 49567, + "TeX": 49568, + "Ġnonsensical": 49569, + "Ġfullback": 49570, + "ĠDeL": 49571, + "monkey": 49572, + "hari": 49573, + "Ġmetabolites": 49574, + "Ġlooted": 49575, + "ĠALWAYS": 49576, + "ĠBCC": 49577, + "Lt": 49578, + "ochet": 49579, + "Bone": 49580, + "Ġvetoed": 49581, + "Ġgcc": 49582, + "ĠCLICK": 49583, + "Ġ1888": 49584, + "saf": 49585, + "Ġstiffness": 49586, + "Ġlowly": 49587, + "ĠGeh": 49588, + "verson": 49589, + "orset": 49590, + "Ġunforeseen": 49591, + "Ġanesthesia": 49592, + "ĠOptical": 49593, + "Ġreconstructed": 49594, + "ĠTup": 49595, + "shows": 49596, + "NEWS": 49597, + "ĠNewspaper": 49598, + "ĠASA": 49599, + "tera": 49600, + "Numbers": 49601, + "Ġinexplicable": 49602, + "×ij": 49603, + "Ġhardness": 49604, + "untarily": 49605, + "ĠAcer": 49606, + "gradient": 49607, + "ARDIS": 49608, + "Ġwoodland": 49609, + "Ġmetaphors": 49610, + "ĠWembley": 49611, + "ĠPavel": 49612, + "philis": 49613, + "Ġrewriting": 49614, + "Ġperceptual": 49615, + "Ġ1070": 49616, + "worms": 49617, + "ĠDowns": 49618, + "Ġunsurprisingly": 49619, + "Ġtagging": 49620, + "flame": 49621, + "Ġlitres": 49622, + "Ġbounces": 49623, + "ĠBabe": 
49624, + "shut": 49625, + "Ġoverdoses": 49626, + "ĠSheila": 49627, + "ĠChau": 49628, + "ĠBless": 49629, + "Capture": 49630, + "ĠSignificant": 49631, + "ĠScion": 49632, + "Ġ389": 49633, + "ĠMcH": 49634, + "ĠTitanium": 49635, + "ĠMeal": 49636, + "ameda": 49637, + "agents": 49638, + "aggressive": 49639, + "Billy": 49640, + "763": 49641, + "ĠSaying": 49642, + "DERR": 49643, + "itone": 49644, + "Collins": 49645, + "Bound": 49646, + "Ġbolted": 49647, + "ĠDMCA": 49648, + "953": 49649, + "Ġuniqueness": 49650, + "Ġepigen": 49651, + "unci": 49652, + "antam": 49653, + "Ġreckoning": 49654, + "chairs": 49655, + "OGR": 49656, + "ĠSenegal": 49657, + "Ġ1862": 49658, + "relevant": 49659, + "Ġ¯": 49660, + "Ġpharmacies": 49661, + "ĠGeral": 49662, + "vier": 49663, + "Yan": 49664, + "ORPG": 49665, + "Ġrabid": 49666, + "bending": 49667, + "ĠUNITED": 49668, + "Ġ465": 49669, + "Assembly": 49670, + "Ġweep": 49671, + "Ġbehest": 49672, + "ĠMothers": 49673, + "ĠJace": 49674, + "hid": 49675, + "Ġwhirlwind": 49676, + "ĠUNIVERS": 49677, + "Ġutopian": 49678, + "Ġkidnap": 49679, + "Philipp": 49680, + "Kin": 49681, + "893": 49682, + "Ġlivestream": 49683, + "ĠMISS": 49684, + "Ġsubversive": 49685, + "ĠTechniques": 49686, + "ĠJUSTICE": 49687, + "ĠBASE": 49688, + "Ġ387": 49689, + "Ġassailants": 49690, + "ĠHardcore": 49691, + "Ġsprinkled": 49692, + "ĠPse": 49693, + "éļ": 49694, + "printed": 49695, + "ĠHau": 49696, + "ORGE": 49697, + "ĠTOUR": 49698, + "Ġlaced": 49699, + "Ġitch": 49700, + "Giving": 49701, + "Ġported": 49702, + "781": 49703, + "////////////////////////////////": 49704, + "breeding": 49705, + "Ġlogger": 49706, + "ĠHOL": 49707, + "innie": 49708, + "Firstly": 49709, + "Ġembryonic": 49710, + "Ġdelegated": 49711, + "pai": 49712, + "OIL": 49713, + "Ġcentrally": 49714, + "ĠRx": 49715, + "ĠScouting": 49716, + "Dutch": 49717, + "Ġhereditary": 49718, + "ĠCruiser": 49719, + "sat": 49720, + "529": 49721, + "ĠMarriott": 49722, + "othermal": 49723, + "Ġprohibitions": 49724, + "Earn": 49725, + "ĠStab": 49726, + "ĠColleges": 49727, + "ĠBelief": 49728, + "stretched": 49729, + "ĠLH": 49730, + "ĠEntityItem": 49731, + "CIA": 49732, + "Ġunrem": 49733, + "Ġlaureate": 49734, + "Ġdenominations": 49735, + "summary": 49736, + "hler": 49737, + "Spect": 49738, + "ĠKlaus": 49739, + "ĠBeans": 49740, + "Ġinsur": 49741, + "ĠPAX": 49742, + "Ġfielder": 49743, + "ĠVet": 49744, + "ĠSparrow": 49745, + "zie": 49746, + "ĠSQ": 49747, + "ĠMondays": 49748, + "ĠOffline": 49749, + "ĠLerner": 49750, + "ĠExtensions": 49751, + "Ireland": 49752, + "Ġpatronage": 49753, + "Ġcontrasted": 49754, + "ĠMania": 49755, + "hirt": 49756, + "Moscow": 49757, + "Ġcondemns": 49758, + "ĠAnge": 49759, + "Ġcomposing": 49760, + "ĠPepe": 49761, + "ĠPaddock": 49762, + "Ġheterogeneity": 49763, + "Ġideologically": 49764, + "Ġfishes": 49765, + "Ġcursing": 49766, + "ĠRutherford": 49767, + "ĠFloating": 49768, + "ĠAmelia": 49769, + "Tea": 49770, + "Synopsis": 49771, + "Ġstunts": 49772, + "Ġbead": 49773, + "Ġstocking": 49774, + "ĠMILL": 49775, + "obook": 49776, + "massive": 49777, + "\\<": 49778, + "Ġhump": 49779, + "ĠPreferences": 49780, + "EngineDebug": 49781, + "geist": 49782, + "ĠNieto": 49783, + "omever": 49784, + "ishy": 49785, + "evaluate": 49786, + "colonial": 49787, + "Alternative": 49788, + "ĠGoPro": 49789, + "ĠVortex": 49790, + "ĠNETWORK": 49791, + "ansky": 49792, + "Secure": 49793, + "ĠThrust": 49794, + "Snake": 49795, + "Ġparcels": 49796, + "Ġsamurai": 49797, + "Ġactresses": 49798, + "Nap": 49799, + "MF": 49800, + "iferation": 49801, + "Beer": 49802, + "523": 49803, + "ĠIly": 
49804, + "ointment": 49805, + "Ping": 49806, + "Ġstriped": 49807, + "ĠMellon": 49808, + "ossession": 49809, + "Ġneutron": 49810, + "endium": 49811, + "Ġaph": 49812, + "ĠFlavoring": 49813, + "Ġ383": 49814, + "Ġresponsiveness": 49815, + "ĠJindal": 49816, + "ĠHitchcock": 49817, + "Denver": 49818, + "ĠDRAGON": 49819, + "smanship": 49820, + "ĠDupl": 49821, + "Ġsly": 49822, + "Ġwebcam": 49823, + "ĠTwain": 49824, + "ĠDarling": 49825, + "iliate": 49826, + "consumer": 49827, + "DIT": 49828, + "Ġnamesake": 49829, + "Ġunorthodox": 49830, + "Ġfuner": 49831, + "ĠPLoS": 49832, + "ĠCONTROL": 49833, + "ozyg": 49834, + "oglobin": 49835, + "FACE": 49836, + "ERG": 49837, + "ĠDia": 49838, + "ĠFiesta": 49839, + "cele": 49840, + "034": 49841, + "Ġenclave": 49842, + "âĸ¬âĸ¬": 49843, + "onement": 49844, + "alist": 49845, + "Mand": 49846, + "Ġhomegrown": 49847, + "ĠFancy": 49848, + "Ġconceptions": 49849, + "ĠContains": 49850, + "ureen": 49851, + "Ġreiterate": 49852, + "Ġmeager": 49853, + "Ġinstallments": 49854, + "Spawn": 49855, + "627": 49856, + "Ġphotoc": 49857, + "ĠCabrera": 49858, + "ĠRosenthal": 49859, + "ĠLansing": 49860, + "isner": 49861, + "Ġinvests": 49862, + "ĠUFOs": 49863, + "EXP": 49864, + "Hardware": 49865, + "Ġtragically": 49866, + "Ġconcedes": 49867, + "ieft": 49868, + "cham": 49869, + "borgh": 49870, + "ĠSchr": 49871, + "ĠMelanie": 49872, + "ĠHoy": 49873, + "Ġvisitation": 49874, + "Ġidiosyncr": 49875, + "Ġfractions": 49876, + "Ġforeskin": 49877, + "obos": 49878, + "Ġpoaching": 49879, + "ĠVIEW": 49880, + "Ġstimulates": 49881, + "ĠGork": 49882, + "canon": 49883, + "MIC": 49884, + "ĠNemesis": 49885, + "ĠIndra": 49886, + "ĠDMV": 49887, + "Ġ529": 49888, + "Ġinspecting": 49889, + "Ġgrandma": 49890, + "ĠWhedon": 49891, + "ĠShant": 49892, + "ĠPurg": 49893, + "ikan": 49894, + "ĠTeg": 49895, + "ĠCLR": 49896, + "zac": 49897, + "Victoria": 49898, + "ĠVerify": 49899, + "ionics": 49900, + "Ġpartying": 49901, + "ĠMou": 49902, + "colour": 49903, + "Ġtestimonies": 49904, + "lations": 49905, + "Ġpressuring": 49906, + "hiro": 49907, + "acers": 49908, + "Ġfid": 49909, + "angler": 49910, + "ĠCSI": 49911, + "Ġhereafter": 49912, + "Ġdissidents": 49913, + "reporting": 49914, + "iphany": 49915, + "chev": 49916, + "Ġsolitude": 49917, + "Ġlobe": 49918, + "Ġindis": 49919, + "Ġcredential": 49920, + "recent": 49921, + "adult": 49922, + "ĠNirvana": 49923, + "ĠFranchise": 49924, + "Layer": 49925, + "Hyp": 49926, + "ĠBerkshire": 49927, + "Ġwills": 49928, + "tif": 49929, + "Ġtotem": 49930, + "ĠJudah": 49931, + "repair": 49932, + "Instant": 49933, + "548": 49934, + "Ġembassies": 49935, + "Ġbottleneck": 49936, + "Ġbount": 49937, + "Ġtypew": 49938, + "ĠAlvin": 49939, + "jing": 49940, + "imilar": 49941, + "Rush": 49942, + "Ġbrim": 49943, + "ĠHELP": 49944, + "Aim": 49945, + "]'": 49946, + "Ġpassively": 49947, + "Ġbounded": 49948, + "ĠRated": 49949, + "Ġcriminality": 49950, + "Ġbiomark": 49951, + "Ġdispatcher": 49952, + "ĠTowards": 49953, + "Ġ+++": 49954, + "righteous": 49955, + "frog": 49956, + "ĠPanc": 49957, + "Carter": 49958, + "032": 49959, + "æ©Ł": 49960, + "Ġultraviolet": 49961, + "ĠLicensed": 49962, + "ĠTata": 49963, + "ĠBlessing": 49964, + "ĠGAM": 49965, + "Ġchemically": 49966, + "ĠSeaf": 49967, + "ĠRELE": 49968, + "ĠMercenary": 49969, + "capitalist": 49970, + "Ġformulations": 49971, + "Ġannihilation": 49972, + "ĠVerb": 49973, + "ĠArgon": 49974, + "Ġunloaded": 49975, + "Ġmorphed": 49976, + "Ġconquering": 49977, + "backer": 49978, + "IELD": 49979, + "Ġthefts": 49980, + "Ġfrontrunner": 49981, + "ĠRoyale": 49982, + "ĠFundamental": 
49983, + "elight": 49984, + "Chip": 49985, + "necessary": 49986, + "ayn": 49987, + "ĠSlip": 49988, + "Ġ448": 49989, + "cerned": 49990, + "Pause": 49991, + "Ġshockingly": 49992, + "ĠABV": 49993, + "Ġcomposure": 49994, + "733": 49995, + "ĠMotorsport": 49996, + "ahime": 49997, + "Murray": 49998, + "Mach": 49999, + "Ġgrids": 50000, + "Ġdebian": 50001, + "Ġfurthermore": 50002, + "Ġdexterity": 50003, + "ĠCollections": 50004, + "oslov": 50005, + "ilage": 50006, + "bj": 50007, + "ĠMonteneg": 50008, + "ĠstrutConnector": 50009, + "Ġmassacres": 50010, + "Ġbriefs": 50011, + "fetched": 50012, + "uvian": 50013, + "olition": 50014, + "Failure": 50015, + "emonic": 50016, + "Ġflared": 50017, + "Ġclaimant": 50018, + "Ġcures": 50019, + "Ġgiveaways": 50020, + "ĠSubstance": 50021, + "alions": 50022, + "Ġcringe": 50023, + "ĠKul": 50024, + "Ġaristocracy": 50025, + "ĠUlster": 50026, + "olated": 50027, + "housing": 50028, + "ĠMIS": 50029, + "Ġglared": 50030, + "ĠWilhelm": 50031, + "needs": 50032, + "lambda": 50033, + "builders": 50034, + "ĠVIS": 50035, + "Ġradiator": 50036, + "ĠGhostbusters": 50037, + "Ġ436": 50038, + "actual": 50039, + "Ġherds": 50040, + "ça": 50041, + "watching": 50042, + "Ġcountering": 50043, + "Charge": 50044, + "Ġcharred": 50045, + "Ġwarheads": 50046, + "Ġiodine": 50047, + "ĠMacy": 50048, + "041": 50049, + "Ġdepartures": 50050, + "ĠSins": 50051, + "Ġdyed": 50052, + "ĠConcepts": 50053, + "gado": 50054, + "713": 50055, + "Ġquotations": 50056, + "Ġgist": 50057, + "ĠChristy": 50058, + "Ġantigen": 50059, + "ĠHemp": 50060, + "ĠDrawn": 50061, + "ĠBarg": 50062, + "ezvous": 50063, + "Ġpaternity": 50064, + "Ġardu": 50065, + "ĠAnchorage": 50066, + "ĠRik": 50067, + "Ġoverloaded": 50068, + "ĠUsername": 50069, + "ĠTammy": 50070, + "ĠNau": 50071, + "ĠCellular": 50072, + "Ġwaning": 50073, + "Ġrodent": 50074, + "ĠWorcester": 50075, + "ilts": 50076, + "ĠTad": 50077, + "Ġdwellings": 50078, + "Ġbullish": 50079, + "431": 50080, + "Ġretaliate": 50081, + "Ġmigraine": 50082, + "ĠChevron": 50083, + "CHECK": 50084, + "Ġdonkey": 50085, + "crim": 50086, + "SPA": 50087, + "ĠAnalog": 50088, + "Ġmarquee": 50089, + "ĠHaas": 50090, + "Bir": 50091, + "ĠGDDR": 50092, + "ĠDownloads": 50093, + "Ġwillpower": 50094, + "ĠForth": 50095, + "ĠRecorded": 50096, + "Ġimpossibility": 50097, + "ĠLogged": 50098, + "ĠFranks": 50099, + "ĠRatt": 50100, + "initions": 50101, + "Ġcleaners": 50102, + "Ġsorely": 50103, + "Ġflickering": 50104, + "ĠExamination": 50105, + "catching": 50106, + "alloween": 50107, + "Msg": 50108, + "Ġdunno": 50109, + "Fa": 50110, + "Ġdysph": 50111, + "crazy": 50112, + ".''.": 50113, + "Ġmainline": 50114, + "Ġcs": 50115, + "Ġptr": 50116, + "ĠWally": 50117, + "igun": 50118, + "951": 50119, + "ĠBigfoot": 50120, + "fights": 50121, + "Ġretrieving": 50122, + "Jr": 50123, + "Ġduplication": 50124, + "ĠExplan": 50125, + "Ġrelational": 50126, + "Ġquaint": 50127, + "Ġbiscuits": 50128, + "Ġado": 50129, + "Ġshudder": 50130, + "Ġantidote": 50131, + "blooded": 50132, + "ksh": 50133, + "Ġsauces": 50134, + "Ġreinvest": 50135, + "Ġdispensary": 50136, + "ĠDiver": 50137, + "Ġ9000": 50138, + "student": 50139, + "Ġinsepar": 50140, + "escap": 50141, + "Ġtoddlers": 50142, + "ĠGPIO": 50143, + "ĠAssignment": 50144, + "headers": 50145, + "Ġlackluster": 50146, + "Ġaback": 50147, + "956": 50148, + "Ġtoolbar": 50149, + "745": 50150, + "Ġoust": 50151, + "Ġcontemplation": 50152, + "ĠPRESIDENT": 50153, + "Ġ458": 50154, + "======": 50155, + "Ġguaranteeing": 50156, + "ĠHeist": 50157, + "ĠCannes": 50158, + "Ļ½": 50159, + "Ġcollaborator": 50160, + "ĠAmp": 
50161, + "Ġgou": 50162, + "ĠSHALL": 50163, + "stories": 50164, + "783": 50165, + "Ġmobilized": 50166, + "Ġbrood": 50167, + "ĠLU": 50168, + "ĠðŁij": 50169, + "Ġrefin": 50170, + "ĠAnthropology": 50171, + "vind": 50172, + "illi": 50173, + "Ġwarranties": 50174, + "ĠBabel": 50175, + "Ġswath": 50176, + "Ġcaches": 50177, + "Ġantagonists": 50178, + "artifacts": 50179, + "Ġhotly": 50180, + "ĠStarts": 50181, + "ĠGö": 50182, + "zag": 50183, + "!!!!!": 50184, + "Ġscourge": 50185, + "Ġconspiring": 50186, + "ruits": 50187, + "reverse": 50188, + "ĠSheen": 50189, + "ĠJesuit": 50190, + "ĠGiovanni": 50191, + "adies": 50192, + "Ġbuttocks": 50193, + "earcher": 50194, + "acan": 50195, + "Ġvolleyball": 50196, + "Ġshrouded": 50197, + "Ġscoreboard": 50198, + "bats": 50199, + "ĠIPM": 50200, + "Ġasses": 50201, + "Ġderegulation": 50202, + "ĠTelegram": 50203, + "ĠReboot": 50204, + "Ġ7000": 50205, + "ĠCanary": 50206, + "Ġkernels": 50207, + "ĠFrançois": 50208, + "ĠDuff": 50209, + "ĠPon": 50210, + "ĠLeica": 50211, + "ĠGarmin": 50212, + "Ġorphans": 50213, + "ĠClaudia": 50214, + "Ġcalendars": 50215, + "ĠLeilan": 50216, + "ento": 50217, + "Rocket": 50218, + "Ġbrunch": 50219, + "ĠHawking": 50220, + "ainers": 50221, + "Ġsensibilities": 50222, + "ĠkW": 50223, + "ĠKand": 50224, + "Ġreclaimed": 50225, + "Ġinterestingly": 50226, + "ש": 50227, + "romy": 50228, + "JM": 50229, + "ĠEnhancement": 50230, + "bush": 50231, + "Skip": 50232, + "Ġrappers": 50233, + "Ġgazing": 50234, + "pedia": 50235, + "athlon": 50236, + "Revolution": 50237, + "Ġsnipers": 50238, + "Ġreverted": 50239, + "Ġconglomerate": 50240, + "Terry": 50241, + "794": 50242, + "Ġharsher": 50243, + "Ġdesolate": 50244, + "ĠHitman": 50245, + "Commission": 50246, + "Ġ(/": 50247, + "âĢ¦.\"": 50248, + "Compar": 50249, + "Ġamplification": 50250, + "ominated": 50251, + "Ġregress": 50252, + "ĠCollider": 50253, + "Ġinformants": 50254, + "Ġgazed": 50255, + "<|endoftext|>": 50256 + }, + "merges": [ + "Ġ t", + "Ġ a", + "h e", + "i n", + "r e", + "o n", + "Ġt he", + "e r", + "Ġ s", + "a t", + "Ġ w", + "Ġ o", + "e n", + "Ġ c", + "i t", + "i s", + "a n", + "o r", + "e s", + "Ġ b", + "e d", + "Ġ f", + "in g", + "Ġ p", + "o u", + "Ġa n", + "a l", + "a r", + "Ġt o", + "Ġ m", + "Ġo f", + "Ġ in", + "Ġ d", + "Ġ h", + "Ġan d", + "i c", + "a s", + "l e", + "Ġt h", + "i on", + "o m", + "l l", + "en t", + "Ġ n", + "Ġ l", + "s t", + "Ġ re", + "v e", + "Ġ e", + "r o", + "l y", + "Ġb e", + "Ġ g", + "Ġ T", + "c t", + "Ġ S", + "i d", + "o t", + "Ġ I", + "u t", + "e t", + "Ġ A", + "Ġ is", + "Ġ on", + "i m", + "a m", + "o w", + "a y", + "a d", + "s e", + "Ġth at", + "Ġ C", + "i g", + "Ġf or", + "a c", + "Ġ y", + "v er", + "u r", + "Ġ u", + "l d", + "Ġs t", + "Ġ M", + "' s", + "Ġ he", + "Ġ it", + "at ion", + "it h", + "i r", + "c e", + "Ġy ou", + "i l", + "Ġ B", + "Ġw h", + "o l", + "Ġ P", + "Ġw ith", + "Ġ 1", + "t er", + "c h", + "Ġa s", + "Ġw e", + "Ġ (", + "n d", + "i ll", + "Ġ D", + "i f", + "Ġ 2", + "a g", + "er s", + "k e", + "Ġ \"", + "Ġ H", + "e m", + "Ġc on", + "Ġ W", + "Ġ R", + "he r", + "Ġw as", + "Ġ r", + "o d", + "Ġ F", + "u l", + "at e", + "Ġa t", + "r i", + "p p", + "o re", + "ĠT he", + "Ġs e", + "u s", + "Ġp ro", + "Ġh a", + "u m", + "Ġa re", + "Ġd e", + "a in", + "an d", + "Ġo r", + "ig h", + "es t", + "is t", + "a b", + "r om", + "Ġ N", + "t h", + "Ġc om", + "Ġ G", + "u n", + "o p", + "0 0", + "Ġ L", + "Ġn ot", + "es s", + "Ġe x", + "Ġ v", + "re s", + "Ġ E", + "e w", + "it y", + "an t", + "Ġb y", + "e l", + "o s", + "or t", + "o c", + "q u", + "Ġf rom", + "Ġha ve", + "Ġs u", + "i ve", + 
"ou ld", + "Ġs h", + "Ġth is", + "n t", + "r a", + "p e", + "igh t", + "ar t", + "m ent", + "Ġa l", + "u st", + "en d", + "- -", + "al l", + "Ġ O", + "ac k", + "Ġc h", + "Ġ le", + "i es", + "re d", + "ar d", + "â Ģ", + "ou t", + "Ġ J", + "Ġa b", + "e ar", + "i v", + "al ly", + "ou r", + "o st", + "g h", + "p t", + "Ġp l", + "as t", + "Ġc an", + "a k", + "om e", + "u d", + "T he", + "Ġh is", + "Ġd o", + "Ġg o", + "Ġh as", + "g e", + "' t", + "Ġ U", + "r ou", + "Ġs a", + "Ġ j", + "Ġb ut", + "Ġw or", + "Ġa ll", + "e ct", + "Ġ k", + "am e", + "Ġw ill", + "o k", + "Ġw he", + "Ġthe y", + "id e", + "0 1", + "f f", + "ic h", + "p l", + "t her", + "Ġt r", + ". .", + "Ġin t", + "i e", + "u re", + "ag e", + "Ġn e", + "i al", + "a p", + "in e", + "ic e", + "Ġm e", + "Ġo ut", + "an s", + "on e", + "on g", + "ion s", + "Ġwh o", + "Ġ K", + "Ġu p", + "Ġthe ir", + "Ġa d", + "Ġ 3", + "Ġu s", + "at ed", + "ou s", + "Ġm ore", + "u e", + "o g", + "ĠS t", + "in d", + "i ke", + "Ġs o", + "im e", + "p er", + ". \"", + "b er", + "i z", + "a ct", + "Ġon e", + "Ġsa id", + "Ġ -", + "a re", + "Ġyou r", + "c c", + "ĠT h", + "Ġc l", + "e p", + "a ke", + "ab le", + "i p", + "Ġcon t", + "Ġwh ich", + "i a", + "Ġ im", + "Ġab out", + "Ġwe re", + "ver y", + "u b", + "Ġh ad", + "Ġ en", + "Ġcom p", + ", \"", + "ĠI n", + "Ġu n", + "Ġa g", + "i re", + "ac e", + "a u", + "ar y", + "Ġw ould", + "as s", + "r y", + "Ġ âĢ", + "c l", + "o ok", + "e re", + "s o", + "Ġ V", + "ig n", + "i b", + "Ġof f", + "Ġt e", + "v en", + "Ġ Y", + "i le", + "o se", + "it e", + "or m", + "Ġ2 01", + "Ġre s", + "Ġm an", + "Ġp er", + "Ġo ther", + "or d", + "ul t", + "Ġbe en", + "Ġl ike", + "as e", + "an ce", + "k s", + "ay s", + "ow n", + "en ce", + "Ġd is", + "ct ion", + "Ġan y", + "Ġa pp", + "Ġs p", + "in t", + "res s", + "ation s", + "a il", + "Ġ 4", + "ic al", + "Ġthe m", + "Ġhe r", + "ou nt", + "ĠC h", + "Ġa r", + "Ġ if", + "Ġthe re", + "Ġp e", + "Ġy ear", + "a v", + "Ġm y", + "Ġs ome", + "Ġwhe n", + "ou gh", + "ac h", + "Ġth an", + "r u", + "on d", + "ic k", + "Ġo ver", + "ve l", + "Ġ qu", + "Ċ Ċ", + "Ġs c", + "re at", + "re e", + "ĠI t", + "ou nd", + "p ort", + "Ġal so", + "Ġp art", + "f ter", + "Ġk n", + "Ġbe c", + "Ġt ime", + "en s", + "Ġ 5", + "op le", + "Ġwh at", + "Ġn o", + "d u", + "m er", + "an g", + "Ġn ew", + "-- --", + "Ġg et", + "or y", + "it ion", + "ing s", + "Ġj ust", + "Ġint o", + "Ġ 0", + "ent s", + "o ve", + "t e", + "Ġpe ople", + "Ġp re", + "Ġit s", + "Ġre c", + "Ġt w", + "i an", + "ir st", + "ar k", + "or s", + "Ġwor k", + "ad e", + "o b", + "Ġs he", + "Ġo ur", + "w n", + "in k", + "l ic", + "Ġ1 9", + "ĠH e", + "is h", + "nd er", + "au se", + "Ġh im", + "on s", + "Ġ [", + "Ġ ro", + "f orm", + "i ld", + "at es", + "ver s", + "Ġon ly", + "o ll", + "Ġs pe", + "c k", + "e ll", + "am p", + "Ġa cc", + "Ġb l", + "i ous", + "ur n", + "f t", + "o od", + "Ġh ow", + "he d", + "Ġ '", + "Ġa fter", + "a w", + "Ġat t", + "o v", + "n e", + "Ġpl ay", + "er v", + "ic t", + "Ġc ould", + "it t", + "Ġa m", + "Ġf irst", + "Ġ 6", + "Ġa ct", + "Ġ $", + "e c", + "h ing", + "u al", + "u ll", + "Ġcom m", + "o y", + "o ld", + "c es", + "at er", + "Ġf e", + "Ġbe t", + "w e", + "if f", + "Ġtw o", + "oc k", + "Ġb ack", + ") .", + "id ent", + "Ġu nder", + "rou gh", + "se l", + "x t", + "Ġm ay", + "rou nd", + "Ġp o", + "p h", + "is s", + "Ġd es", + "Ġm ost", + "Ġd id", + "Ġad d", + "j ect", + "Ġin c", + "f ore", + "Ġp ol", + "on t", + "Ġag ain", + "cl ud", + "ter n", + "Ġkn ow", + "Ġne ed", + "Ġcon s", + "Ġc o", + "Ġ .", + "Ġw ant", + "Ġse e", + "Ġ 7", + "n ing", 
+ "i ew", + "ĠTh is", + "c ed", + "Ġe ven", + "Ġin d", + "t y", + "ĠW e", + "at h", + "Ġthe se", + "Ġp r", + "Ġu se", + "Ġbec ause", + "Ġf l", + "n g", + "Ġn ow", + "ĠâĢ ĵ", + "c om", + "is e", + "Ġm ake", + "Ġthe n", + "ow er", + "Ġe very", + "ĠU n", + "Ġse c", + "os s", + "u ch", + "Ġe m", + "Ġ =", + "ĠR e", + "i ed", + "r it", + "Ġin v", + "le ct", + "Ġsu pp", + "at ing", + "Ġl ook", + "m an", + "pe ct", + "Ġ 8", + "ro w", + "Ġb u", + "Ġwhe re", + "if ic", + "Ġyear s", + "i ly", + "Ġd iff", + "Ġsh ould", + "Ġre m", + "T h", + "I n", + "Ġe v", + "d ay", + "' re", + "ri b", + "Ġre l", + "s s", + "Ġde f", + "Ġr ight", + "Ġs y", + ") ,", + "l es", + "00 0", + "he n", + "Ġth rough", + "ĠT r", + "_ _", + "Ġw ay", + "Ġd on", + "Ġ ,", + "Ġ1 0", + "as ed", + "Ġas s", + "ub lic", + "Ġre g", + "ĠA nd", + "i x", + "Ġ very", + "Ġin clud", + "ot her", + "Ġim p", + "ot h", + "Ġsu b", + "ĠâĢ Ķ", + "Ġbe ing", + "ar g", + "ĠW h", + "= =", + "ib le", + "Ġdo es", + "an ge", + "r am", + "Ġ 9", + "er t", + "p s", + "it ed", + "ation al", + "Ġb r", + "Ġd own", + "Ġman y", + "ak ing", + "Ġc all", + "ur ing", + "it ies", + "Ġp h", + "ic s", + "al s", + "Ġde c", + "at ive", + "en er", + "Ġbe fore", + "il ity", + "Ġwe ll", + "Ġm uch", + "ers on", + "Ġth ose", + "Ġsu ch", + "Ġ ke", + "Ġ end", + "ĠB ut", + "as on", + "t ing", + "Ġl ong", + "e f", + "Ġth ink", + "y s", + "Ġbe l", + "Ġs m", + "it s", + "a x", + "Ġo wn", + "Ġpro v", + "Ġs et", + "if e", + "ment s", + "b le", + "w ard", + "Ġsh ow", + "Ġp res", + "m s", + "om et", + "Ġo b", + "Ġs ay", + "ĠS h", + "t s", + "f ul", + "Ġe ff", + "Ġg u", + "Ġin st", + "u nd", + "re n", + "c ess", + "Ġ ent", + "ĠY ou", + "Ġgo od", + "Ġst art", + "in ce", + "Ġm ade", + "t t", + "st em", + "ol og", + "u p", + "Ġ |", + "um p", + "Ġhe l", + "ver n", + "ul ar", + "u ally", + "Ġa c", + "Ġm on", + "Ġl ast", + "Ġ2 00", + "1 0", + "Ġst ud", + "u res", + "ĠA r", + "sel f", + "ar s", + "mer ic", + "u es", + "c y", + "Ġm in", + "oll ow", + "Ġc ol", + "i o", + "Ġm od", + "Ġc ount", + "ĠC om", + "he s", + "Ġf in", + "a ir", + "i er", + "âĢ Ķ", + "re ad", + "an k", + "at ch", + "e ver", + "Ġst r", + "Ġpo int", + "or k", + "ĠN ew", + "Ġs ur", + "o ol", + "al k", + "em ent", + "Ġus ed", + "ra ct", + "we en", + "Ġs ame", + "ou n", + "ĠA l", + "c i", + "Ġdiff ere", + "Ġwh ile", + "---- ----", + "Ġg ame", + "ce pt", + "Ġs im", + ".. 
.", + "Ġin ter", + "e k", + "Ġre port", + "Ġpro du", + "Ġst ill", + "l ed", + "a h", + "Ġhe re", + "Ġwor ld", + "Ġth ough", + "Ġn um", + "ar ch", + "im es", + "al e", + "ĠS e", + "ĠI f", + "/ /", + "ĠL e", + "Ġre t", + "Ġre f", + "Ġtr ans", + "n er", + "ut ion", + "ter s", + "Ġt ake", + "ĠC l", + "Ġcon f", + "w ay", + "a ve", + "Ġgo ing", + "Ġs l", + "u g", + "ĠA meric", + "Ġspe c", + "Ġh and", + "Ġbet ween", + "ist s", + "ĠD e", + "o ot", + "I t", + "Ġe ar", + "Ġagain st", + "Ġh igh", + "g an", + "a z", + "at her", + "Ġex p", + "Ġo p", + "Ġin s", + "Ġg r", + "Ġhel p", + "Ġre qu", + "et s", + "in s", + "ĠP ro", + "is m", + "Ġf ound", + "l and", + "at a", + "us s", + "am es", + "Ġp erson", + "Ġg reat", + "p r", + "Ġs ign", + "ĠA n", + "' ve", + "Ġs omet", + "Ġs er", + "h ip", + "Ġr un", + "Ġ :", + "Ġt er", + "ire ct", + "Ġf ollow", + "Ġd et", + "ic es", + "Ġf ind", + "1 2", + "Ġm em", + "Ġc r", + "e red", + "e x", + "Ġex t", + "ut h", + "en se", + "c o", + "Ġte am", + "v ing", + "ou se", + "as h", + "at t", + "v ed", + "Ġsy stem", + "ĠA s", + "d er", + "iv es", + "m in", + "Ġle ad", + "ĠB l", + "c ent", + "Ġa round", + "Ġgo vern", + "Ġc ur", + "vel op", + "an y", + "Ġc our", + "al th", + "ag es", + "iz e", + "Ġc ar", + "od e", + "Ġl aw", + "Ġre ad", + "' m", + "c on", + "Ġre al", + "Ġsupp ort", + "Ġ1 2", + ".. ..", + "Ġre ally", + "n ess", + "Ġf act", + "Ġd ay", + "Ġb oth", + "y ing", + "Ġs erv", + "ĠF or", + "Ġth ree", + "Ġw om", + "Ġm ed", + "od y", + "ĠThe y", + "5 0", + "Ġex per", + "t on", + "Ġe ach", + "ak es", + "Ġc he", + "Ġc re", + "in es", + "Ġre p", + "1 9", + "g g", + "ill ion", + "Ġg rou", + "ut e", + "i k", + "W e", + "g et", + "E R", + "Ġm et", + "Ġs ays", + "o x", + "Ġd uring", + "er n", + "iz ed", + "a red", + "Ġf am", + "ic ally", + "Ġha pp", + "ĠI s", + "Ġch ar", + "m ed", + "v ent", + "Ġg ener", + "i ent", + "p le", + "i et", + "re nt", + "1 1", + "v es", + "pt ion", + "Ġ2 0", + "form ation", + "Ġc or", + "Ġoff ic", + "ie ld", + "Ġto o", + "is ion", + "Ġin f", + "Ġ Z", + "t he", + "o ad", + "Ġp ublic", + "Ġpro g", + "r ic", + "* *", + "Ġw ar", + "Ġp ower", + "v iew", + "Ġf ew", + "Ġl oc", + "Ġdiffere nt", + "Ġst ate", + "Ġhe ad", + "' ll", + "Ġp oss", + "Ġst at", + "re t", + "ant s", + "Ġv al", + "Ġis s", + "Ġc le", + "i vers", + "an c", + "Ġex pl", + "Ġan other", + "Ġ Q", + "Ġa v", + "th ing", + "n ce", + "W h", + "Ġch ild", + "Ġs ince", + "i red", + "l ess", + "Ġl ife", + "Ġde velop", + "itt le", + "Ġde p", + "Ġp ass", + "ã ĥ", + "Ġt urn", + "or n", + "Th is", + "b ers", + "ro ss", + "ĠA d", + "Ġf r", + "Ġres p", + "Ġsec ond", + "o h", + "Ġ /", + "Ġdis c", + "Ġ &", + "Ġsomet hing", + "Ġcomp le", + "Ġ ed", + "Ġf il", + "Ġmon th", + "a j", + "u c", + "Ġgovern ment", + "Ġwith out", + "Ġle g", + "Ġd ist", + "Ġp ut", + "Ġqu est", + "an n", + "Ġpro t", + "2 0", + "Ġne ver", + "i ence", + "Ġle vel", + "Ġar t", + "Ġth ings", + "Ġm ight", + "Ġeff ect", + "Ġcont ro", + "Ġc ent", + "Ġ1 8", + "Ġall ow", + "Ġbel ie", + "ch ool", + "ot t", + "Ġinc re", + "Ġfe el", + "Ġres ult", + "Ġl ot", + "Ġf un", + "ot e", + "Ġt y", + "ere st", + "Ġcont in", + "Ġus ing", + "Ġb ig", + "2 01", + "Ġas k", + "Ġb est", + "Ġ )", + "I N", + "Ġo pp", + "3 0", + "Ġnum ber", + "in ess", + "S t", + "le ase", + "Ġc a", + "Ġm ust", + "Ġd irect", + "Ġg l", + "Ġ <", + "Ġop en", + "Ġp ost", + "Ġcom e", + "Ġse em", + "ord ing", + "Ġwe ek", + "ate ly", + "it al", + "Ġe l", + "ri end", + "Ġf ar", + "Ġt ra", + "in al", + "Ġp ri", + "ĠU S", + "Ġpl ace", + "Ġfor m", + "Ġto ld", + "\" :", + "ain s", + "at ure", + "ĠTr 
ump", + "Ġst and", + "Ġ #", + "id er", + "ĠF r", + "Ġne xt", + "Ġs oc", + "Ġp ur", + "Ġle t", + "Ġl ittle", + "Ġh um", + "Ġ i", + "r on", + "1 5", + "Ġ1 5", + "Ġcomm un", + "Ġm ark", + "ĠThe re", + "Ġw r", + "ĠTh at", + "Ġin formation", + "w ays", + "Ġb us", + "a pp", + "Ġinv est", + "m e", + "Ġh ard", + "ain ed", + "e ad", + "Ġim port", + "Ġapp ro", + "Ġt est", + "Ġt ri", + "Ġre st", + "os ed", + "Ġf ull", + "Ġc are", + "ĠS p", + "Ġc ase", + "O N", + "Ġs k", + "Ġl ess", + "Ġ +", + "Ġpart ic", + "ĠP l", + "ab ly", + "u ck", + "is hed", + "ch n", + "b e", + "Ġl ist", + "at or", + "Ġto p", + "Ġad v", + "ĠB e", + "ru ct", + "Ġd em", + "r ation", + "l ing", + "g y", + "re en", + "g er", + "Ġh ome", + "Ġle ft", + "Ġbet ter", + "Ġd ata", + "Ġ1 1", + "Ġatt ack", + "Ġpro ble", + "l ine", + "ard s", + "Ġbe h", + "r al", + "ĠH ow", + "ĠS he", + "ar ge", + "Ġ --", + ": //", + "Ġb ro", + "ĠP h", + "at s", + "Ġbu ild", + "w w", + "id ed", + "a im", + "as es", + "en cy", + "Ġm ain", + "in ed", + "Ġinclud ing", + "Ġ {", + "Ġg ot", + "Ġint erest", + "Ġke ep", + "Ġ X", + "Ġe as", + "ain ing", + "Ġcl ass", + "âĢ ¦", + "ĠN o", + "Ġv ar", + "Ġsm all", + "amp le", + "A T", + "Ġ ide", + "ĠS o", + "Ġre ce", + "Ġpol it", + "Ġm ov", + "Ġpl an", + "Ġper cent", + "iv ing", + "Ġc amp", + "Ġp ay", + "1 4", + "s c", + "is ed", + "Ġu nt", + "one y", + "pl oy", + "== ==", + "Ġdid n", + "ĠI nd", + "el s", + "ert ain", + "Ġp os", + "__ __", + "i ver", + "Ġpro cess", + "Ġprog ram", + "if ied", + "ĠR ep", + "1 6", + "u ro", + "olog y", + "at ter", + "in a", + "Ġn ame", + "ĠA ll", + "Ġf our", + "Ġret urn", + "v ious", + "b s", + "Ġcall ed", + "Ġm ove", + "ĠS c", + "ir d", + "Ġgrou p", + "Ġb re", + "Ġm en", + "Ġc ap", + "t en", + "e e", + "Ġd ri", + "le g", + "he re", + "uth or", + "Ġp at", + "Ġcur rent", + "id es", + "Ġp op", + "t o", + "ent ion", + "Ġal ways", + "Ġm il", + "Ġwom en", + "Ġ1 6", + "Ġo ld", + "iv en", + "ra ph", + "ĠO r", + "r or", + "ent ly", + "Ġn ear", + "ĠE x", + "re am", + "s h", + "Ġ1 4", + "Ġf ree", + "iss ion", + "st and", + "ĠC on", + "al ity", + "us ed", + "1 3", + "Ġdes ign", + "Ġch ange", + "Ġch ang", + "Ġb o", + "Ġv is", + "em ber", + "Ġb ook", + "read y", + "Ġk ill", + "2 5", + "pp ed", + "Ġa way", + "Ġab le", + "Ġcount ry", + "Ġcon st", + "ar n", + "Ġor der", + "A R", + "i or", + "i um", + "or th", + "1 8", + "ail able", + "Ġs w", + "Ġm illion", + "Ġ1 3", + "at ic", + "t ed", + "ĠG o", + "Ġo per", + "en g", + "Ġth ing", + "aj or", + "con om", + "ĠCom m", + "Ġwh y", + "u red", + "ur al", + "Ġs chool", + "b y", + "ĠM ar", + "Ġa ff", + "Ġd ays", + "Ġan n", + "us h", + "an e", + "I f", + "e g", + "Ġpro f", + "Ġhe alth", + "ou th", + "B ut", + "ion al", + ". 
,", + "Ġs ol", + "Ġal ready", + "Ġ3 0", + "Ġchar act", + "H e", + "Ġf riend", + "E S", + "i ans", + "ic le", + "' d", + "ĠO n", + "Ġle ast", + "Ġp rom", + "Ġd r", + "Ġh ist", + "it her", + "Ġ est", + "i qu", + "1 7", + "s on", + "Ġte ll", + "Ġt alk", + "oh n", + "o int", + "le ction", + "A N", + "Ġunt il", + "au gh", + "Ġl ater", + "Ġ ve", + "Ġv iew", + "end ing", + "iv ed", + "Ġwor d", + "w are", + "Ġc ost", + "Ġen ough", + "Ġg ive", + "ĠUn ited", + "Ġte chn", + "are nt", + "O R", + "Ġp ar", + "ĠD r", + "Ġ201 6", + "r ist", + "er ing", + "Ġ Â", + "Ġl arge", + "s ide", + "ac y", + "cc ess", + "Ġw in", + "Ġimport ant", + "Ġ19 9", + "Ġdoes n", + "Ġ1 7", + "Ġbus iness", + "Ġcle ar", + "Ġre se", + "\" ,", + "ur y", + "Ġe qu", + "as ter", + "al f", + "ĠAmeric an", + "n ect", + "Ġex pect", + "ivers ity", + "Ġo cc", + "ĠF l", + "Ġk ind", + "Ġme an", + "Ġp ast", + "Ġde v", + "Ġb as", + "le t", + "ra ft", + "Ġor gan", + "Ġde l", + "Ġper form", + "Ġst ory", + "Ġse ason", + "ĠC ol", + "Ġcl aim", + "Ġc ame", + "Ġwith in", + "Ġl ine", + "Ġpro ject", + "ĠA t", + "Ġcontro l", + "end ed", + "ĠS y", + "Ġa ir", + "iz ation", + "Ġ *", + "le y", + "Ġm oney", + "id d", + "Y ou", + "f or", + "Ġfam ily", + "Ġm aking", + "Ġb it", + "Ġpol ice", + "Ġhapp en", + "Ġ vers", + "on y", + "u ff", + "ĠW hen", + "Ġs it", + "ide o", + "l f", + "is on", + "Ġsu re", + "g in", + "Ġapp ear", + "Ġl ight", + "Ġ es", + "o f", + "Ġw ater", + "Ġt imes", + "n ot", + "Ġg row", + "Ġcomp any", + "ĠT e", + "ow s", + "Ġm ar", + "our ce", + "i ol", + "ar m", + "b r", + "Ġex ample", + "Ġcon c", + "Ġf ore", + "ĠT o", + "p ro", + "E N", + "ri es", + "Ġ2 5", + "ĠC an", + "ne y", + "Ġact ually", + "Ġe ver", + "ur ity", + "ak en", + "ap s", + "Ġt ax", + "Ġm ajor", + "am a", + "Ġof ten", + "er al", + "Ġhum an", + "Ġj ob", + "is ter", + "Ġav ailable", + "oc r", + "en n", + "a id", + "iv id", + "Ġrec ord", + "? 
\"", + "Ġs ing", + "ĠA m", + "id ence", + "Ġnew s", + "st er", + "Ġe conom", + "Ġfollow ing", + "ĠB r", + "is ing", + "Ġh our", + "m ost", + "um ent", + "Ġse x", + "Ġdes c", + "Ġbec ome", + "ĠE d", + "Ġto ok", + "Ġha ving", + "Ġprodu ct", + "a ult", + "A s", + "ar ing", + "Ġme ans", + "Ġh op", + "un e", + "Ġch o", + "Ġc ertain", + "Ġn on", + "Ġde al", + "2 4", + "le ment", + "oc i", + "en e", + "Ġs ide", + "ĠP r", + "ĠM ay", + "Ġre ason", + "u ed", + "c hed", + "ul ation", + "Ġe lect", + "Ġoffic ial", + "Ġposs ible", + "Ġh old", + "and s", + "ot s", + "Ġc ity", + "or ies", + "Ġse ver", + "Ġchild ren", + "Ġon ce", + "Ġact iv", + "l er", + "Ġn ight", + "it ions", + "ĠJ ohn", + "a pe", + "pl ay", + "Ġd one", + "Ġl im", + "Ġwork ing", + "ĠP res", + "or ld", + "e b", + "ĠC o", + "Ġb ody", + "ail s", + "ut es", + "ĠM r", + "Ġwhe ther", + "Ġa uthor", + "ro p", + "Ġpro per", + "Ġse en", + ") ;", + "Ġf ac", + "ĠS u", + "Ġcon d", + "it ing", + "Ġcour se", + "Ġ }", + "-------- --------", + "a ign", + "Ġev ent", + "Ġen g", + "Ġp ot", + "Ġin tern", + "i am", + "Ġsh ort", + "em pt", + "ã Ĥ", + "ĠG od", + "il ar", + "8 0", + "Ġor ig", + "I S", + "our n", + "ab ility", + "it ive", + "Ġd am", + "Ġ1 00", + "Ġp ress", + "Ġdo ing", + "Ġprot ect", + "r ing", + "Ġthough t", + "Ġquest ion", + "re w", + "ĠW ar", + "Ġsever al", + "ĠSt ate", + "Ġg iven", + "Ġf und", + "ĠT w", + "Ġw ent", + "an ces", + "w ork", + "p or", + "m y", + "4 0", + "Ġar g", + "art ment", + "ust om", + "Ġpol ic", + "Ġme et", + "Ġc reat", + "2 2", + "ĠSt ates", + "Ġg ames", + "ra w", + "ut ure", + "Ġunder stand", + "ur s", + "ĠO b", + "l ish", + "s y", + "Ġm akes", + "Ġw on", + "ag on", + "Ġh tt", + "Ġl ove", + "ent ial", + "Ġcomple te", + "p ar", + "ĠI m", + "A L", + "Ġacc ount", + " ł", + "ore d", + "ver t", + "Ġ ident", + "Ġ201 5", + "Ġother s", + "ĠM in", + "i ber", + "ver age", + "The re", + "ition al", + "d d", + "Ġpro b", + "Ġyou ng", + "Ġal ong", + "Ġacc ording", + "Ġy et", + "Ġmem bers", + "ĠWh at", + "o id", + "ĠM an", + "A nd", + "Ġam ong", + "a i", + "Ġem ploy", + "ĠR es", + "Ġ >", + "Ġinv ol", + "Ġl ow", + "a f", + "ĠC ar", + "Ġh ig", + "ĠO ne", + "ĠS ec", + "in ation", + "Ġlike ly", + "Ġan t", + "ag ed", + "ĠR uss", + "Ġb en", + "Ġre le", + "F or", + "b ack", + "ĠN ot", + "Ġpres ident", + "b all", + "Ġacc ess", + "ivid ual", + "ĠD em", + "ĠE uro", + "6 0", + "Ġkn own", + "ir l", + "ĠG r", + "Ġear ly", + "u se", + "iet y", + "âĢ ĵ", + "Ġf ight", + "Ġs ent", + "Ġto day", + "Ġmark et", + "\" .", + "Ġb ased", + "Ġstr ong", + "ur ther", + "Ġde b", + "m ber", + "Ġproble m", + "Ġde ath", + "Ġsoc ial", + "im ate", + "A S", + "ort un", + "Ġcamp aign", + "er y", + "C h", + "Ġe y", + "i ally", + "Ġm us", + "w h", + "p os", + "Ġ er", + "Ġsa f", + "Ġmonth s", + "ir on", + "Ġv iol", + "Ġf ive", + "Ġst re", + "Ġplay ers", + "in c", + "al d", + "y ear", + "a un", + "Ġsu ccess", + "Ġpres ent", + "ere nce", + "Ġ201 4", + "Ġsu gg", + "Ġpartic ular", + "Ġtr y", + "Ġsugg est", + "ĠCh rist", + "on es", + "Ġpri v", + "2 3", + "Ġc rit", + "Ġl and", + "Ġloc al", + "if y", + "2 9", + "Ġa ut", + "E D", + "ĠG u", + "Ġm ult", + "Ġpolit ical", + "Ġask ed", + "Ġfor mer", + "it ter", + "ri pt", + "Ġcl ose", + "Ġp ract", + "ĠY ork", + "Ġget ting", + "Ġac ross", + "Ġcom b", + "Ġbelie ve", + "Ġ z", + "Ġto get", + "Ġtoget her", + "ĠC ent", + "ir c", + "Ġind ividual", + "ĠM c", + "2 7", + "is k", + "ĠE ng", + "Ġf ace", + "Ġ2 4", + "Ġval ue", + "Ġare a", + "e v", + "Ġw rit", + "ĠPres ident", + "Ġv ot", + "Ġke y", + "Ġm om", + "p ut", + "Ġany thing", + "Ġexper 
ience", + "att le", + "Ġm ind", + "a ff", + "om m", + "Ġf uture", + "g ed", + "Ġc ut", + "Ġto t", + "it ch", + "Ġv ideo", + "Ġinvest ig", + "Ġn et", + "ĠM y", + "r ict", + "i en", + ". )", + "Ġimp ro", + "th ough", + "ward s", + "Ġcon nect", + "ĠM ed", + "sel ves", + "ens ive", + "m b", + "o ber", + "at ors", + "A n", + "Ġ5 0", + "Ġre du", + "res ent", + "Ġab ove", + "Ġf re", + "ĠEuro pe", + "s w", + "Ġam ount", + "ĠA pp", + "Ġe ither", + "Ġmil it", + "Ġan al", + "Ġf ail", + "ĠE n", + "al es", + "Ġspec ial", + "Ġbl ack", + "I T", + "c her", + "Ġlook ing", + "Ġf ire", + "y n", + "Ġal most", + "o on", + "Ġstud y", + "Ġm iss", + "c hes", + "ro wn", + "Ġt re", + "Ġcommun ity", + "Ġmed ia", + "Ġf ood", + "Ġcom es", + "ĠUn iversity", + "Ġsing le", + "Wh at", + "u ly", + "Ġh alf", + "ag ue", + "h od", + "ĠRep ublic", + "Ġstart ed", + "Ġqu ick", + "ot o", + "b ook", + "Ġiss ue", + "it or", + "Ġel se", + "Ġcons ider", + "2 6", + "ro du", + "Ġt aken", + "2 8", + "9 9", + "ĠW ith", + "Ġtr ue", + "Ġw a", + "Ġtr ad", + "Ġag o", + "Ġm ess", + "ie f", + "Ġadd ed", + "o ke", + "Ġb ad", + "Ġf av", + "3 3", + "Ġsim ilar", + "as k", + "ĠD on", + "Ġcharact er", + "ort s", + "ĠH ouse", + "Ġreport ed", + "Ġty pe", + "v al", + "i od", + "ĠHow ever", + "Ġt arg", + "Ġent ire", + "pp ing", + "Ġhist ory", + "Ġl ive", + "ff ic", + ".... ....", + "ed eral", + "Ġtr ying", + "Ġdisc uss", + "ĠH ar", + "ac es", + "l ished", + "Ġse lf", + "os p", + "re st", + "Ġro om", + "el t", + "Ġf all", + "ol ution", + "Ġe t", + "Ġ x", + "Ġis n", + "Ġide a", + "b o", + "Ġs ound", + "ĠD ep", + "Ġsome one", + "ci ally", + "ull y", + "Ġf oc", + "Ġob ject", + "if t", + "ap er", + "Ġplay er", + "Ġr ather", + "Ġserv ice", + "as hing", + "ĠD o", + "ĠP art", + "ru g", + "m on", + "p ly", + "Ġm or", + "Ġnot hing", + "Ġprov ide", + "I C", + "un g", + "Ġpart y", + "Ġex ist", + "Ġm ag", + "7 0", + "Ġr ul", + "Ġh ouse", + "Ġbeh ind", + "Ġhow ever", + "ĠW orld", + "Ġs um", + "Ġapp lic", + "Ġ ;", + "Ġfun ction", + "g r", + "ĠP ol", + "Ġfr ont", + "2 00", + "Ġser ies", + "Ġt em", + "Ġty p", + "ill s", + "Ġo pt", + "Ġpoint s", + "Ġbel ow", + "itt ed", + "Ġspec ific", + "Ġ201 7", + "um b", + "Ġr a", + "Ġpre vious", + "Ġpre t", + "re me", + "Ġc ustom", + "Ġcour t", + "ĠM e", + "Ġre pl", + "Ġwho le", + "g o", + "c er", + "Ġt reat", + "ĠA ct", + "Ġprob ably", + "Ġle arn", + "end er", + "ĠA ss", + "Ġvers ion", + "n ow", + "Ġche ck", + "ĠC al", + "R E", + "min ist", + "O n", + "our ces", + "Ġben ef", + "Ġd oc", + "Ġdet er", + "Ġen c", + "Ġsu per", + "Ġadd ress", + "Ġv ict", + "Ġ201 3", + "Ġme as", + "t r", + "Ġf ield", + "W hen", + "Ġsign ific", + "u ge", + "Ġfe at", + "Ġcomm on", + "l oad", + "Ġbe gin", + "Ġbr ing", + "Ġa ction", + "er man", + "Ġdesc rib", + "Ġind ust", + "Ġwant ed", + "ri ed", + "m ing", + "Ġatt empt", + "4 5", + "f er", + "Ġd ue", + "ress ion", + "# #", + "Ġsh all", + "Ġs ix", + "o o", + "Ġst ep", + "Ġp ub", + "Ġhim self", + "Ġ2 3", + "Ġc op", + "Ġd est", + "Ġst op", + "A C", + "ib ility", + "Ġl ab", + "ic ult", + "Ġhour s", + "Ġcre ate", + "Ġf urther", + "ĠAmeric a", + "ĠC ity", + "Ġd ou", + "he ad", + "S T", + "ĠN orth", + "c ing", + "Ġn ational", + "u le", + "ĠIn st", + "Ġt aking", + "ĠQ u", + "ir t", + "Ġre d", + "Ġrese arch", + "v iron", + "ĠG e", + "Ġbre ak", + "an a", + "Ġsp ace", + "ater ial", + "Ġrec ent", + "ĠA b", + "Ġgener al", + "Ġh it", + "Ġper iod", + "Ġevery thing", + "ive ly", + "Ġph ys", + "Ġsay ing", + "an ks", + "Ġc ou", + "Ġc ult", + "ac ed", + "e al", + "u ation", + "Ġc oun", + "l u", + "Ġinclud e", + "Ġpos ition", + 
"ĠA fter", + "ĠCan ad", + "ĠE m", + "Ġim m", + "ĠR ed", + "Ġp ick", + "Ġcom pl", + "Ġm atter", + "re g", + "e xt", + "ang u", + "is c", + "o le", + "a ut", + "Ġcomp et", + "e ed", + "f ect", + "Ġ2 1", + "ĠS en", + "ĠThe se", + "as ing", + "Ġcan not", + "Ġin it", + "Ġrel ations", + "ac hed", + "Ġb ar", + "Ġ4 0", + "ĠT H", + "Ġ201 2", + "Ġv ol", + "Ġg round", + "Ġsec urity", + "Ġup d", + "il t", + "3 5", + "Ġconc ern", + "ĠJ ust", + "Ġwh ite", + "Ġseem s", + "ĠH er", + "pe cially", + "i ents", + "Ġann oun", + "Ġf ig", + "ight s", + "Ġst ri", + "l ike", + "id s", + "Ġs us", + "Ġw atch", + "Ġ â", + "Ġw ind", + "ĠC ont", + "Ġit self", + "Ġm ass", + "A l", + "y le", + "iqu e", + "ĠN ational", + "Ġab s", + "Ġp ack", + "Ġout side", + "Ġan im", + "Ġp ain", + "et er", + "Ġman ag", + "du ct", + "og n", + "Ġ ]", + "ĠSe pt", + "se c", + "o ff", + "ĠJ an", + "Ġf oot", + "ad es", + "Ġth ird", + "Ġm ot", + "Ġev idence", + "int on", + "Ġth reat", + "a pt", + "pl es", + "c le", + "Ġl o", + "Ġde cl", + "Ġit em", + "med i", + "Ġrep resent", + "om b", + "am er", + "Ġsignific ant", + "og raph", + "s u", + "Ġc al", + "i res", + "00 00", + "I D", + "A M", + "Ġsim ply", + "Ġlong er", + "Ġf ile", + "O T", + "c he", + "S o", + "ate g", + "or g", + "ĠH is", + "Ġen er", + "Ġd om", + "Ġup on", + "il i", + "\": \"", + "Ġthem selves", + "Ġcom ing", + "Ġqu ite", + "Ġdiff icult", + "ĠB ar", + "il ities", + "re l", + "end s", + "c ial", + "6 4", + "Ġwom an", + "ra p", + "y r", + "Ġne cess", + "ip s", + "Ġte xt", + "Ġrequ ire", + "Ġmilit ary", + "Ġre view", + "Ġresp ons", + "7 5", + "Ġsub ject", + "Ġinst ead", + "Ġiss ues", + "Ġg en", + "\" ,\"", + "Ġmin utes", + "Ġwe ap", + "r ay", + "am ed", + "t ime", + "b l", + "H ow", + "Ġc ode", + "ĠS m", + "Ġhig her", + "ĠSt e", + "r is", + "Ġp age", + "Ġstud ents", + "ĠIn tern", + "Ġmet hod", + "ĠA ug", + "ĠP er", + "ĠA g", + "Ġpolic y", + "ĠS w", + "Ġex ec", + "Ġac cept", + "um e", + "rib ut", + "Ġword s", + "Ġfin al", + "Ġchang es", + "ĠDem ocr", + "Ġfriend s", + "Ġres pect", + "Ġe p", + "Ġcomp an", + "iv il", + "Ġdam age", + "** **", + "og le", + "viron ment", + "Ġne g", + "ent al", + "Ġa p", + "Ġtot al", + "iv al", + "! 
\"", + "l im", + "Ġneed s", + "Ġag re", + "Ġdevelop ment", + "Ġa ge", + "ip le", + "2 1", + "Ġresult s", + "ĠA f", + "S h", + "Ġg un", + "ĠOb ama", + "ro ll", + "Ġ @", + "Ġright s", + "ĠB rit", + "Ġrun ning", + "Ġwas n", + "Ġp ort", + "Ġr ate", + "Ġpret ty", + "Ġtarg et", + "Ġsa w", + "Ġc irc", + "Ġwor ks", + "ic ro", + "al t", + "o ver", + "ww w", + "Th at", + "l ier", + "Ġevery one", + "ud e", + "Ġp ie", + "idd le", + "ra el", + "Ġr ad", + "Ġbl ock", + "Ġw alk", + "T o", + "ã ģ", + "n es", + "ĠA ust", + "a ul", + "ro te", + "ĠS outh", + "ess ion", + "op h", + "Ġshow s", + "Ġs ite", + "Ġj o", + "Ġr isk", + "cl us", + "l t", + "Ġin j", + "id ing", + "ĠS pe", + "Ġch all", + "ir m", + "Ġ2 2", + "itt ing", + "st r", + "Ġh y", + "L E", + "ke y", + "Ġbe gan", + "at ur", + "ashing ton", + "l am", + "ĠD av", + "b it", + "Ġs ize", + "ĠP ar", + "3 8", + "ourn al", + "f ace", + "Ġdec ision", + "Ġl arg", + "Ġj ud", + "re ct", + "Ġcontin ue", + "ĠO ct", + "ove red", + "ĠI nt", + "==== ====", + "Ġp arent", + "ĠW ill", + "Ġeas y", + "Ġd rug", + "ang er", + "Ġs ense", + "Ġd i", + "id ay", + "Ġener gy", + "ist ic", + "Ġass oci", + "ar ter", + "ob al", + "e ks", + "ĠE l", + "ur ch", + "Ġg irl", + "o e", + "it le", + "Ġ2 8", + "ĠC he", + "Ġrequ est", + "Ġso on", + "Ġh ost", + "k y", + "Ġst ates", + "om es", + "Ġm aterial", + "le x", + "Ġmom ent", + "Ġan sw", + "on se", + "Ġes pecially", + "Ġn orm", + "Ġserv ices", + "p ite", + "r an", + "Ġro le", + "4 4", + ") :", + "Ġc red", + "C l", + "____ ____", + "Ġm at", + "Ġl og", + "ĠCl inton", + "O U", + "Ġoff ice", + "Ġ2 6", + "Ġch arg", + "Ġtr ack", + "m a", + "Ġhe art", + "Ġb all", + "Ġperson al", + "Ġbuild ing", + "n a", + "s et", + "b ody", + "ĠBl ack", + "Ġincre ase", + "itt en", + "Ġneed ed", + "3 6", + "3 2", + "= \"", + "Ġl ost", + "Ġbec ame", + "Ġgrou ps", + "ĠM us", + "Ġw rote", + "ĠP e", + "Ġpro p", + "j oy", + "à ©", + "ĠWh ite", + "Ġde ad", + ". 
'", + "Ġhtt p", + "Ġwe bs", + "O S", + "Ġins ide", + "Ġwr ong", + "Ġstat ement", + "Ġ ...", + "y l", + "Ġfil m", + "Ġmus ic", + "Ġsh are", + "ific ation", + "Ġre lease", + "Ġfor ward", + "Ġst ay", + "Ġcomp ut", + "it te", + "s er", + "Ġorig inal", + "Ġc ard", + "Ġc and", + "Ġd iv", + "at ural", + "Ġfav or", + "O M", + "Ġc ases", + "us es", + "Ġse ction", + "Ġle ave", + "g ing", + "ov ed", + "ĠW ashington", + "3 9", + "ĠG l", + "Ġrequ ired", + "act ion", + "ap an", + "o or", + "it er", + "ĠK ing", + "Ġcount ries", + "ĠG erman", + "ll ing", + "Ġ2 7", + "3 4", + "Ġquest ions", + "Ġpr im", + "Ġc ell", + "Ġsh oot", + "Ġany one", + "ĠW est", + "Ġaff ect", + "ep end", + "Ġon line", + "ĠIs rael", + "ĠSept ember", + "Ġab ility", + "Ġcont ent", + "is es", + "Ġre ve", + "Ġl aun", + "Ġind ic", + "Ġfor ce", + "c ast", + "Ġso ld", + "av ing", + "f l", + "Ġso ft", + "Ġcompan ies", + "ce ed", + "Ġart icle", + "Ġa ud", + "Ġre v", + "Ġed uc", + "Ġplay ing", + "0 5", + "Ġhe ld", + "ct or", + "Ġrele ased", + "Ġf ederal", + "3 7", + "Ġad minist", + "Ġinter view", + "Ġinst all", + "Ġrece ived", + "Ġs ource", + "u k", + "P h", + "Ġser ious", + "Ġcre ated", + "Ġc ause", + "Ġim medi", + "Ġdef in", + "u el", + "ĠDep artment", + "ct ions", + "ĠC our", + "ĠN ow", + "z e", + "it es", + "it ution", + "Ġl ate", + "Ġspe ak", + "n ers", + "Ġleg al", + "ar i", + "ĠC or", + "Ġwe eks", + "Ġmod el", + "Ġp red", + "Ġex act", + "B C", + "ĠB y", + "IN G", + "os ing", + "Ġt akes", + "Ġreg ard", + "Ġopp ortun", + "Ġpr ice", + "Ġ19 8", + "ĠA pr", + "f ully", + "Ġor d", + "Ġproble ms", + "ru ction", + "h am", + "ĠC ount", + "le ge", + "Ġlead ers", + "E T", + "le v", + "Ġde ep", + "olog ical", + "es e", + "h aps", + "ĠS ome", + "Ġp ers", + "Ġcont ract", + "Ġrelations hip", + "s p", + "ou d", + "Ġb ase", + "4 8", + "m it", + "A d", + "anc ial", + "Ġcons um", + "Ġpot ential", + "Ġl angu", + "re m", + "et h", + "Ġrel ig", + "ress ed", + "6 6", + "Ġl ink", + "Ġl ower", + "ay er", + "ĠJ une", + "Ġf em", + "un t", + "er c", + "ur d", + "Ġcont act", + "Ġ ill", + "Ġm other", + "Ġest ab", + "h tt", + "ĠM arch", + "ĠB ro", + "ĠCh ina", + "Ġ2 9", + "Ġs qu", + "Ġprov ided", + "Ġa verage", + "as ons", + "Ġ201 1", + "Ġex am", + "l in", + "5 5", + "n ed", + "Ġper fect", + "Ġt ou", + "al se", + "u x", + "Ġbu y", + "Ġsh ot", + "Ġcol lect", + "Ġph ot", + "Ġplay ed", + "Ġsur pr", + "Ġofficial s", + "Ġsim ple", + "av y", + "Ġindust ry", + "Ġhand s", + "g round", + "Ġp ull", + "Ġr ound", + "Ġus er", + "Ġr ange", + "u ary", + "Ġpriv ate", + "op s", + "e es", + "Ġw ays", + "ĠM ich", + "Ġve h", + "Ġex cept", + "Ġter ms", + "im um", + "pp er", + "I ON", + "ore s", + "ĠDr agon", + "ou l", + "Ġd en", + "Ġperform ance", + "Ġb ill", + "c il", + "4 7", + "Ġen vironment", + "Ġex c", + "ad d", + "Ġwor th", + "Ġp ict", + "Ġch ance", + "Ġ201 8", + "b or", + "Ġspe ed", + "ict ion", + "Ġal leg", + "ĠJ apan", + "at ory", + "re et", + "Ġm atch", + "ĠI I", + "Ġst ru", + "ord er", + "Ġst e", + "Ġl iving", + "Ġst ruct", + "in o", + "Ġse par", + "her n", + "Ġresp onse", + "Ġen joy", + "Ġv ia", + "A D", + "um ents", + "ace book", + "Ġmem ber", + "ib r", + "iz ing", + "Ġto ol", + "ĠM on", + "ĠWh ile", + "h ood", + "ĠA ng", + "ĠD ef", + "Ġoff er", + "T r", + "a ur", + "Ġturn ed", + "ĠJ uly", + "d own", + "an ced", + "Ġrec ently", + "ĠE ar", + "Ġc e", + "ĠSt ar", + "ĠC ong", + "rough t", + "Ġbl ood", + "Ġhop e", + "Ġcom ment", + "ain t", + "Ġar ri", + "il es", + "Ġpartic ip", + "ough t", + "ri ption", + "0 8", + "4 9", + "Ġg ave", + "Ġse lect", + "Ġkill ed", + "sy ch", + "Ġgo 
es", + "i j", + "Ġc oll", + "Ġimp act", + "at ives", + "ĠS er", + "0 9", + "ĠAug ust", + "Ġb oy", + "d e", + "ĠD es", + "Ġf elt", + "U S", + "Ġexpect ed", + "Ġim age", + "ĠM ark", + "cc ording", + "o ice", + "E C", + "ĠM ag", + "en ed", + "h old", + "ĠP ost", + "Ġpre vent", + "N o", + "Ġinvol ved", + "Ġey es", + "Ġquick ly", + "A t", + "un k", + "Ġbeh av", + "Ġ ur", + "Ġl ed", + "c ome", + "e y", + "Ġcand id", + "Ġear lier", + "Ġfoc us", + "et y", + "P ro", + "led ge", + "ix ed", + "ill ed", + "Ġpop ular", + "A P", + "Ġset t", + "l ight", + "Ġvar ious", + "in ks", + "Ġlevel s", + "Ġro ad", + "ell ig", + "ab les", + "he l", + "itte e", + "ĠG ener", + "y pe", + "Ġhe ard", + "ic les", + "Ġm is", + "Ġus ers", + "ĠS an", + "Ġimpro ve", + "Ġf ather", + "Ġse arch", + "The y", + "v il", + "Ġprof ess", + "Ġkn ew", + "Ġl oss", + "Ġev ents", + "6 5", + "Ġb illion", + "0 7", + "0 2", + "ĠNew s", + "ĠA M", + "Ġco ver", + "w here", + "ens ion", + "Ġb ott", + "Ġare as", + "en ces", + "op e", + "ĠTw itter", + "a el", + "Ġget s", + "ĠGo ogle", + "Ġs n", + "i ant", + "Ġv ote", + "Ġnear ly", + "Ġinclud ed", + "Ġrec ogn", + "z z", + "m m", + "al ed", + "Ġhappen ed", + "0 4", + "Ġh ot", + "Ġwho se", + "Ġc ivil", + "Ġsu ff", + "o es", + "it iz", + "ĠSy ri", + "Ġresp ond", + "Ġh on", + "Ġfeat ures", + "Ġeconom ic", + "ĠApr il", + "r im", + "Ġtechn ology", + "Ġo ption", + "ag ing", + "Ġpur ch", + "R e", + "Ġl at", + "ch ie", + "is l", + "Ġrec omm", + "u f", + "Ġtr aining", + "Ġeffect s", + "Ġf ast", + "Ġ201 0", + "Ġocc ur", + "Ġwebs ite", + "Ġem ail", + "Ġs ens", + "e ch", + "Ġo il", + "Ġinf lu", + "Ġcurrent ly", + "ĠS ch", + "ĠAd d", + "Ġgo al", + "Ġsc ient", + "Ġcon v", + "1 00", + "em y", + "Ġdec ided", + "Ġtra vel", + "Ġm ention", + "L L", + "0 3", + "Ġe lection", + "Ġph one", + "Ġlook s", + "Ġsit uation", + "Ġc y", + "Ġh or", + "b ed", + "ĠCour t", + "a ily", + "av es", + "Ġqu ality", + "ĠCom p", + "w ise", + "Ġt able", + "Ġst aff", + "ĠW ind", + "et t", + "Ġtri ed", + "ide red", + "Ġadd ition", + "Ġb ox", + "Ġl ack", + "ar ily", + "Ġw ide", + "Ġm id", + "Ġbo ard", + "ys is", + "Ġant i", + "h a", + "Ġd ig", + "en ing", + "Ġd ro", + "C on", + "6 8", + "Ġsl ow", + "b ased", + "se qu", + "Ġp ath", + "E x", + "ak er", + "Ġwork ed", + "Ġp en", + "Ġeng ine", + "Ġlook ed", + "ĠSu per", + "ĠS erv", + "Ġvict im", + "U n", + "Ġproper ty", + "Ġint rodu", + "Ġexec ut", + "ĠP M", + "L e", + "Ġcol or", + "ĠM ore", + "Ġ6 0", + "Ġnet work", + "Ġd ate", + "c ul", + "id ge", + "Ġext ra", + "3 1", + "Ġs le", + "6 7", + "Ġw ond", + "Ġreport s", + "j ust", + "ĠAust ral", + "Ġcap ital", + "Ġen s", + "Ġcomm and", + "Ġallow ed", + "Ġpre p", + "Ġca pt", + "h ib", + "Ġnum bers", + "ch an", + "Ġf air", + "m p", + "om s", + "Ġre ach", + "W ith", + "t ain", + "Ġbro ad", + "Ġcou ple", + "ec ause", + "ly ing", + "ĠF eb", + "Ġsc reen", + "Ġl ives", + "Ġpri or", + "ĠCong ress", + "A r", + "Ġappro ach", + "Ġe mer", + "ar ies", + "ĠD is", + "s erv", + "ĠN e", + "Ġbu ilt", + "c ies", + "Ġre pe", + "Ġrul es", + "for ce", + "ĠP al", + "Ġfin ancial", + "Ġcons idered", + "ĠCh ar", + "n ces", + "ĠI S", + "Ġb rought", + "Ġb i", + "i ers", + "ĠS im", + "O P", + "Ġproduct s", + "Ġvis it", + "Ġdoc ument", + "Ġcon duct", + "Ġcomplete ly", + "in ing", + "ĠCal if", + "ib ly", + "Ġwr itten", + "ĠT V", + "em ents", + "Ġd raw", + "O ne", + "Ġpub lished", + "Ġsec ret", + "r ain", + "he t", + "ĠF acebook", + "ond ay", + "ĠU p", + "Ġsex ual", + "Ġth ous", + "ĠP at", + "Ġ ess", + "Ġstand ard", + "Ġar m", + "g es", + "ect ion", + "Ġf ell", + "Ġfore ign", + "an 
i", + "ĠFr iday", + "Ġreg ular", + "in ary", + "Ġincre ased", + "Ġus ually", + "Ġdem on", + "Ġd ark", + "Ġadd itional", + "ro l", + "ĠO f", + "Ġprodu ction", + "! !", + "und red", + "Ġintern ational", + "id ents", + "ĠF ree", + "rou p", + "Ġr ace", + "Ġm ach", + "Ġh uge", + "A ll", + "le ar", + "ove mber", + "Ġto wn", + "Ġatt ention", + "ĠO ff", + "y ond", + "ĠThe n", + "f ield", + "Ġter ror", + "ra z", + "ĠB o", + "Ġmeet ing", + "ĠP ark", + "Ġar rest", + "Ġf ear", + "Ġa w", + "ĠV al", + "or ing", + "' ,", + "Ġext reme", + "ar r", + "Ġwork ers", + "A fter", + "Ġ3 1", + "n et", + "am ent", + "Ġdirect ly", + "Ġpop ulation", + "ub e", + "ĠOct ober", + "ĠI N", + "ĠJan uary", + "5 9", + "ĠDav id", + "Ġc ross", + "ce mber", + "ĠF irst", + "Ġmess age", + "ir it", + "Ġn ation", + "Ġp oll", + "is ions", + "Ġansw er", + "n y", + "is ode", + "Ġcar ry", + "ĠRuss ia", + "Ġhe ar", + "eng th", + "ro y", + "Ġn atural", + "in ally", + "Ġdo g", + "m itted", + "Ġtr ade", + "Ġsub st", + "Ġmult iple", + "ĠAf ric", + "Ġf ans", + "Ġs ort", + "Ġgl obal", + "ic ation", + "ĠW ed", + "ar a", + "Ġa chie", + "Ġlangu age", + "ve y", + "Ġt al", + "Ġnecess ary", + "Ġdet ails", + "Ġs en", + "ĠS und", + "ĠRe g", + "ĠR ec", + "0 6", + "Ġs il", + "ress ive", + "Ġmed ical", + "un ch", + "orn ia", + "Ġu nd", + "f ort", + "oc ks", + "ĠM onday", + "ues day", + "c raft", + "7 7", + "ur t", + "Ġ ver", + "ĠH ill", + "Ġrece ive", + "Ġmor ning", + "es tern", + "Ġb ank", + "Ġs at", + "ir th", + "ĠH igh", + "Ġdev ice", + "ĠTH E", + "ĠCent er", + "Ġsaf e", + "Ġp le", + "ĠCanad a", + "Ġsystem s", + "Ġass ist", + "Ġsur v", + "Ġb attle", + "ĠS oc", + "vert is", + "S he", + "Ġp aper", + "Ġgrow th", + "Ġc ast", + "S c", + "Ġpl ans", + "ll ed", + "Ġpart s", + "Ġw all", + "Ġmove ment", + "Ġpract ice", + "im ately", + "Ġdis play", + "Ġsomet imes", + "om p", + "ĠP aul", + "ĠY es", + "k ing", + "5 8", + "o ly", + "Ġs on", + "Ġav oid", + "ok es", + "ĠJ ew", + "Ġto wards", + "as c", + "Ġ //", + "ĠK ore", + "Ġtalk ing", + "Ġcor rect", + "Ġsp ent", + "ic ks", + "i able", + "e ared", + "Ġter m", + "Ġwant s", + "om ing", + "Ġ ut", + "Ġdou b", + "Ġfor ces", + "Ġp lease", + "6 9", + "ĠN ovember", + "at form", + "ond on", + "Ġon es", + "Ġimmedi ately", + "ĠRuss ian", + "ĠM et", + "Ġde g", + "Ġparent s", + "C H", + "ĠAmeric ans", + "al y", + "ĠM od", + "Ġsh own", + "Ġcond itions", + "Ġst uff", + "Ġre b", + "ĠY our", + "Ġinclud es", + "n own", + "ĠS am", + "Ġexper ien", + "m ission", + "ĠE ven", + "augh t", + "Ġannoun ced", + "ĠRepublic an", + "Ġdeter min", + "Ġdescrib ed", + "ĠCount y", + "( )", + "Ġdo or", + "Ġchang ed", + "Ġne igh", + "ĠH ere", + "Ġcle an", + "Ġp an", + "ĠDe cember", + "ĠEurope an", + "ir ing", + "ap ter", + "Ġcl ub", + "ĠT uesday", + "Ġp aid", + "ĠN et", + "Ġattack s", + "Ġcharact ers", + "Ġal one", + "Ġdirect or", + "d om", + "Ġ3 5", + "Ġl oad", + "Ġr out", + "ĠCalif ornia", + "Ġfin ally", + "Ġr ac", + "Ġcont r", + "Ġexact ly", + "res h", + "p ri", + "ĠIs lam", + "Ġn ature", + "Ġcare er", + "Ġlat est", + "Ġcon vers", + "ĠS l", + "p ose", + "ci ent", + "ĠIn c", + "iv ity", + "8 8", + "ĠA tt", + "ĠM or", + "nes day", + "Ġwe ight", + "k en", + "Ġnot e", + "Ġteam s", + "Ġ \\", + "air s", + "ĠG reen", + "Ġh undred", + "on ent", + "Ġstre ng", + "Ġcons ist", + "ic ated", + "Ġreg ul", + "Ġl ic", + "ast ic", + "Ġt en", + "urs day", + "ellig ence", + "ous ly", + "ĠU K", + "B I", + "Ġcost s", + "Ġind epend", + "ĠA P", + "Ġnorm al", + "Ġh om", + "Ġob vious", + "Ġs we", + "Ġst ar", + "Ġread y", + "ac her", + "Ġimp lement", + "g est", + "Ġs ong", + 
"ĠG et", + "ĠL ab", + "Ġinterest ing", + "us ing", + "Ġg iving", + "ĠSund ay", + "Ġet c", + "Ġm iddle", + "Ġrem ember", + "r ight", + "os ition", + "ut ions", + "Ġm ax", + "4 6", + "Ġyour self", + "Ġdem and", + "Ġtreat ment", + "Ġd anger", + "ĠC ons", + "Ġgu y", + "ĠBrit ish", + "Ġphys ical", + "Ġrel ated", + "Ġrem ain", + "Ġcould n", + "Ġref er", + "Ġc itiz", + "b ox", + "EN T", + "bo ard", + "Ġin n", + "I G", + "er o", + "ĠSt reet", + "osp ital", + "ren ch", + "cher s", + "Ġst ra", + "O L", + "ag er", + "ĠA N", + "Ġeas ily", + "I A", + "en ge", + "in y", + "Ġcl os", + "ock ed", + "Ġus es", + "ĠC oun", + "I m", + "u ild", + "? ?", + "m ore", + "Ġan g", + "Ġwr ite", + "ol ute", + "5 7", + "Ġlead er", + "Ġread ing", + "< /", + "Ġaut om", + "est s", + "4 3", + "Ġleg isl", + "ĠG old", + "Ġdesign ed", + "ĠS T", + "ĠLe g", + "a res", + "Ġbe aut", + "ĠT ex", + "Ġappear s", + "Ġstru gg", + "ĠR om", + "Ġ 00", + "Ġcho ice", + "Ġparticular ly", + "ĠF rom", + "op er", + "ĠL ondon", + "ann ed", + "Ġallow s", + "ob ile", + "Ġdiffere nce", + "âĢ ¢", + "ĠV iew", + "ĠWed nesday", + "Ġal though", + "Ġrel ative", + "Ġapplic ation", + "ate ver", + "Ġare n", + "Ġmy self", + "Ġim ag", + "Ġdis e", + "Ġsoc iety", + "Ġfre qu", + "ĠEng lish", + "Ġpo or", + "ĠD ay", + "Ġwrit ing", + "Ġse ven", + "Ġstart ing", + "Ġb ud", + "Ġpr int", + "ĠTr ans", + "uf act", + "ĠSt ud", + "n ew", + "Ġcr im", + "Ġg ives", + "Ġco ol", + "a e", + "i ance", + "ĠGener al", + "Ġthink ing", + "Ġsa ve", + "Ġlim ited", + "ĠPart y", + "Ġmean ing", + "p en", + "ow ers", + "ĠJ ack", + "E M", + "Ġn ice", + "ru pt", + "Ġg as", + "Ġe ight", + "Ġfe et", + "Ġeff ort", + "Ġ ign", + "ic it", + "B l", + "co in", + "Ġop in", + "Ġbr ain", + "Wh ile", + "he st", + "ĠTh ursday", + "Ġwould n", + "augh ter", + "Ġtou ch", + "le ments", + "Ġstud ies", + "Ġcent er", + "c ont", + "or ge", + "Ġcomput er", + "Ġinvestig ation", + "P l", + "or ks", + "Ġ200 8", + "Ġincre asing", + "Ġst ore", + "Ġcom ments", + "Ġb al", + "m en", + "Ġdo ll", + "Ġl iber", + "Ġw ife", + "Ġlaw s", + "atur day", + "it ness", + "Ġmod ern", + "ĠS k", + "Ġadminist ration", + "Ġopportun ity", + "Ġs al", + "Ġpower ful", + "M y", + "Ġclaim s", + "ĠEar th", + "ord s", + "Ġt itle", + "Ġes c", + "n ame", + "N ot", + "om en", + "Ġbe yond", + "Ġc amer", + "Ġse ll", + "it ute", + "ear ch", + "Ġapp l", + "im ent", + "4 2", + "ĠAr t", + "Ġun f", + "Ġviol ence", + "ur g", + "ĠE ast", + "Ġcomp ared", + "Ġopt ions", + "Ġthrough out", + "Ġv s", + "ig r", + ". 
[", + "ac hes", + "7 8", + "Ġfil es", + "F L", + "E L", + "ar ian", + "ĠJ ames", + "ĠA ir", + "an ch", + "Ġdet ail", + "Ġpie ce", + "P S", + "Ġn amed", + "Ġeduc ation", + "Ġdri ve", + "Ġitem s", + "Ġstud ent", + "ic ed", + ": :", + "ic o", + "Ġth row", + "Ġsc ene", + "Ġcomple x", + "Ġ200 9", + "Ġpre c", + "ĠB re", + "7 9", + "Ġcon cept", + "Ġstat us", + "am ing", + "Ġd ied", + "Ġknow ledge", + "Ġbegin ning", + "O D", + "ru ary", + "Ġcertain ly", + "Ġgu ys", + "Ġsl ight", + "in n", + "ound s", + "Ġf ine", + "Ġf at", + "ic ations", + "Ġper haps", + "ĠA nt", + "Ġinc ome", + "Ġhtt ps", + "Ġmajor ity", + "port s", + "st on", + "Ġgreat er", + "Ġfe ed", + "ent ially", + "Ġsaf ety", + "Ġun ique", + "and om", + "Ġg one", + "Ġshow ed", + "Ġhist or", + "Ġcoun ter", + "i us", + "id a", + "Ġlead ing", + "i pe", + "Ġs end", + "ĠDon ald", + "er ve", + "Ġdef ense", + "ines e", + "Ġy es", + "ĠF ire", + "ĠMus lim", + "ra q", + "Ġcontin ued", + "os h", + "Ġprov ides", + "Ġpr ison", + "ĠP re", + "Ġhapp y", + "Ġeconom y", + "Ġtr ust", + "ag s", + "ĠG ame", + "Ġweap ons", + "um an", + "ĠC le", + "it ation", + "Ġanal ysis", + "ĠT imes", + "Ġsc ience", + "- >", + "Ġfig ure", + "Ġdis app", + "ent y", + "Ġsoft ware", + "Ġu lt", + "Ġoffic ers", + "N ew", + "I s", + "Ġrem ains", + "ĠInd ia", + "Ġp sych", + "ri ef", + "Ġc at", + "es c", + "Ġob serv", + "Ġst age", + "ĠD ark", + "Ġent er", + "ch ange", + "Ġpass ed", + "Ġdes pite", + "ĠO ut", + "Ġmov ie", + "r s", + "Ġv oice", + "m ine", + "ĠPl ay", + "Ġto ward", + "ĠT er", + "Ġreg ion", + "Ġval ues", + "or ters", + "Ġm ount", + "Ġoffic er", + "ĠO ther", + "b an", + "Ġh ous", + "w ood", + "ro om", + "I V", + "ĠS un", + "se e", + "ĠO ver", + "ro g", + "9 0", + "Ġl ay", + "ĠT ur", + "a wn", + "Ġpress ure", + "ĠS ub", + "Ġbook s", + "ed om", + "ĠS and", + "A A", + "ag o", + "Ġre asons", + "f ord", + "Ġactiv ity", + "U T", + "N ow", + "ĠSen ate", + "ce ll", + "n ight", + "Ġcall s", + "in ter", + "Ġlet ter", + "ĠR ob", + "ĠJ e", + "Ġcho ose", + "ĠL aw", + "G et", + "B e", + "Ġro b", + "Ġtyp es", + "Ġpl atform", + "Ġqu arter", + "R A", + "ĠT ime", + "Ġmay be", + "ĠC r", + "9 5", + "p re", + "Ġmov ing", + "Ġl if", + "Ġgo ld", + "Ġs om", + "Ġpat ients", + "Ġtr uth", + "ĠK e", + "ur ance", + "ant ly", + "m ar", + "Ġchar ge", + "ĠG reat", + "Ġce le", + "---------------- ----------------", + "Ġro ck", + "ro id", + "an cy", + "Ġcred it", + "a ud", + "B y", + "ĠE very", + "Ġmov ed", + "ing er", + "rib ution", + "Ġn ames", + "Ġstra ight", + "ĠHe alth", + "ĠW ell", + "Ġfe ature", + "Ġr ule", + "Ġsc he", + "in ated", + "ĠMich ael", + "ber g", + "4 1", + "il ed", + "b and", + "Ġcl ick", + "ĠAng el", + "on ents", + " Ń", + "ĠI raq", + "ĠS aturday", + "Ġa ware", + "p art", + "Ġpat tern", + "O W", + "ĠL et", + "Ġgr ad", + "ign ed", + "Ġassoci ated", + "Ġst yle", + "n o", + "i ation", + "a ith", + "il ies", + "Ġst ories", + "ur ation", + "Ġindividual s", + "ĠâĢ ¦", + "m iss", + "ĠAss oci", + "ish ing", + "ab y", + "Ġsum mer", + "ĠB en", + "Ġ3 2", + "Ġar ch", + "ut y", + "ĠTex as", + "h ol", + "Ġfull y", + "Ġm ill", + "Ġfollow ed", + "ĠB ill", + "ĠInd ian", + "ĠSec ret", + "ĠB el", + "ĠFeb ruary", + "Ġjob s", + "Ġseem ed", + "ĠGo vern", + "i pped", + "Ġreal ity", + "Ġl ines", + "Ġp ark", + "Ġmeas ure", + "ĠO ur", + "I M", + "Ġbro ther", + "Ġgrow ing", + "Ġb an", + "Ġest im", + "Ġc ry", + "ĠS chool", + "Ġme chan", + "ĠO F", + "ĠWind ows", + "Ġr ates", + "ĠO h", + "Ġpos itive", + "Ġcult ure", + "ist ics", + "ic a", + "Ġh ar", + "y a", + "ite ly", + "i pp", + "Ġm ap", + "en cies", + "ĠWill iam", 
+ "I I", + "ak ers", + "5 6", + "ĠM art", + "ĠR em", + "Ġal tern", + "it ude", + "Ġco ach", + "row d", + "D on", + "Ġk ids", + "Ġj ournal", + "Ġcor por", + "Ġf alse", + "Ġwe b", + "Ġsle ep", + "Ġcont ain", + "Ġst o", + "Ġb ed", + "iver se", + "ĠR ich", + "ĠCh inese", + "Ġp un", + "Ġme ant", + "k nown", + "Ġnot ice", + "Ġfavor ite", + "a ven", + "Ġcond ition", + "Ġpur pose", + ") )", + "Ġorgan ization", + "Ġchall eng", + "Ġman ufact", + "Ġsus p", + "ĠA c", + "Ġcrit ic", + "un es", + "uc lear", + "Ġm er", + "vent ion", + "Ġ8 0", + "Ġm ist", + "ĠU s", + "ĠT or", + "htt p", + "ol f", + "Ġlarg er", + "Ġadv ant", + "Ġrese ar", + "Ġact ions", + "m l", + "Ġke pt", + "Ġa im", + ", '", + "c ol", + "Ġbenef its", + "if ying", + "Ġact ual", + "ĠIntern ational", + "Ġveh icle", + "Ġch ief", + "Ġeff orts", + "ĠLe ague", + "ĠM ost", + "Ġwa it", + "Ġad ult", + "Ġover all", + "Ġspe ech", + "Ġhigh ly", + "Ġfem ale", + "Ġer ror", + "Ġeffect ive", + "5 4", + "Ġenc our", + "w ell", + "Ġfail ed", + "Ġcons erv", + "Ġprogram s", + "Ġt rou", + "Ġa head", + "5 00", + "vertis ement", + "I P", + "ĠF ound", + "p ir", + "Ġ %", + "Ġcr ime", + "and er", + "Ġloc ation", + "ĠI ran", + "Ġbehav ior", + "az ing", + "Ġr are", + "Ġem b", + "Ġca used", + "Ġsh ip", + "Ġact ive", + "Ġcont ribut", + "Ġg reen", + "Ġac qu", + "Ġref lect", + "ven ue", + "Ġf irm", + "Ġb irth", + "] .", + "Ġclear ly", + "Ġem ot", + "Ġag ency", + "ri age", + "Ġmem ory", + "9 8", + "S A", + "ĠSe e", + "ac ing", + "C C", + "Ġbig gest", + "Ġr ap", + "Ġbas ic", + "Ġb and", + "e at", + "Ġsus pect", + "ĠM ac", + "Ġ9 0", + "m ark", + "ist an", + "Ġsp read", + "am s", + "k i", + "as y", + "ra v", + "ĠR ober", + "Ġdemon str", + "r ated", + "Ġabs olute", + "Ġpl aces", + "Ġim pl", + "ibr ary", + "Ġc ards", + "Ġdest roy", + "Ġv irt", + "ve re", + "Ġapp eared", + "y an", + "p oint", + "Ġbe g", + "Ġtem per", + "s pe", + "ant ed", + "ear s", + "ĠD irect", + "Ġl ength", + "Ġbl og", + "am b", + "Ġint eg", + "Ġres ources", + "ac c", + "if ul", + "Ġsp ot", + "Ġfor ced", + "Ġthous ands", + "ĠMin ister", + "Ġqu al", + "ĠF rench", + "at ically", + "Ġgener ally", + "Ġdr ink", + "Ġth us", + "I L", + "od es", + "Ġappro pri", + "ĠRe ad", + "Ġwh om", + "Ġey e", + "Ġcol lege", + "Ġ4 5", + "ire ction", + "Ġens ure", + "Ġapp arent", + "id ers", + "Ġrelig ious", + "Ġmin or", + "ol ic", + "Ġt ro", + "ĠWh y", + "rib ute", + "m et", + "Ġprim ary", + "Ġdevelop ed", + "Ġpe ace", + "Ġsk in", + "st e", + "av a", + "Ġbl ue", + "Ġfam ilies", + "Ġ ir", + "Ġapp ly", + "Ġin form", + "ĠSm ith", + "C T", + "i i", + "Ġlim it", + "Ġres ist", + "........ 
........", + "um n", + "Ġconf lic", + "Ġtw e", + "ud d", + "ĠT om", + "Ġl iter", + "qu e", + "b on", + "Ġha ir", + "Ġevent ually", + "Ġp us", + "Ġhelp ed", + "Ġag g", + "or ney", + "ĠApp le", + "Ġf it", + "ĠS ur", + "Ġpre m", + "Ġs ales", + "Ġsecond s", + "Ġstreng th", + "Ġfeel ing", + "¿ ½", + "Ġt our", + "Ġknow s", + "o om", + "Ġex erc", + "Ġsom ew", + "ï ¿½", + "> >", + "Ġsp okes", + "Ġide as", + "Ġreg ist", + "so ft", + "ĠD el", + "ĠP C", + "Ġpro pos", + "Ġlaun ch", + "Ġbott om", + "T H", + "ĠP lease", + "v est", + "it z", + "ĠIn ter", + "Ġsc ript", + "Ġr at", + "ar ning", + "Ġ il", + "ĠJ er", + "ĠA re", + "Ġwh atever", + "ok en", + "ci ence", + "Ġmod e", + "Ġag ree", + "Ġs ources", + "Ġinit ial", + "Ġrest rict", + "Ġwond er", + "us ion", + "## ##", + "ĠS il", + "vil le", + "Ġb urn", + "t w", + "as ion", + "Ġ £", + "Ġn or", + "u ing", + "Ġre ached", + "Ġs un", + "Ġc ateg", + "ig ration", + "Ġc ook", + "Ġprom ot", + "Ġm ale", + "Ġcl imate", + "Ġf ix", + "Ġalleg ed", + "U R", + "all ed", + "Ġim ages", + "C ont", + "ot a", + "Ġschool s", + "i os", + "Ġd rop", + "Ġst ream", + "ĠM o", + "Ġprevious ly", + "al ing", + "Ġp et", + "Ġdou ble", + "Ġ( @", + "ann el", + "Ġdef ault", + "t ies", + "Ġr ank", + "ĠD ec", + "ĠCoun cil", + "Ġweap on", + "Ġst ock", + "Ġanal y", + "ĠSt r", + "Ġpict ure", + "ĠPol ice", + "f erence", + "Ġcent ury", + "Ġcitiz ens", + "Ġon to", + "Ġexp and", + "Ġhe ro", + "ĠS ol", + "Ġw ild", + "Ġupd ate", + "Ġcustom ers", + "r ont", + "d ef", + "Ġl ik", + "Ġcrim inal", + "ĠChrist ian", + "S P", + "7 6", + "Ġle aving", + "Ġother wise", + "ĠD ist", + "Ġbas is", + "5 2", + "5 3", + "ic ip", + "ĠB er", + "Ġrecomm end", + "Ġfl oor", + "Ġc rowd", + "ol es", + "Ġ7 0", + "Ġcent ral", + "ĠE v", + "Ġd ream", + "Ġdown load", + "Ġconf ir", + "ĠTh om", + "Ġwind ow", + "Ġhapp ens", + "Ġun it", + "Ġt end", + "Ġs pl", + "Ġbec omes", + "Ġfight ing", + "Ġpred ict", + "ĠP ress", + "ĠP ower", + "Ġhe avy", + "ak ed", + "Ġf an", + "or ter", + "ate gy", + "B A", + "iz es", + "Ġsp end", + "H ere", + "Ġ200 7", + "Ġad op", + "ĠH am", + "Ġfoot ball", + "ĠP ort", + "od ay", + "5 1", + "amp ions", + "Ġtrans fer", + "h t", + "Ġ3 8", + "ter m", + "ac ity", + "Ġb ur", + "] ,", + "tern al", + "r ig", + "b ut", + "Ġthere fore", + "ĠB ecause", + "res p", + "re y", + "Ġm ission", + "S ome", + "Ġnot ed", + "Ġass um", + "Ġdise ase", + "Ġed it", + "Ġprog ress", + "r d", + "ĠB rown", + "oc al", + "Ġadd ing", + "Ġra ised", + "ĠAn y", + "Ġt ick", + "Ġsee ing", + "ĠPe ople", + "Ġagre ement", + "Ġser ver", + "Ġw at", + "Ġdeb ate", + "Ġsupp osed", + "il ing", + "Ġlarg est", + "Ġsuccess ful", + "ĠP ri", + "ĠDemocr atic", + "Ġj ump", + "ĠSyri a", + "Ġown ers", + "Ġoff ers", + "Ġshoot ing", + "Ġeff ic", + "se y", + "Ġha ven", + "ver se", + "te red", + "ĠL ight", + "im al", + "ĠB ig", + "Ġdef end", + "Ġbe at", + "Ġrecord s", + "% )", + "Ġsc en", + "Ġemploy ees", + "Ġdev ices", + "he m", + "Ġcom mer", + "ĠM ex", + "Ġbenef it", + "ĠPro f", + "Ġil leg", + "Ġsur face", + "ĠAl so", + "Ġh arm", + "ing ly", + "w ide", + "ĠA lex", + "Ġsh ut", + "ĠC ur", + "Ġl ose", + "p m", + "Ġchall enge", + "se mb", + "Ġst ation", + "Ġint elligence", + "Ġacc ur", + "ĠFl or", + "Ġrequ ires", + "ĠM al", + "b um", + "Ġh ospital", + "Ġsp irit", + "Ġoff ered", + "Ġprodu ce", + "ĠComm un", + "Ġcreat ing", + "Ġcr is", + "s pect", + "Ġend ed", + "Ġd aily", + "Ġvot ers", + "land s", + "i as", + "i h", + "on a", + "Ġsm art", + "ĠOff ice", + "ĠL ord", + "ri al", + "ĠIntern et", + "Ġcirc um", + "Ġextreme ly", + "' .", + "Ġopin ion", + "ĠM il", + "Ġg ain", + 
"B S", + "ĠF in", + "y p", + "Ġuse ful", + "Ġbud get", + "Ġcom fort", + "is f", + "Ġback ground", + "el ine", + "Ġep isode", + "Ġen emy", + "Ġtri al", + "Ġestab lish", + "d ate", + "ĠC ap", + "Ġcontin ues", + "Ġshow ing", + "ĠUn ion", + "w ith", + "Ġpost ed", + "ĠSy stem", + "Ġe at", + "ri an", + "Ġr ise", + "ĠGerman y", + "il s", + "Ġsign ed", + "Ġv ill", + "Ġgr and", + "m or", + "ĠEng land", + "Ġproject s", + "um ber", + "Ġconf erence", + "z a", + "Ġrespons ible", + "ĠAr ab", + "Ġlearn ed", + "âĢĶ âĢĶ", + "i pping", + "ĠGe orge", + "O C", + "Ġreturn ed", + "ĠAustral ia", + "Ġb rief", + "Q u", + "Ġbr and", + "ill ing", + "ab led", + "Ġhig hest", + "Ġtr ain", + "ĠComm ission", + "wh ile", + "Ġn om", + "cept ion", + "Ġm ut", + "ĠBl ue", + "Ġinc ident", + "v ant", + "8 6", + "ĠI D", + "Ġn uclear", + "7 4", + "ĠL ike", + "ĠR E", + "ĠM icro", + "l i", + "m ail", + "Ġcharg es", + "8 9", + "Ġad just", + "ad o", + "Ġear th", + "N A", + "Ġpr ices", + "P A", + "Ġd raft", + "Ġrun s", + "Ġcandid ate", + "ens es", + "Ġmanag ement", + "ĠPh il", + "ĠM iss", + "Ġte ach", + "g ram", + "Ġunderstand ing", + "a it", + "ic ago", + "A dd", + "ĠE p", + "sec ut", + "Ġsepar ate", + "Ġinst ance", + "Ġe th", + "Ġun less", + "**** ****", + "ĠF ore", + "in ate", + "Ġoper ations", + "S p", + "Ġf aith", + "g ar", + "ĠCh urch", + "ron ic", + "Ġconf ig", + "os ure", + "Ġactiv ities", + "Ġtrad itional", + "Ġ3 6", + "Ġd irection", + "Ġmach ine", + "Ġsur round", + "Ġp ush", + "un ction", + "ĠE U", + "Ġeas ier", + "Ġarg ument", + "G B", + "Ġm icro", + "Ġsp ending", + "iz ations", + "Ġthe ory", + "ad ow", + "Ġcall ing", + "ĠL ast", + "Ġd er", + "Ġinflu ence", + "Ġcomm it", + "Ġph oto", + "Ġun c", + "ist ry", + "g n", + "ast e", + "ack s", + "Ġdis p", + "ad y", + "d o", + "ĠG ood", + "Ġ `", + "Ġw ish", + "Ġreve aled", + "Âł Âł", + "l ig", + "Ġen force", + "ĠComm ittee", + "Ġche m", + "Ġmil es", + "Ġinterest ed", + "Ġsol ution", + "ic y", + "in ct", + "Ġ- >", + "ĠD et", + "Ġrem oved", + "Ġcomp ar", + "e ah", + "Ġpl ant", + "ĠS ince", + "Ġachie ve", + "Ġadvant age", + "Ġslight ly", + "b ing", + "Ġpl aced", + "u nder", + "201 5", + "ĠM ad", + "Ġt im", + "os es", + "Ġc ru", + "ĠR ock", + "Ġmost ly", + "Ġneg ative", + "Ġset ting", + "Ġprodu ced", + "Ġm ur", + "Ġconnect ion", + "ĠM er", + "Ġdri ver", + "Ġexecut ive", + "Ġass ault", + "Ġb orn", + "ĠV er", + "t ained", + "Ġstruct ure", + "Ġredu ce", + "Ġdec ades", + "Ġd ed", + "u ke", + "ĠM any", + "idd en", + "Ġle ague", + "S e", + "Ġjo in", + "Ġdis co", + "Ġd ie", + "c ks", + "act ions", + "Ġass ess", + "ag n", + "Ġgo als", + "our s", + "I R", + "Ġsen ior", + "ill er", + "m od", + "ip ment", + "oc ol", + "u y", + "ĠQ ue", + "Ġpart ies", + "ir gin", + "Ġle arning", + "it able", + "Ġstre et", + "Ġcamer a", + "A pp", + "Ġsk ills", + "b re", + "c ious", + "Ġcele br", + "ĠFr anc", + "Ġexist ing", + "Ġwill ing", + "l or", + "Ġ id", + "ĠSp ace", + "Ġcrit ical", + "ĠL a", + "ortun ately", + "Ġser ve", + "Ġc old", + "Ġspec ies", + "T S", + "Ġanim als", + "ĠB ay", + "Ġold er", + "ĠU nder", + "est ic", + "ĠT re", + "Ġte acher", + "Ġpre fer", + "v is", + "Ġth read", + "ĠM att", + "Ġmanag er", + "ãĥ »", + "Ġprofess ional", + "ĠV ol", + "Ġnot es", + "The se", + "ul a", + "Ġf resh", + "ent ed", + "u zz", + "ed y", + "clus ion", + "ĠR el", + "Ġdoub t", + "E O", + "Ġopen ed", + "ĠB it", + "Ad vertisement", + "Ġgu ess", + "ĠU N", + "Ġse qu", + "Ġexpl ain", + "ott en", + "Ġatt ract", + "ak s", + "Ġstr ing", + "Ġcont ext", + "oss ible", + "ĠRepublic ans", + "Ġsol id", + "Ġc ities", + "Ġask ing", + "Ġr 
andom", + "u ps", + "ur ies", + "ar ant", + "dd en", + "g l", + "ĠFlor ida", + "Ġdep end", + "ĠSc ott", + "Ġ3 3", + "Ġi T", + "ic on", + "Ġmention ed", + "Ġ2 000", + "Ġclaim ed", + "Ġdefin itely", + "ul f", + "Ġc ore", + "Ġopen ing", + "ĠCon st", + "wh ich", + "ĠT ra", + "A G", + "7 2", + "Ġbelie ved", + "ad a", + "Ġ4 8", + "ĠSec urity", + "yr ight", + "ĠP et", + "ĠL ou", + "Ġhold ing", + "======== ========", + "Ġ ice", + "Ġb row", + "Ġauthor ities", + "h ost", + "w ord", + "Ġsc ore", + "ĠD iv", + "Ġcell s", + "Ġtrans l", + "Ġneigh bor", + "Ġrem ove", + "u ct", + "Ġdist rict", + "ĠA ccording", + "Ġwor se", + "Ġconcern s", + "Ġpresident ial", + "Ġpolic ies", + "ĠH all", + "7 3", + "Ġh us", + "A Y", + "Ġ200 6", + "ĠJ ud", + "Ġindepend ent", + "ĠJust ice", + "ili ar", + "pr int", + "igh ter", + "Ġprotect ion", + "z en", + "Ġsu dden", + "h ouse", + "ĠJ es", + "P R", + "ĠIn f", + "Ġb ul", + "Ġ _", + "ĠServ ice", + "ĠP R", + "Ġstr ategy", + "ff ect", + "Ġgirl s", + "Ġmiss ing", + "oy al", + "ĠTe am", + "ul ated", + "Ġd at", + "Ġpolit ics", + "ab or", + "A ccording", + "Ġspe ll", + "Ġg raph", + "ort hern", + "T C", + "A b", + "Ġlab or", + "is her", + "Ġk ick", + "ĠiT unes", + "Ġstep s", + "pos es", + "Ġsmall er", + "E n", + "ber t", + "Ġro ll", + "Ġresear chers", + "Ġcl osed", + "Ġtrans port", + "Ġlaw y", + "________ ________", + "ĠCh icago", + "Ġas pect", + "Ġn one", + "Ġmar riage", + "9 6", + "Ġe lements", + "ĠF re", + "ĠS al", + "Ġd ram", + "F C", + "t op", + "e qu", + "Ġhe aring", + "Ġsupport ed", + "Ġtest ing", + "co hol", + "Ġmass ive", + "Ġst ick", + "Ġgu ard", + "is co", + "ph one", + "F rom", + "How ever", + "Ġb order", + "Ġcop y", + "ograph y", + "l ist", + "7 1", + "Ġown er", + "cl ass", + "ru it", + "r ate", + "ĠO nce", + "Ġdig ital", + "Ġt ask", + "ER S", + "Ġinc red", + "t es", + "+ +", + "ĠFr ance", + "Ġb reat", + "ow l", + "Ġiss ued", + "ĠW estern", + "Ġdet ect", + "Ġpart ners", + "Ġsh ared", + "ĠC all", + "Ġcan cer", + "ac he", + "rib e", + "Ġexpl ained", + "Ġhe at", + "{ \"", + "Ġinvest ment", + "ĠB ook", + "Ġw ood", + "Ġtool s", + "ĠAl though", + "Ġbelie f", + "Ġcris is", + "Ġg e", + "ĠM P", + "Ġoper ation", + "ty pe", + "~ ~", + "g a", + "Ġcont ains", + "ant a", + "Ġexp ress", + "ĠG roup", + "ĠJ ournal", + "k a", + "Ġam b", + "ĠUS A", + "Ġfind ing", + "Ġfund ing", + "h ow", + "Ġestab lished", + "ide os", + "Ġdeg ree", + "Ġdanger ous", + "ang ing", + "Ġfre edom", + "pp ort", + "out hern", + "Ġch urch", + "Ġc atch", + "ĠTw o", + "Ġpres ence", + "ĠGu ard", + "U p", + "Ġauthor ity", + "ĠPro ject", + "Ġbut ton", + "Ġcon sequ", + "Ġval id", + "Ġwe ak", + "Ġstart s", + "Ġref erence", + "ĠM em", + "\" )", + "U N", + "or age", + "ĠO pen", + "Ġcol lection", + "y m", + "g ency", + "Ġbeaut iful", + "ro s", + "Ġtell s", + "Ġwa iting", + "n el", + "Ġprov iding", + "ĠDemocr ats", + "Ġd aughter", + "Ġm aster", + "Ġpur poses", + "ĠJapan ese", + "Ġequ al", + "Ġturn s", + "Ġdoc uments", + "Ġwatch ing", + "R es", + "Ġr an", + "201 4", + "Ġre ject", + "ĠKore a", + "Ġvictim s", + "Le vel", + "ere nces", + "Ġw itness", + "Ġ3 4", + "Ġre form", + "com ing", + "Ġocc up", + "Ġc aught", + "Ġtra ffic", + "ad ing", + "Ġmod els", + "ar io", + "Ġserv ed", + "Ġb atter", + "u ate", + "ĠSecret ary", + "Ġagre ed", + "Ġtr uly", + "yn am", + "ĠR et", + "Ġun its", + "ĠRes earch", + "h and", + "az ine", + "ĠM ike", + "Ġvar iety", + "ot al", + "Ġam azing", + "Ġconfir med", + "Ġentire ly", + "Ġpurch ase", + "Ġe lement", + "Ġc ash", + "Ġdeter mine", + "D e", + "Ġc ars", + "ĠW all", + "â ĸ", + "Ġview s", + "Ġdrug s", + 
"Ġdep artment", + "ĠSt ep", + "u it", + "Ġ3 9", + "as ure", + "ĠCl ass", + "Ġc overed", + "ĠB ank", + "Ġme re", + "u ana", + "Ġmult i", + "Ġm ix", + "Ġun like", + "lev ision", + "Ġsto pped", + "Ġs em", + "ĠG al", + "ul es", + "Ġwe l", + "ĠJohn son", + "l a", + "Ġsk ill", + "Ġbec oming", + "ri e", + "Ġappropri ate", + "f e", + "ell ow", + "ĠPro t", + "ul ate", + "oc ation", + "Ġweek end", + "od ies", + "Ġsit es", + "Ġanim al", + "ĠT im", + "Ġsc ale", + "Ġcharg ed", + "Ġinst ruct", + "ill a", + "Ġmethod s", + "Ġc ert", + "Ġjud ge", + "ĠH el", + "Ġdoll ars", + "Ġstand ing", + "ĠS qu", + "Ġdeb t", + "l iam", + "Ġdri ving", + "ĠS um", + "ĠEd ition", + "Ġal bum", + "and on", + "I F", + "ĠU k", + "6 3", + "ad er", + "Ġcommer cial", + "es h", + "ĠGovern ment", + "Ġdisc overed", + "Ġout put", + "ĠHill ary", + "ĠCar ol", + "Ġ200 5", + "Ġab use", + "anc ing", + "Ġsw itch", + "Ġann ual", + "T w", + "Ġst ated", + "ag ement", + "in ner", + "Ġdem ocr", + "Ġres idents", + "Ġallow ing", + "Ġfact ors", + "od d", + "Ġf uck", + "em ies", + "Ġoccur red", + "ot i", + "Ġn orth", + "ĠP ublic", + "Ġinj ury", + "Ġins urance", + "C L", + "oll y", + "ã Ģ", + "Ġrepe ated", + "Ġar ms", + "ang ed", + "Ġconst ruction", + "Ġf le", + "P U", + "ic ians", + "Ġfor ms", + "ĠMc C", + "ant ic", + "Ġm ental", + "p ire", + "Ġequ ipment", + "Ġf ant", + "Ġdiscuss ion", + "Ġregard ing", + "k in", + "ar p", + "Ġch air", + "og ue", + "Ġpro ceed", + "ĠI d", + "O ur", + "Ġmur der", + "M an", + "Ġ4 9", + "as p", + "Ġsupp ly", + "Ġin put", + "Ġwe alth", + "liam ent", + "Ġpro ced", + "or ial", + "ĠSt at", + "ĠN FL", + "hen s", + "ĠInst itute", + "Ġput ting", + "ourn ament", + "et ic", + "Ġloc ated", + "Ġk id", + "er ia", + "r un", + "Ġpr inc", + "Ġ !", + "go ing", + "ĠB et", + "Ġcl ot", + "Ġtell ing", + "Ġprop osed", + "i ot", + "or ry", + "Ġfund s", + "g ment", + "ĠL ife", + "Ġb aby", + "ĠB ack", + "Ġsp oke", + "Im age", + "Ġear n", + "ĠA T", + "g u", + "Ġex change", + "ĠL in", + "ov ing", + "Ġp air", + "M ore", + "az on", + "Ġarrest ed", + "Ġkill ing", + "c an", + "ĠC ard", + "y d", + "Ġident ified", + "Ġm obile", + "Ġthan ks", + "ony m", + "ĠF orm", + "Ġhundred s", + "ĠCh ris", + "ĠC at", + "Ġtre nd", + "h at", + "ĠA v", + "om an", + "Ġelect ric", + "ĠW il", + "S E", + "O f", + "Ġrest aur", + "ot ed", + "Ġtr ig", + "Ġn ine", + "Ġb omb", + "Wh y", + " ¯", + "Ġco verage", + "Ġapp eal", + "ĠRober t", + "ĠS up", + "Ġfin ished", + "Ġfl ow", + "Ġdel iver", + "Ġcal cul", + "Ġphot os", + "Ġph il", + "Ġpie ces", + "Ġapp re", + "k es", + "Ġr ough", + "D o", + "Ġpart ner", + "Ġconcern ed", + "Ġ3 7", + "ĠG en", + "C ol", + "ct ors", + "Ġ= >", + "st ate", + "Ġsuggest ed", + "ĠFor ce", + "C E", + "Ġher self", + "ĠPl an", + "w orks", + "o oth", + "ren cy", + "Ġcor ner", + "Ġhus band", + "Ġintern et", + "ĠA ut", + "em s", + "os en", + "ĠAt l", + "g en", + "Ġbal ance", + "6 2", + "Ġsound s", + "te xt", + "Ġar r", + "ov es", + "Ġmill ions", + "Ġrad io", + "Ġsat isf", + "ĠD am", + "M r", + "G o", + "S pe", + "Ġcomb at", + "r ant", + "ĠG ree", + "Ġf uel", + "Ġdist ance", + "Ġtest s", + "Ġdec re", + "ĠE r", + "Ġman aged", + "D S", + "Ġt it", + "Ġmeas ures", + "ĠL iber", + "Ġatt end", + "as hed", + "ĠJ ose", + "ĠN ight", + "d it", + "ĠN ov", + "ĠE nd", + "out s", + "Ġgener ation", + "Ġadv oc", + "y th", + "Ġconvers ation", + "ĠS ky", + "act ive", + "ce l", + "ri er", + "ĠFr ank", + "Ġg ender", + "Ġcon cent", + "Ġcar ried", + "and a", + "ĠV irgin", + "Ġarri ved", + "ic ide", + "ad ed", + "Ġfail ure", + "Ġmin imum", + "le ts", + "Ġwor st", + "Ġkeep ing", + 
"Ġint ended", + "Ġilleg al", + "Ġsub sc", + "Ġdetermin ed", + "Ġtri p", + "Y es", + "Ġra ise", + "Ġ ~", + "Ġfeel s", + "Ġpack age", + "ĠJ o", + "h i", + "201 6", + "re al", + "Ġf ra", + "Ġsy mb", + "M e", + "uck y", + "p ret", + "ĠK h", + "ĠEd it", + "ĠWe b", + "em ic", + "ĠCol or", + "Ġjust ice", + "I nt", + "Ġfar m", + "ck now", + "\" >", + "el ess", + "Ġredu ced", + "Ġ5 00", + "x x", + "ĠR ad", + "ĠW ood", + "Ġcl in", + "Ġhy p", + "il er", + "ur a", + "k ins", + "8 5", + "6 1", + "ĠThe ir", + "ĠM ary", + "Ġs an", + "Ġno vel", + "ĠWh o", + "Ġcap acity", + "Ġimp ossible", + "Ġpl ays", + "Ġmin ister", + "ij uana", + "ic ate", + "ĠS et", + "Ġf ram", + "Ġ ing", + "Ġcommun ities", + "ĠF BI", + "it a", + "Ġb on", + "Ġstr ateg", + "Ġinterest s", + "l ock", + "g ers", + "m as", + "ĠAN D", + "Ġconflic t", + "Ġrequire ments", + "Ġs ac", + "Ġoper ating", + "in i", + "rel ated", + "Ġcomm itted", + "Ġrelative ly", + "Ġs outh", + "¯ ¯", + "Ġaff ord", + "Ġident ity", + "Ġdec isions", + "Ġacc used", + "pl ace", + "Ġvict ory", + "o ch", + "i at", + "N ame", + "C om", + "t ion", + "ed s", + "Ġsee k", + "Ġt ight", + "ĠIm ages", + "Ġinit i", + "Ġhum ans", + "Ġfam iliar", + "Ġaud ience", + "Ġintern al", + "vent ure", + "Ġs ides", + "ĠT O", + "Ġd im", + "Ġcon clud", + "Ġapp oint", + "Ġenforce ment", + "ĠJ im", + "ĠAssoci ation", + "Ġcircum st", + "ĠCanad ian", + "Ġjo ined", + "Ġdiffere nces", + "ĠL os", + "Ġprot est", + "Ġtw ice", + "w in", + "Ġgl ass", + "ars h", + "ĠAr my", + "Ġexp ression", + "Ġdec ide", + "Ġplan ning", + "an ia", + "Ġhand le", + "ĠMicro soft", + "ĠN or", + "Ġmax imum", + "ĠRe v", + "Ġse a", + "Ġev al", + "Ġhel ps", + "re f", + "Ġb ound", + "Ġm outh", + "Ġstand ards", + "Ġcl im", + "ĠC amp", + "ĠF ox", + "cl es", + "Ġar my", + "ĠTe chn", + "ack ing", + "x y", + "S S", + "Ġ4 2", + "Ġbu g", + "ĠUk rain", + "ĠM ax", + "ĠJ ones", + "ĠSh ow", + "l o", + "Ġplan et", + "Ġ7 5", + "Ġwin ning", + "Ġf aster", + "Ġspe ct", + "Ġbro ken", + "T R", + "Ġdef ined", + "Ġhealth y", + "Ġcompet ition", + "htt ps", + "ĠIs land", + "ĠF e", + "Ġannoun ce", + "ĠC up", + "ĠInst ead", + "Ġcl ient", + "Ġposs ibly", + "se ction", + "ock et", + "l ook", + "Ġfin ish", + "Ġcre w", + "Ġres erv", + "Ġed itor", + "Ġh ate", + "Ġs ale", + "Ġcontro vers", + "Ġp ages", + "w ing", + "Ġnum er", + "Ġopp osition", + "Ġ200 4", + "Ġref uge", + "Ġfl ight", + "Ġap art", + "ĠL at", + "A meric", + "ĠAfric a", + "Ġapplic ations", + "ĠPal est", + "ĠB ur", + "Ġg ar", + "ĠSoc ial", + "Ġup gr", + "Ġsh ape", + "Ġspe aking", + "ans ion", + "a o", + "ĠS n", + "Ġwor ry", + "ĠBrit ain", + "P lease", + "rou d", + "Ġh un", + "Ġintrodu ced", + "Ġd iet", + "I nd", + "ĠSec ond", + "Ġfun ctions", + "ut s", + "ĠE ach", + "ĠJe ff", + "Ġst ress", + "Ġaccount s", + "Ġgu arant", + "ĠAn n", + "ed ia", + "Ġhon est", + "Ġt ree", + "ĠAfric an", + "ĠB ush", + "} ,", + "Ġs ch", + "ĠOn ly", + "Ġf if", + "ig an", + "Ġexerc ise", + "ĠEx p", + "Ġscient ists", + "Ġlegisl ation", + "ĠW ork", + "ĠS pr", + "à Ĥ", + "ĠH uman", + "Ġ è", + "Ġsur vey", + "Ġr ich", + "ri p", + "Ġmain tain", + "Ġfl o", + "Ġleaders hip", + "st ream", + "ĠIslam ic", + "Ġ 01", + "ĠCol lege", + "Ġmag ic", + "ĠPr ime", + "Ġfig ures", + "201 7", + "ind er", + "x ual", + "ĠDe ad", + "Ġabsolute ly", + "Ġfour th", + "Ġpresent ed", + "resp ond", + "rib le", + "Ġal cohol", + "at o", + "ĠD E", + "por ary", + "Ġgr ab", + "Ġvar i", + "Ġqu ant", + "ĠPh oto", + "Ġpl us", + "r ick", + "ar ks", + "Ġaltern ative", + "Ġp il", + "Ġappro x", + "th at", + "Ġobject s", + "ĠR o", + "ĠAnd roid", + "Ġsignificant ly", + 
"ĠR oad", + "k ay", + "R ead", + "av or", + "Ġa cknow", + "ĠH D", + "ĠS ing", + "O r", + "ĠM ont", + "Ġun s", + "pro f", + "Ġneg oti", + "ĠAr ch", + "ik i", + "Ġte levision", + "ĠJew ish", + "Ġcomm ittee", + "Ġmot or", + "Ġappear ance", + "Ġs itting", + "Ġstri ke", + "ĠD own", + "com p", + "ĠH ist", + "Ġf old", + "ac ement", + "ĠLou is", + "Ġbel ong", + "ĠâĢ ¢", + "Ġm ort", + "Ġprep ared", + "Ġ6 4", + "ĠM aster", + "Ġind eed", + "ĠD en", + "Ġre nt", + "T A", + "our ney", + "ar c", + "S u", + "9 7", + "Ġadv ice", + "Ġchang ing", + "Ġlist ed", + "Ġlaun ched", + "is ation", + "ĠP eter", + "is hes", + "Ġl ived", + "ĠM el", + "ĠSup reme", + "ĠF ederal", + "Ġ) ;", + "ruct ure", + "Ġset s", + "Ġphil os", + "u ous", + "Ġ ł", + "Ġappl ied", + "ĠN OT", + "Ġhous ing", + "ĠM ount", + "Ġo dd", + "Ġsu st", + "D A", + "ffic ient", + "Ġ ?", + "ol ved", + "Ġp owers", + "Ġth r", + "Ġrem aining", + "ĠW ater", + "L C", + "Ġca uses", + "ãģ ®", + "Ġman ner", + "ad s", + "Ġsuggest s", + "Ġend s", + "stand ing", + "f ig", + "ĠD un", + "id th", + "Ġg ay", + "Ġter min", + "ĠAngel es", + "M S", + "Ġscient ific", + "Ġco al", + "ap ers", + "b ar", + "ĠThom as", + "Ġsy m", + "ĠR un", + "th is", + "P C", + "igr ants", + "Ġmin ute", + "ĠDist rict", + "cell ent", + "Ġle aves", + "Ġcomple ted", + "am in", + "Ġfoc used", + "Ġmon itor", + "Ġveh icles", + "M A", + "ĠM ass", + "ĠGr and", + "Ġaffect ed", + "itution al", + "Ġconst ruct", + "Ġfollow s", + "Ġt on", + "re ens", + "Ġh omes", + "ĠE xt", + "ĠLe vel", + "r ast", + "ĠI r", + "Ġel im", + "Ġlarge ly", + "ĠJ oe", + "Ġvot es", + "all s", + "Ġbusiness es", + "ĠFound ation", + "ĠCent ral", + "Ġy ards", + "Ġmaterial s", + "ul ner", + "Ġgu ide", + "Ġclos er", + "um s", + "Ġsp orts", + "ed er", + "J ust", + "Ġtax es", + "8 4", + "ĠO ld", + "Ġdec ade", + "ol a", + "Ġv ir", + "Ġdro pped", + "Ġdel ay", + "it ect", + "Ġsec ure", + "ste in", + "le vel", + "Ġtre ated", + "Ġfil ed", + "ain e", + "Ġv an", + "Ġm ir", + "Ġcol umn", + "ict ed", + "e per", + "Ġro t", + "Ġcons ult", + "Ġent ry", + "Ġmar ijuana", + "ĠD ou", + "Ġapparent ly", + "ok ing", + "clus ive", + "Ġincre ases", + "an o", + "Ġspecific ally", + "Ġte le", + "ens ions", + "Ġrelig ion", + "ab ilities", + "Ġfr ame", + "ĠN ote", + "ĠLe e", + "Ġhelp ing", + "Ġed ge", + "ost on", + "Ġorgan izations", + "à ĥ", + "ĠB oth", + "hip s", + "Ġbig ger", + "Ġbo ost", + "ĠSt and", + "Ġro w", + "ul s", + "ab ase", + "Ġr id", + "L et", + "are n", + "ra ve", + "Ġst ret", + "P D", + "Ġv ision", + "Ġwe aring", + "Ġappre ci", + "Ġa ward", + "ĠU se", + "Ġfact or", + "w ar", + "ul ations", + ") (", + "Ġg od", + "Ġter rit", + "Ġpar am", + "ast s", + "8 7", + "Ġen emies", + "ĠG ames", + "F F", + "Ġacc ident", + "W ell", + "ĠMart in", + "T ER", + "Ġat h", + "ĠHe ll", + "Ġfor g", + "Ġve ter", + "ĠMed ic", + "f ree", + "Ġst ars", + "Ġexp ensive", + "Ġac ad", + "ra wn", + "ĠW he", + "Ġl ock", + "Ġform at", + "Ġsold iers", + "s m", + "Ġag ent", + "Ġrespons ibility", + "or a", + "ĠS cience", + "Ġrap id", + "Ġt ough", + "ĠJes us", + "Ġbelie ves", + "M L", + "Ġwe ar", + "le te", + "Ãĥ ÃĤ", + "ĠD ri", + "Ġcomm ission", + "ĠB ob", + "O h", + "ap ed", + "Ġwar m", + "ÃĥÃĤ ÃĥÃĤ", + "Ġ200 3", + "ort ion", + "Ġhas n", + "ust er", + "Ġun ivers", + "ĠI ll", + "Ġk ing", + "olog ies", + "9 4", + "ĠT em", + "ĠM os", + "Ġpat ient", + "ĠMex ico", + "ce an", + "ĠDe ath", + "ĠSand ers", + "y ou", + "ĠC ast", + "ĠComp any", + "pt y", + "Ġhappen ing", + "F P", + "ĠB attle", + "Ġb ought", + "A m", + "M od", + "U s", + "ut ers", + "ĠC re", + "ĠTh ose", + "Ġ4 4", + "is er", + 
"Ġs oul", + "ĠT op", + "ĠHar ry", + "ĠA w", + "Ġse at", + "ff ee", + "Ġrev olution", + "Ġ( \"", + "ĠD uring", + "et te", + "Ġr ing", + "Ġoff ensive", + "Ġreturn s", + "Ġv ideos", + "Ġdis cl", + "Ġfam ous", + "en ced", + "ĠS ign", + "ĠR iver", + "Ġ3 00", + "P M", + "ĠB us", + "ĠC H", + "Ġcandid ates", + "ard en", + "Ġpercent age", + "Ġvis ual", + "Ġthan k", + "Ġtrou ble", + "ner gy", + "Ġ200 1", + "Ġpro ve", + "ash ion", + "Ġen h", + "ĠL ong", + "U M", + "Ġconnect ed", + "Ġposs ibility", + "O ver", + "Ġexper t", + "Ġl ibrary", + "art s", + "ĠDirect or", + "Ġfell ow", + "9 2", + "ir ty", + "Ġd ry", + "Ġsign s", + "ĠL ove", + "Ġqu iet", + "f oot", + "Ġp ure", + "ĠH un", + "Ġf illed", + "ph as", + "ĠE lect", + "end ment", + "ĠEx pl", + "Ġun able", + "n s", + "m o", + "Ġv ast", + "ob e", + "Ġident ify", + "app ing", + "ĠCarol ina", + "g ress", + "Ġpro te", + "Ġf ish", + "Ġcircumst ances", + "raz y", + "ĠPh ot", + "Ġb odies", + "ĠM ur", + "Ġdevelop ing", + "ĠA R", + "Ġexperien ced", + "Ġsubst ant", + "ĠBo ard", + "es ome", + "Ġdom estic", + "Ġcomb ined", + "ĠP ut", + "Ġchem ical", + "ĠCh ild", + "Ġpo ol", + "ĠC y", + "Ġe gg", + "c ons", + "st ers", + "Ġh urt", + "Ġmark ets", + "Ġconserv ative", + "Ġsupp orters", + "Ġag encies", + "id el", + "O b", + "ur b", + "Ġ4 3", + "ĠDef ense", + "y e", + "ĠA p", + "du le", + "Ġtemper ature", + "Ġconduct ed", + "ĠCh ief", + "Ġpull ed", + "Ġf ol", + "L ast", + "ont o", + "os is", + "V ER", + "D es", + "ĠP an", + "F irst", + "Ġadv ance", + "Ġlic ense", + "r ors", + "ĠJ on", + "Ġimag ine", + "Ġhe ll", + "Ġf ixed", + "Ġinc or", + "os ite", + "ĠL og", + "ick en", + "] :", + "Ġsurpr ise", + "h ab", + "Ġc raft", + "ol t", + "ĠJ ul", + "Ġd ial", + "Ġrele vant", + "Ġent ered", + "Ġlead s", + "ĠA D", + "ĠCle an", + "Ġpict ures", + "ess or", + "Ġal t", + "Ġpay ing", + "P er", + "ĠMark et", + "Ġupd ates", + "am ily", + "ĠT ype", + "ĠH ome", + "Ġ5 5", + "semb ly", + "rom e", + "8 3", + "Ġgreat est", + "Ġhe ight", + "Ġhe av", + "ain ts", + "Ġlist en", + "as er", + "ĠS H", + "Ġcap able", + "ac le", + "Ġpers pect", + "in ating", + "Ġoff ering", + "ry pt", + "ĠDe velop", + "ab in", + "r c", + "Ġbr ight", + "al ty", + "ar row", + "Ġsupp l", + "ind ing", + "ack ed", + "gy pt", + "ĠAn other", + "p g", + "ĠVirgin ia", + "ĠL u", + "Ġpl anned", + "Ġp it", + "Ġswe et", + "T ype", + "ĠD i", + "Ġtyp ically", + "ĠFranc isco", + "Ġpro spect", + "ĠD an", + "Ġte en", + "re es", + "Ġsc hed", + "Ġh ol", + "Ġsc r", + "Ġlot s", + "l ife", + "Ġnews p", + "Ġfor get", + "ĠN one", + "ĠM iddle", + "ĠR yan", + "ed d", + "Ġse vere", + "Ġsu it", + "ll er", + "9 3", + "Ġcor respond", + "Ġexpl os", + "u ations", + "Ġfl ag", + "g ame", + "r id", + "Ġpr in", + "ĠD ata", + "Ġde ploy", + "ĠEn ter", + "su it", + "gh an", + "ĠM en", + "Ġthough ts", + "Ġmat ters", + "Ġad apt", + "ĠA ri", + "Ġf ill", + "Ġfor th", + "Ġs am", + "Ġ4 1", + "Ġpay ment", + "ĠH or", + "Ġsp ring", + "du c", + "Ġl osing", + "Ġbring ing", + "F O", + "al a", + "Ġdist ribution", + "he red", + "b our", + "ĠIsrael i", + "om a", + "Ġcomb ination", + "Ġpl enty", + "V E", + "C an", + "ĠH aw", + "Ġper man", + "ĠSpe cial", + "Ġto w", + "Ġsee king", + "Ġexam ples", + "Ġclass es", + "c r", + "Ġbe er", + "Ġmov es", + "ĠI P", + "ĠK n", + "Ġpan el", + "E ven", + "Ġproper ly", + "Ġr is", + "Ġpl ug", + "Ġestim ated", + "E very", + "Ġdef ensive", + "ag raph", + "Ġpre gn", + "Ġinst it", + "ĠV ict", + "Ġvol ume", + "Ġpos itions", + "Ġl inks", + "ĠPro gram", + "ĠWe ek", + "ag ues", + "Ġtrans form", + "k er", + "ĠC EO", + "Ġc as", + "Ġopp onent", + "Ġtwe 
et", + "ĠC ode", + "Ġsh op", + "Ġf ly", + "Ġtal ks", + "Ġb ag", + "Ph one", + "Ġa id", + "Ġpl ants", + "Ġ6 5", + "Ġatt orney", + "ar ters", + "qu est", + "ĠMag ic", + "Ġbeg ins", + "Ġmy ster", + "Ġenvironment al", + "Ġst orage", + "N N", + "Ġm arg", + "Ġs ke", + "Ġmet al", + "ell y", + "Ġord ered", + "Ġrem ained", + "Ġl oved", + "Ġprom pt", + "Ġupd ated", + "Ġexper ts", + "Ġwalk ing", + "Ġan cient", + "Ġperform ed", + "AT E", + "Ġne ither", + "i ency", + "Ġmanufact ure", + "ĠP ak", + "Ġselect ed", + "Ġm ine", + "Ġult imately", + "Ġexpl an", + "Ġlab el", + "ĠServ ices", + "ribut ed", + "Tr ump", + "Ġsy n", + "ĠU lt", + "S C", + "Ġme at", + "Ġg iant", + "ĠW ars", + "ĠO N", + "Ġad m", + "Ġinter pret", + "Ġeven ing", + "Ġev il", + "ĠB oston", + "ĠW ild", + "Ġ Ã", + "ĠBit coin", + "ĠAm azon", + "D r", + "ĠIn formation", + "Ġobvious ly", + "Ġadv anced", + "Ph oto", + "ol ar", + "Ġwe ather", + "Ġsymb ol", + "Ġso le", + "Ġpot entially", + "ost er", + "Ġorig inally", + "m un", + "3 00", + "az e", + "ess ions", + "Ġde ck", + "Ġst ood", + "Ġyou th", + "ĠB ern", + "R ep", + "ĠT est", + "Ġbas ically", + "ot ic", + "Ġinvol ve", + "ol it", + "ly n", + "S ee", + "Ġair craft", + "Ġconf irm", + "E W", + "Ġmess ages", + "ĠRich ard", + "Ġk it", + "Ġpro hib", + "Ġv ulner", + "is ters", + "Ġexist ence", + "Ġturn ing", + "ĠS P", + "Ġdes ire", + "Ġfl at", + "Ġm ent", + "se ason", + "ang es", + "Ġneighbor hood", + "ĠL ake", + "AT ION", + "Ġpoint ed", + "b ur", + "Ġinn ov", + "uc ks", + "U L", + "Ġprofess or", + "Ġexp ressed", + "A B", + "ic ious", + "Ġ200 2", + "ĠDe v", + "Ġs ession", + "Ġb are", + "s en", + "Ġdis s", + "ĠC ath", + "ĠP ass", + "ĠP oint", + "Ġdo ctor", + "or row", + "ail ed", + "ĠR ub", + "ĠD C", + "ĠChar l", + "p erson", + "Ġwrit er", + "igh ters", + "ure au", + "Ġob lig", + "Ġrecord ed", + "Ġbro ke", + "Ġord ers", + "il ty", + "Ġmot ion", + "in ity", + "l aw", + "ad ium", + "Ġimm igration", + "Ġcontr ast", + "Ġb att", + "Ġex cellent", + "Ġtechn ical", + "am i", + "Ġt un", + "Ġcl oud", + "ĠY ear", + "ge on", + "Ġcre ation", + "Ġstr ange", + "Ġa uth", + "Ġfor t", + "b orn", + "Ġext ent", + "ĠT oday", + "ĠCl ub", + "Ġr ain", + "Ġs ample", + "Ġaccept ed", + "Ġt act", + "Ġf ired", + "ĠS on", + "Ġstand s", + "Ġb oot", + "Ġ4 7", + "Ġstat ements", + "Ġvers ions", + "Ġse lling", + "ound ed", + "Ġ199 0", + "Ġwere n", + "ĠW atch", + "Ġexper iment", + "P ost", + "Ġret ail", + "ul ed", + "In st", + "un te", + "ãĥ ¼", + "Ġdep art", + "Ġb ond", + "i very", + "om pl", + "Ġre action", + "ĠSyri an", + "ĠP ac", + "app ed", + "ani el", + "D P", + "Ġres olution", + "Ġre act", + "Ġappro ved", + "on om", + "m ond", + "ĠO ffic", + "-- -", + "Ġrepl ace", + "Ġt ack", + "Ġsp ort", + "Ġch ain", + "Ġemer gency", + "r ad", + "ĠPalest in", + "Ġ4 6", + "Ġautom atically", + "Ġrout e", + "Ġp al", + "Ġb anks", + "ĠPar is", + "ĠMed ia", + "ro ad", + "ic ing", + "i xt", + "ist ed", + "Ġg rew", + "Ġco ord", + "ĠW here", + "om in", + "Ġsub s", + "� �", + "Ġ ±", + "Ġcorpor ate", + "Ġse lection", + "n oon", + "ĠRep ort", + "c s", + "clud ing", + "ord ers", + "anc he", + "ĠIt s", + "Ġslow ly", + "ĠE gypt", + "ĠA cc", + "Ġcol le", + "iqu es", + "E X", + "Ġattempt s", + "ur l", + "ĠC ross", + "Ġfind ings", + "ĠS C", + "ĠO R", + "Ġind ex", + "ens ity", + "ĠW ay", + "ĠL and", + "Ġsh ock", + "d is", + "Ġd ynam", + "Ġc art", + "m osp", + "S ince", + "i est", + "ĠB oy", + "Ġst orm", + "ĠCont in", + "201 3", + "he w", + "il it", + "Ġess ential", + "iqu id", + "O ther", + "ive red", + "Ġreason able", + "A ct", + "Ġsub sequ", + "ĠP ack", + "ĠF 
ort", + "Ġconsider ing", + "Ġun iversity", + "l og", + "Ġmar ried", + "Ġill ust", + "ĠTr ue", + "£ ı", + "Ġnumer ous", + "rast ructure", + "Ġserious ly", + "Ġrefer red", + "u a", + "Ġconsist ent", + "on na", + "ĠRe al", + "ru ption", + "ci ples", + "Ġfact s", + "9 1", + "ot es", + "er g", + "The n", + "Ġacc ompl", + "N ote", + "Ġre venue", + "Ġpass ing", + "Ġm al", + "e en", + "ĠY et", + "Ġg ather", + "ter day", + "ew ork", + "ĠA uthor", + "P e", + "Ġopt im", + "Ġr ub", + "Ġè £ı", + "Ġun known", + "st one", + "Ġun ion", + "ol ve", + "Ġopportun ities", + "Ġbrow ser", + "ĠW al", + "ĠC ost", + "Ġreport ing", + "st s", + "p et", + "Ġs and", + "Ġsudden ly", + "Ġsurpr ising", + "ĠV R", + "Ġsomew hat", + "ĠB as", + "ult ure", + "iz z", + "ĠC D", + "Ġchalleng es", + "Ġsett ings", + "Ġexperien ces", + "ĠF ull", + "Ġcan n", + "Ġrece iving", + "ES T", + "Ġj oint", + "Ġcult ural", + "Ġa st", + "8 2", + "as tern", + "ce ived", + "ĠC ru", + "Ġb ull", + "p ired", + "am m", + "Ġfac ing", + "p ower", + "Ġb oss", + "ĠH ol", + "Ġinst r", + "Ġincreasing ly", + "Ġsh ift", + "Ġstre ets", + "ĠWilliam s", + "ab b", + "Ġl ie", + "Ġl augh", + "ĠC a", + "P L", + "Ġadult s", + "Ġcustom er", + "Ġob tained", + "Ġsupport ing", + "ht ml", + "f ire", + "Ġdetail ed", + "Ġpick ed", + "ĠR ight", + "ld er", + "E E", + "st ood", + "ĠK im", + "Ġw ire", + "Ġs ight", + "Ġdevelop ers", + "Ġpers ons", + "Ġs ad", + "Ġc up", + "Ġwar ning", + "Ġboy s", + "l ong", + "Ġb ird", + "f o", + "Ġw al", + "Ġobserv ed", + "Ġz one", + "iven ess", + "Ġch annel", + "c ript", + "Ġref used", + "ĠAg ain", + "Ġsu c", + "Ġspokes man", + "ĠRe f", + "r ite", + "ou ston", + "ãĥ ³", + "ĠS her", + "Ġact s", + "ĠN ame", + "Ġstrugg le", + "ar ry", + "omet imes", + "Ġdisc rim", + "H T", + "Ġcateg ory", + "Ġreal ize", + "Ġemploy ee", + "ĠAf ghan", + "en ger", + "Ġgun s", + "ĠSte ve", + "ĠM ot", + "ĠO l", + "ok ed", + "Ġth ick", + "Ġfair ly", + "ill y", + "Ġsur ve", + "ĠM at", + "we ight", + "â Ķ", + "Ġtro ops", + "Ġag ents", + "Ġbatter y", + "Ġmot iv", + "à ¡", + "S ec", + "d en", + "o very", + "L S", + "Ġfl u", + "Ġconf ident", + "ĠO per", + "Ġem pty", + "Ġp hen", + "Ġse ctor", + "Ġexc ited", + "Ġrem ote", + "ap h", + "o en", + "Ġdestroy ed", + "Ġmor al", + "ĠH P", + "ĠR on", + "Ġd ress", + "ĠB at", + "Ġl it", + "ĠM S", + "Ġa f", + "H L", + "r um", + "is ms", + "Ġshould n", + "Ġsym pt", + "ĠTor onto", + "het ic", + "Ġcar bon", + "Ġinstall ed", + "Ġviol ent", + "Ġsol ar", + "j a", + "Ġpract ices", + "Ġr ide", + "ĠP enn", + "Ġimpro ved", + "Ġaud io", + "Ġbehav i", + "ĠP S", + "Ġe ating", + "D ata", + "ĠRe view", + "p ass", + "cl aim", + "u ated", + "ang ers", + "c hen", + "Ġproper ties", + "Ġany where", + "An other", + "Ġbl ow", + "ĠJack son", + "Ġp roud", + "Ġplan e", + "l ines", + "Ġsqu are", + "Ġpro of", + "ans as", + "Ġtalk ed", + "m akers", + "Ġs ister", + "Ġhold s", + "Ġres ident", + "Ġ= =", + "Ġresist ance", + "Ġspl it", + "Ġpro secut", + "Ġconf idence", + "res ents", + "Ġcut s", + "Ġexcept ion", + "Ġz ero", + "Get ty", + "Ġcop yright", + "Ġtot ally", + "orm al", + "ific ations", + "ĠAustral ian", + "Ġs ick", + "Ġ1 50", + "Ġhouse hold", + "Ġfe es", + "Ġdri vers", + "og en", + "ĠN Y", + "Ġnecess arily", + "Ġregul ations", + "ear ing", + "s l", + "Ġperspect ive", + "c are", + "ic ial", + "H is", + "Ġesc ape", + "Ġsurpr ised", + "ĠV an", + "ur rent", + "Ġv ac", + "8 1", + "ĠTh us", + "Ġem phas", + "ĠCh ampions", + "ĠI ce", + "Ġn arr", + "Ġhead s", + "Ġca using", + "b el", + "f ortunately", + "ĠM a", + "Ġtarg ets", + "ci pl", + "Ġafter noon", + "Ġadd s", + 
"ĠMay be", + "ĠF our", + "ess ed", + "ple te", + "Ġus ual", + "ch o", + "ing u", + "Ġwith d", + "ĠE nergy", + "ĠE conom", + "O O", + "Ġart icles", + "Ġinj ured", + "Ġman age", + "Ġexpl ains", + "Ġdi agn", + "R ec", + "at ures", + "Ġlink ed", + "Ġdiscuss ed", + "Ġexpl o", + "Ġocc asion", + "ath an", + "Ġopp osite", + "Ġfac es", + "Ġden ied", + "ĠK night", + "Ġn ut", + "Ġapprox imately", + "Ġdisapp oint", + "onym ous", + "ĠB est", + "ĠL o", + "ĠH y", + "ĠA ff", + "Ġvot ing", + "an while", + "ĠII I", + "Ġinstit utions", + "ag ram", + "ĠD aily", + "Ġdr ag", + "Ġnear by", + "Ġgu ilty", + "Ġcon ver", + "P re", + "s hip", + "Ġre ward", + "Ġphilos oph", + "ĠS S", + "u gh", + "Ġapp s", + "f riend", + "Ġu pper", + "Ġad vert", + "Ġs now", + "Ġfr ust", + "Ġour selves", + "F r", + "ĠD ie", + "amp ion", + "Ġdis miss", + "Ġc ere", + "Ġsign al", + "f rom", + "Ġ ).", + "Ġ5 2", + "Ġcr imes", + "it ors", + "est ival", + "use um", + "Ġcoun cil", + "ĠS aud", + "M ay", + "ĠG un", + "ic ian", + "et her", + "Ġsu fficient", + "ĠH en", + "so le", + "Ġhistor ical", + "ĠF ar", + "ĠT urn", + "Ġp in", + "Ġsuc ceed", + "m at", + "ly mp", + "Ġtrad ition", + "ĠO k", + "Ġc ro", + "Ġdesc ription", + "al le", + "Ġsk y", + "T e", + "Ġwide ly", + "Ġw ave", + "Ġdefin ition", + "ĠJew s", + "Ġcy cle", + "Ġref ere", + "Ġbr ings", + "us al", + "Ġal ive", + "Ġfrequ ently", + "Ġint ention", + "ĠCont rol", + "l v", + "y stem", + "Ġpriv acy", + "g ent", + "ren ce", + "ĠQu est", + "ĠChrist mas", + "Ġr ail", + "Ġco oper", + "Ġtest ed", + "ĠC apt", + "as ks", + "Ġcomfort able", + "Ġdel ivered", + "sc ape", + "Ġdep th", + "ĠG OP", + "Ġwrit es", + "Ġass ets", + "Ġsa v", + "im ents", + "Ġtrans ition", + "Ġart ist", + "ĠL ook", + "Ġl ob", + "Ġcomp onents", + "ar ity", + "Ġwalk ed", + "Ġro ot", + "Ġparticip ants", + "Ġnot iced", + "Ġres c", + "Ġn av", + "ĠAd minist", + "d a", + "ut ral", + "pl ate", + "Ġimport ance", + "Ġass ert", + "ious ly", + "c ription", + "Ġinj uries", + "ĠChe ck", + "Ġregist ered", + "Ġint ent", + "Ġmiss ed", + "ograph ic", + "Ġsent ence", + "oun ter", + "Ġassist ance", + "ev in", + "Ġdat abase", + "Ġbuild ings", + "Ġclass ic", + "Ġth inks", + "ĠOh io", + "P r", + "ug g", + "Ġfe e", + "p an", + "Ġeffect ively", + "Ġfac ility", + "Ġbe ar", + "Ġch apter", + "Ġdog s", + "ĠCol umb", + "Ġl atter", + "it ial", + "Ġad mitted", + "T V", + "ĠGe org", + "Ġpost s", + "\\ \\", + "Ġlawy er", + "Ġequ ival", + "Ġm and", + "Ġcontro lled", + "ĠW alk", + "ĠAnd rew", + "Ġmen u", + "am ental", + "Ġprotect ed", + "v a", + "Ġadminist r", + "or al", + "Ġre in", + "ĠS ar", + "Ġamount s", + "Ġn ative", + "ĠM oon", + "Ġrep resents", + "Ġab andon", + "Ġcarry ing", + "Ġt ank", + "m ary", + "Ġdecl ared", + "T ube", + "Ġh at", + "Ġpun ish", + "el lect", + "m es", + "Ġun iverse", + "ĠR od", + "ph y", + "Ġinf rastructure", + "Ġ5 1", + "Ġopp osed", + "ow nt", + "c a", + "ĠM ake", + "Ġhard ware", + "Ġco ffee", + "R el", + "b al", + "w orld", + "ĠS af", + "ĠSe a", + "in als", + "Ġown ed", + "Ġh all", + "ers ion", + "Ġdescrib e", + "ĠP ot", + "Ġport ion", + "Ġat mosp", + "Ġgovern ments", + "Ġdep ending", + "Ġoff ense", + "Ġtr ick", + "aw a", + "ĠL ine", + "ĠV is", + "ĠH ard", + "ĠOr ig", + "ĠCl ick", + "Ġdes k", + "ĠVal ley", + "ĠS ov", + "Ġmov ies", + "Ġrem ark", + "Ġm ail", + "Ġcons cious", + "Ġrul ing", + "ĠR ights", + "Ġmed ic", + "he nt", + "ĠW omen", + "> <", + "Ġrepl aced", + "ĠP rem", + "ĠTh anks", + "Ġre new", + "ĠB all", + "if orm", + "Ġsh ots", + "C omm", + "Ġar med", + "Ġconst ant", + "Ġt aste", + "Ġreal ized", + "Ġbu ff", + "Ġm o", + "Ġeffic 
ient", + "M ost", + "or ation", + "if ies", + "Ġcommun ication", + "Ġfl ood", + "Ġconsequ ences", + "Ġany way", + "ig g", + "ĠG M", + "ĠTh ank", + "Ġ iron", + "Ġev olution", + "ĠC op", + "tw itter", + "Ġ9 5", + "Ġrelationship s", + "ad el", + "ĠYou ng", + "Ġpropos al", + "ay ers", + "uild ing", + "ĠH ot", + "OR E", + "c os", + "Ġcoll abor", + "P G", + "ax y", + "Ġknow ing", + "Ġsupport s", + "ow ed", + "Ġcontrol s", + "Ġmere ly", + "um er", + "Ġath let", + "Ġf ashion", + "p ath", + "Ġg ift", + "Ġer a", + "AN D", + "Ġkind s", + "ĠKore an", + "Ġleg it", + "ul ous", + "Ġess entially", + "Ġthe rap", + "n ic", + "Ġsuff ered", + "Ġh ur", + "Ġprom ise", + "Ġex cess", + "Ġover w", + "Ġpr ime", + "ĠH ouston", + "er ry", + "ĠM s", + "R S", + "201 2", + "Ġst ores", + "ĠO lymp", + "Ġj ourney", + "Al though", + "S ub", + "ĠE duc", + "ĠCh apter", + "Ġrequest s", + "Ġconsum ers", + "Ġt iny", + "Ġis ol", + "ĠF air", + "b a", + "ĠY OU", + "Ġcr ash", + "ce ler", + "Ġemot ional", + "Ġgood s", + "Ġelect ed", + "Ġmod er", + "ĠLin ux", + "Ġbl ocks", + "Ġis land", + "ĠSoc iety", + "Ġelect ions", + "Ġbroad cast", + "Ġche ap", + "Ġn ations", + "Ġse asons", + "4 00", + "Ġwas te", + "ĠS at", + "Ġfield s", + "em ploy", + "Ġprof ile", + "Ġauth ors", + "AL L", + "ĠG ra", + "w est", + "ĠT y", + "Ġdeath s", + "Ġv acc", + "Ġfor med", + "Ġd u", + "Ġon going", + "ĠMuslim s", + "el f", + "ig ure", + "Ġass ume", + "ĠUkrain e", + "w ater", + "Ġco ast", + "Ġvot ed", + "g or", + "ĠA S", + "ĠMich igan", + "az a", + "ĠAr m", + "i ro", + "Ġf lex", + "as ters", + "' '", + "Ġwel come", + "ar l", + "Ġloc ations", + "ig ation", + "ĠF il", + "Ġbu ying", + "Ġarch itect", + "Ġhard er", + "ĠC ub", + "Ġinter face", + "Ġrestaur ant", + "Ġdisco ver", + "Ġex ceed", + "Ġfav our", + "ger y", + "Ġd uty", + "Ġp itch", + "ad or", + "ĠM ach", + "b oy", + "Ġrespond ed", + "Ġext ended", + "her s", + "M any", + "ra id", + "if er", + "ĠIn s", + "S er", + "Ġmed ium", + "s he", + "ĠS ports", + "Ġmag azine", + "ut ation", + "Ġlim its", + "ĠG all", + "Ġex ternal", + "raz il", + "Ġyoung er", + "t le", + "Ġrem ind", + "ĠC ON", + "Ġimmedi ate", + "Ġh idden", + "Ġvol unte", + "Ġsim pl", + "od cast", + "Ġph ase", + "d r", + "Ġpl ot", + "Ġexp osure", + "R I", + "og rap", + "v in", + "an ish", + "ĠAc ad", + "ĠEng ine", + "Ġexp ansion", + "ĠP ay", + "Y our", + "Ġpus hed", + "ĠE ll", + "ĠHe ad", + "Ġmarket ing", + "ĠA C", + "k et", + "Ġh its", + "Ġg ro", + "ĠA ge", + "ĠSc ot", + "] [", + "Ġst im", + "Ġi Phone", + "Ī Ĵ", + "Ġn arrow", + "ĠGet ty", + "ĠTur key", + "Ġperfect ly", + "Ġen able", + "ut ch", + "Ġprec ise", + "Ġreg ime", + "Ġsh if", + "Ġcomp ens", + "g un", + "d iv", + "Ġch osen", + "ĠK en", + "An y", + "Ġtre es", + "Ġrecomm ended", + "ĠR en", + "u able", + "ĠH T", + "F ollow", + "E G", + "ĠH and", + "ĠK enn", + "Ġarg uments", + "Ġex ists", + "Ġb ike", + "ĠCons erv", + "Ġbre aking", + "ĠG ar", + "Ġc razy", + "Ġvirt ual", + "ay lor", + "ix el", + "Ġ19 80", + "Ġper mission", + "ĠSer ies", + "Ġconsum er", + "Ġclose ly", + "c alled", + "Ġ5 4", + "Ġhop es", + "Ġar ray", + "ĠW in", + "ĠLab our", + "Ġsp ons", + "ĠI re", + "Ġp ow", + "Ġread ers", + "Ġemploy ment", + "Ġcreat ure", + "Ġresult ing", + "Ġaccur ate", + "Ġmom ents", + "Ġarg ued", + "Ġp ed", + "D uring", + "Ġ5 3", + "ĠT al", + "Ġs ought", + "Ġsuff ering", + "Ġ icon", + "le e", + "Ġ( $", + "al ian", + " °", + "Ġp ra", + "Ġbon us", + "( \"", + "k o", + "Ġact ing", + "D E", + "f all", + "Ġcompar ison", + "Ġsm ooth", + "ĠN AS", + "u pp", + "ĠJose ph", + "ep ing", + "ĠT ake", + "ĠM id", + "Ġs ending", + "f 
ast", + "ĠF all", + "Ġdeal ing", + "us er", + "ĠOr gan", + "C o", + "Ġatt ached", + "Ġse es", + "% .", + "Ġtyp ical", + "AR T", + "Ġfind s", + "ĠAs ia", + "um in", + "ĠC ore", + "ĠE nt", + "in ent", + "u ce", + "ĠBl ood", + "ĠN ever", + "Ġem ails", + "Ġhigh light", + "Ġconf ront", + "at us", + "ut ed", + "Ġun us", + "Ġtop ic", + "ĠAd am", + "Ġb le", + "at i", + "Ġunder stood", + "S et", + "st ruct", + "T P", + "Ġm ob", + "a a", + "ĠSt art", + "pect ed", + "se ll", + "Ġded icated", + "ĠC A", + "u an", + "Ġsong s", + "esc ription", + "Ġte ch", + "Ġr ape", + "Ġas ide", + "Ġgr ant", + "Ġ5 6", + "s ub", + "Ġarg ue", + "Ġcont aining", + "Ġsche dule", + "Ġliber al", + "Ġpublic ly", + "Ġheav ily", + "ĠU t", + "in er", + "ĠS ection", + "ĠC are", + "we et", + "l s", + "D is", + "âĶ Ģ", + "ĠF ollow", + "B ack", + "ĠI T", + "Ġb es", + "j i", + "ĠH it", + "est ed", + "Ġevery body", + "ĠSw ed", + "Ġfem in", + "Ġfac ilities", + "Ġcon ven", + "C omp", + "ĠO S", + "c ore", + "Ġan x", + "Ġdiv ision", + "ĠC am", + "ĠSt an", + "m ates", + "Ġexpl ore", + "pl om", + "Ġsh ares", + "pl oad", + "an es", + "Ġide al", + "et ers", + "ĠB ase", + "Ġpl astic", + "Ġdist inct", + "ĠNet work", + "ĠSe attle", + "Ġtrad ing", + "ens us", + "int end", + "Ġex hib", + "Ġinit ially", + "ĠF ood", + "Ġthous and", + "ĠBus iness", + "act er", + "Ġpar agraph", + "Ġrough ly", + "Ġw ww", + "Ġcreat ive", + "ĠCon f", + "Ġconsum ption", + "Ġfil ms", + "ag an", + "Ġob tain", + "Ġt all", + "Ġt or", + "Ġacknow led", + "Ġg rown", + "al o", + "K E", + "Ġ4 00", + "end ers", + "t aining", + "U G", + "Ġsu icide", + "Ġwat ched", + "ĠL ist", + "al i", + "re hens", + "Ġsurround ing", + "Ġp ip", + "Ġf lying", + "ĠJ ava", + "ord an", + "Ġserv ing", + "in ations", + "p ost", + "Ġsh o", + "A v", + "Ġj ail", + "z y", + "Ġ199 9", + "Ġ< /", + "Ġliter ally", + "ĠS ir", + "Ġexp osed", + "Ġl ies", + "st ar", + "Ġb at", + "Ġear ned", + "ĠD ig", + "Ġspec ified", + "ĠSe ason", + "Ġdeg rees", + "Don ald", + "Ġcent re", + "Ġsh aring", + "Ġwin ter", + "ĠC O", + "C he", + "Ġ Î", + "M P", + "Ġun w", + "Ġfew er", + "ĠM ir", + "Ġsomew here", + "ĠK ey", + "Ġattack ed", + "ĠK ir", + "Ġdom ain", + "Ġstrong er", + "Ġ9 9", + "Ġpen alty", + "I d", + "Sc ript", + "Ġdecl ined", + "Ġne ck", + "Ġfra ud", + "Ġcur rency", + "Ġr ising", + "R C", + "âĢ¦ âĢ¦", + "H z", + "Ġt ab", + "Ġtal ent", + "n am", + "ĠN BA", + "Ġvill age", + "Ġleg s", + "ĠN ext", + "E d", + "Ġac id", + "Ġhy d", + "8 00", + "Ġinvol ving", + "ĠIm age", + "ĠBe fore", + "F l", + "Ġyes terday", + "S ource", + "Ġterror ist", + "Ġsu p", + "Ġsy nt", + "ĠSaud i", + "Ġw est", + "Ġr u", + "b urg", + "Ġvis ible", + "Ġstru ck", + "r ison", + "Ġaw esome", + "Ġd rawn", + "Ġansw ers", + "ĠG irl", + "ĠR am", + "Ġthreat s", + "Ġdef eat", + "os it", + "Ġv ent", + "atur ally", + "Americ an", + "end a", + "ĠH oly", + "Ġr um", + "% ,", + "c ase", + "ĠHist ory", + "ĠYou Tube", + "Ġsit uations", + "ĠD NA", + "S te", + "Ġsa ved", + "It em", + "Ġrec ip", + "olog ist", + "Ġfac ed", + "Ġel ig", + "O nce", + "ĠL i", + "u h", + "Ġmist ake", + "ĠDiv ision", + "ĠB ell", + "Ġsympt oms", + " ®", + "Ġdom in", + "Ġfall ing", + "Ġend ing", + "as hes", + "Ġmat ches", + "ĠOn line", + "Ġexplan ation", + "D ef", + "red it", + "Ġany more", + "ĠT otal", + "ĠF OR", + "us hed", + "Ġlet ters", + "Ġris ks", + "ĠO K", + "Ġreported ly", + ": \\", + "Ġpl ate", + "Ġsubject s", + "Ġattempt ed", + "if ier", + "ian a", + "Ġunlike ly", + "ĠTh ough", + "um a", + "ĠIn vest", + "ĠPr in", + "ic an", + "ĠD ar", + "ĠColor ado", + "au g", + "Ġve get", + "a os", + "ri a", + 
"Ġshe l", + "Ġmark ed", + "Ġ( )", + "Ġsp r", + "p o", + "ĠL ink", + "Ġdef e", + "ĠJ r", + "Ġthem e", + "Ġpass ion", + "ĠP en", + "Ġinf o", + "iz er", + "Ġsh it", + "ĠC ivil", + "ap se", + "c re", + "Ġpo ly", + "Ġcomp onent", + "ĠChar les", + "ĠIre land", + "ĠPro v", + "Ġdo ctors", + "Ġgr anted", + "Ġpain t", + "Ġhon or", + "Ġsm oke", + "Ġpay ments", + "Ġprim arily", + "ĠKing dom", + "r ich", + "ate ll", + "Ġde als", + "Ġsched uled", + "Ġfund amental", + "Ġprote in", + "Ġnewsp aper", + "Ġcl ients", + "yth on", + "ĠD ate", + "h us", + "Ġfeed back", + "Ġstret ch", + "Ġc ock", + "Ġhot el", + "ĠQue en", + "Ġsu gar", + "Ġj u", + "Ġmil k", + "Ġappro val", + "ĠL ive", + "Ġequival ent", + "ef ully", + "Ġins ert", + "z ona", + "Ġext ension", + "d ri", + "J ohn", + "Ġacc omp", + "S m", + "ĠF und", + "Ġconst antly", + "Ġ` `", + "Ġgener ated", + "ĠA ction", + "ĠP sych", + "ĠT ri", + "Ġrecogn ize", + "Ġv ary", + "ph a", + "ĠR a", + "d f", + "et ch", + "ĠSov iet", + "Tw o", + "Ġpattern s", + "Ġprof ession", + "an ing", + "T ime", + "ĠL im", + "Ġcol ors", + "ĠA z", + "ĠT R", + "Ġinf ect", + "Ġphen omen", + "Ġshe ll", + "Al so", + "Ġput s", + "Ġdel ivery", + "Ġbro wn", + "Ġprocess ing", + "Ġlight s", + "ess age", + "ĠBro ok", + "ĠA ud", + "l ation", + "Ġindust rial", + "L ike", + "ĠB razil", + "rou s", + "ES S", + "ĠL uc", + "Ġsome how", + "Ġ8 5", + "Ġpro port", + "Ġpolit icians", + "Ġindic ate", + "Ġh ole", + "Ġtechn iques", + "Ġcompet itive", + "Ġph r", + "Ġv o", + "ist ent", + "ĠD ream", + "Ġcamp us", + "Ġaspect s", + "Ġhelp ful", + "Ġsh ield", + "or se", + "Ġtrig ger", + "m al", + "Ġ5 8", + "Ġt ort", + "Ġperson ally", + "Ġt ag", + "Ġkeep s", + "ĠV ideo", + "Ġben ch", + "Ġg ap", + "a ire", + "Ġe ast", + "Ġrec overy", + "per ial", + "Ġprof it", + "ĠM ic", + "Ġ5 7", + "Ġcol on", + "Ġstrong ly", + "st yle", + "Ġalleg ations", + "h an", + "Ġrep orters", + "j o", + "r ine", + "arg et", + "and al", + "Ġ0 3", + "Ġfl ash", + "tr ans", + "Ġstr ict", + "Ġpark ing", + "ĠPak istan", + "Ġl i", + "Ġwe ird", + "ĠE ric", + "Ġreg ions", + "ĠJ un", + "Ġint ellect", + "ĠW H", + "od ing", + "rib utes", + "up id", + "ĠT it", + "Ġf inger", + "or ia", + "Ġe lev", + "ĠF ield", + "Ġcon clusion", + "; ;", + "Ġfeel ings", + "Ġext ensive", + "Ġm ixed", + "Ġne uro", + "v y", + "Ġhar ass", + "ĠC irc", + "ou ch", + "Ġterrit ory", + "Ġsuccess fully", + "M ar", + "Ġing red", + "Ġoverw hel", + "Ġl ayer", + "V iew", + "Ġall ies", + "ill ance", + "ĠTh ree", + "Ġb unch", + "Ġnorm ally", + "Ġnet works", + "Ġsac r", + "ĠC IA", + "b les", + "Ġch ose", + "Ġopp onents", + "Ġregard less", + "Ġfr anch", + "Ġpre f", + "ĠP o", + "Ġbr idge", + "ann a", + "ĠSil ver", + "Ġw age", + "p age", + "ri or", + "Ġrad ical", + "ĠL ittle", + "Ġman ip", + "Ġsecret ary", + "Ġg ang", + "D R", + "F A", + "Ġdec ent", + "ĠSp irit", + "Ġun cle", + "ĠDevelop ment", + "Ġinvest ors", + "Ġwall s", + "Ġpub lish", + "Ġgener ate", + "iss ions", + "c ar", + "Ġprom ote", + "Ġcut ting", + "Ġche st", + "Ġdrink ing", + "Ġcollect ed", + "Ġ7 2", + "Ġhop ing", + "Ġem br", + "gor ith", + "Ġwar ned", + "Ġinstruct ions", + "O G", + "ĠD id", + "ĠAg ency", + "Ġg ear", + "Ġcritic ism", + "ĠF urther", + "Ġut il", + "ann y", + "R ed", + "Ġcoun sel", + "ĠAs ian", + "Ġredu ction", + "p ool", + "Ġteach ing", + "Ġdeep ly", + "i y", + "Ġestim ates", + "Ġcho ices", + "Ġperman ent", + "in em", + "ke l", + "Ġf asc", + "p se", + "f ile", + "ĠL ow", + "ĠP erson", + "Ġt ournament", + "st al", + "Ġm el", + "U ST", + "ĠR ay", + "az i", + "V al", + "Ġcont ained", + "ĠH olly", + "Ġw ake", + "Ġreve al", + 
"Ġprocess es", + "ĠIS IS", + "Ġ0 9", + "Ġbl ind", + "Ġste el", + "ĠB ad", + "Ġcare fully", + "app y", + "ro it", + "Ġg aming", + "Ġhous es", + "ĠC oll", + "Ġtr uck", + "er m", + "Ġsc ored", + "Ġocc as", + "ret urn", + "b ound", + "v ar", + "Ġsh arp", + "Ġaf raid", + "ĠE X", + "am ber", + "c ific", + "Ġsche me", + "N C", + "ĠPol it", + "Ġdecl ine", + "Ġ199 8", + "Ġpus hing", + "Ġposs ession", + "Ġpriv ile", + "Ġteacher s", + "Ġy ield", + "H A", + "ĠDav is", + "it led", + "#### ####", + "Ġr ig", + "ĠD aniel", + "ac on", + "Ġh ide", + "ut en", + "Ġcolle agues", + "Ġprin ciples", + "Ġl oud", + "Ġs in", + "ĠDem on", + "Ġst one", + "Ġ0 2", + "Ġt aught", + "Ġter rible", + "Ġst uck", + "ĠPol icy", + "te en", + "Ġimplement ation", + "ĠB BC", + "ĠAP I", + "Ġwhe el", + "all as", + "Ġch ampions", + "ol ars", + "play er", + "Ġrepeated ly", + "ĠSt ill", + "Ġlik es", + "ast y", + "es ter", + "ĠCath olic", + "R L", + "Ġb ath", + "Ġno ise", + "t itle", + "Ġn orthern", + "P art", + "Ġmag n", + "Ġf ab", + "ĠAs h", + "Ġdis pl", + "Ġtick et", + "Ġm urd", + "Ġalong side", + "ĠMus ic", + "Ġr iver", + "ĠSte el", + "ĠC L", + "ĠPl ayer", + "ĠM ult", + "ow ing", + "re p", + "s ize", + "Ġt ur", + "ĠGeorg ia", + "isc al", + "ra ction", + "Ġc able", + "Ġ5 9", + "Ġw ins", + "Ġup coming", + "Ġsurv ive", + "Ġins pired", + "ĠEduc ation", + "Ġstat istics", + "ĠF oot", + "iam i", + "Ġy ellow", + "ĠP age", + ". -", + "ĠH as", + "Ġur ban", + "Ġa x", + "es sel", + "\\ \"", + "Ġquarter back", + "Ġreg ister", + "ĠLab or", + "Ġab ilities", + "ĠF amily", + "Ġvar iable", + "ĠPr ice", + "Ġcont em", + "Ġth in", + "ĠE qu", + "d ata", + "Ġg otten", + "Ġconst it", + "Ġas ks", + "Ġt ail", + "Ġexc iting", + "ĠE ffect", + "ĠSp anish", + "Ġencour age", + "ins on", + "ĠA h", + "Ġcommit ment", + "C S", + "Ġr ally", + "Ġ: :", + "Ġsubs id", + "Ġsp in", + "Ġcapt ured", + "201 8", + "Ġinn oc", + "Ġalleged ly", + "ĠC ome", + "Ġart ists", + "ĠN umber", + "Ġelect ronic", + "Ġreg ional", + "ap es", + "Ġw ra", + "Ġmy th", + "pr ise", + "ĠM iller", + "ĠC reat", + "ĠEp isode", + "b ell", + "Ġdirect ed", + "Ġext ract", + "Ġs orry", + "Ġv ice", + "ag ger", + "ĠSu pport", + "Ġ6 6", + "ĠI ron", + "Ġwonder ful", + "Ġg ra", + "N et", + "ion e", + "E ng", + "Ġsh ips", + "ik es", + "ĠK evin", + "it ar", + "Ġactiv ists", + "tr ue", + "ĠAri zona", + "ent h", + "ĠDes pite", + "ĠS E", + "Ġha bit", + "ern el", + "Ġin qu", + "Ġab ortion", + "Ġv oid", + "Ġexpl icit", + "Ġeng aged", + "Ġang ry", + "Ġr ating", + "Ġfr ag", + "b ro", + "ick ing", + "d ev", + "Ġwor ried", + "Ġob ser", + "Ġap artment", + "ĠG T", + "Ġest ate", + "ĠConst itution", + "em on", + "ĠS now", + "Ġcount y", + "Ġdis ag", + "ĠStep hen", + "Ġimm igrants", + "w ind", + "ĠN ations", + "Ġfol ks", + "O ut", + "Ġg all", + "Ġtarget ed", + "Ġst ead", + "ĠB on", + "ĠL ib", + "Ġinform ed", + "Ġ12 0", + "ch ain", + "idel ines", + "or ough", + "Ġdri ven", + "Ġregular ly", + "Ġbas ket", + "Ġprinc iple", + "oc ument", + "Ġst un", + "ib ilities", + "ĠRom an", + "ĠAb out", + "Ġal ert", + "Ġdemocr acy", + "Ġrepresent ed", + "H S", + "c ers", + "p arent", + "Ar t", + "p ack", + "Ġdi plom", + "re ts", + "ĠN O", + "Ġcapt ure", + "ĠAd v", + "Ħ ¢", + "Ġannounce ment", + "ĠL ear", + "Ġh ook", + "Ġpur s", + "ĠS uch", + "ĠC amer", + "Ġrefuge es", + "ĠV e", + "P ol", + "Ġrecogn ized", + "l ib", + "Ġhad n", + "A ss", + "Ġpil ot", + "us hing", + "Ġreturn ing", + "Ġtra il", + "ĠSt one", + "Ġrout ine", + "Ġcour ts", + "Ġdes per", + "Ġfriend ly", + "ĠIt aly", + "Ġpl ed", + "Ġbreat h", + "Ġstud io", + "N S", + "Ġimp ressive", + 
"ĠAfghan istan", + "Ġf ing", + "Ġd ownt", + "ink ing", + "ĠR og", + "i ary", + "col or", + "se x", + "ar on", + "Ġf ault", + "ĠN ick", + "D own", + "ĠR ose", + "ĠS outhern", + "X X", + "is odes", + "L ist", + "6 00", + "Ġout come", + "er r", + "Ġelse where", + "Ġret ire", + "Ġp ounds", + "ĠGl obal", + "Pe ople", + "Ġcommun ications", + "Ġlo an", + "Ġrat io", + "ĠEm pire", + "Ġg onna", + "Ġinv ent", + "D F", + "Ġ19 70", + "ĠComm on", + "p at", + "Ġprom ised", + "Ġd inner", + "ĠH om", + "Ġcreat es", + "Ġoper ate", + "ver ty", + "ĠJ ordan", + "et ime", + "Ġsust ain", + "R eg", + "Ġincred ible", + "im a", + "Ġwar rant", + "Ġm m", + "A tt", + "Ġlaw suit", + "Ġreview s", + "it ure", + "ĠS ource", + "l ights", + "ĠF ord", + "Ġ6 3", + "g roup", + "st ore", + "Ġfeat ured", + "Ġfore ver", + "Ġpo verty", + "ĠP op", + "ĠC NN", + "az z", + "ab is", + "ach ing", + "Ġl aid", + "ĠSu pp", + "Ġfil ter", + "en a", + "ĠCommun ity", + "Ġcreat ures", + "u ction", + "ĠR oyal", + "Ġassoci ation", + "ĠCon nect", + "ĠBr ad", + "âĸ Ī", + "l ers", + "the re", + "ĠG i", + "Ġval uable", + "AC K", + "ĠT aylor", + "Ġl iquid", + "ĠAtt orney", + "ĠCar l", + "ĠF inal", + "ag a", + "ĠWil son", + "B ecause", + "ĠProf essor", + "ak a", + "Ġincred ibly", + "r ance", + "! )", + "R ef", + "s k", + "Ġsol utions", + "Ġatmosp here", + "Ġbl ame", + "um es", + "ĠN ob", + "C A", + "um ps", + "r ical", + "ĠPut in", + "ĠD est", + "or ic", + "ĠP A", + "Ġrespect ively", + "w an", + "Ġfif th", + "â Ħ¢", + "ĠC ry", + "Ġgovern or", + "res ident", + "Ġpurch ased", + "Ġh ack", + "Ġint ense", + "ob s", + "Ġorig in", + "Ġdef ine", + "Ġcare ful", + "** *", + "Ġshould er", + "Cl ick", + "Ġt ied", + "Ġdest ruction", + "ou red", + "Ġno body", + "Ġh o", + "ĠEx per", + "Ġt ip", + "\" ;", + "Ġtechn ique", + "Ġj ur", + "ĠP ok", + "b ow", + "Ġleg end", + "Ġacc ord", + "Ġbus y", + "ĠInt el", + "Ġh ang", + "ak i", + ". 
]", + "âĢĶâĢĶ âĢĶâĢĶ", + "Ġsur gery", + "Ġrep rodu", + "Ġun iform", + "Ġscen es", + "c ode", + "Ġ6 2", + "l isher", + "ĠH ave", + "ph ia", + "Ġcry pt", + "Ġrec on", + "Ġsc ream", + "Ġadop ted", + "Ġsc ores", + "N e", + "ĠIt alian", + "in cluding", + "B O", + "Ġindic ated", + "Ġent ertain", + "G u", + "T ext", + "i el", + "Ġtw enty", + "Ġeng age", + "off s", + "ĠPac ific", + "Ġsm ile", + "Ġperson nel", + "Ġto ler", + "Ġdo ors", + "Ġt one", + "Ġmach ines", + "Ġent ering", + "ten ance", + "C O", + "ĠJer sey", + "Ġfore st", + "Ġhor se", + "Ġcompl aint", + "ĠSpr ing", + "y o", + "ĠPl us", + "ed ing", + "ĠRet urn", + "qu arters", + "ial s", + "c ow", + "Ġacad emic", + "Ġf ruit", + "Ġ199 6", + "og ether", + "Ġw ine", + "Ġpur su", + "ĠSte ven", + "Ġlic ens", + "Wh o", + "Ġclot hes", + "re ction", + "Ġsqu ad", + "Ġst able", + "Ġr aw", + "z ens", + "St ar", + "ut ies", + "anc er", + "Ġke ys", + "ĠM u", + "Ġcompl icated", + "ig er", + "ĠTe xt", + "Ġabs or", + "Ġ6 8", + "Ġfun ny", + "Ġrel ief", + "ĠL ew", + "ĠC ook", + "Ġch art", + "Ġdraw ing", + "G E", + "Ġmod ule", + "ĠB ull", + "I LL", + "Ġs alt", + "0000 0000", + "il le", + "Ġres ource", + "aw ay", + "adel phia", + "ĠB ru", + "Ġ6 7", + "Ġsome body", + "Ġparticip ate", + "Ġro se", + "we red", + "Ġmus cle", + "Ġcons ent", + "Ġcontin uing", + "ĠGuard ian", + "ĠOr der", + "reg on", + "Ġre ar", + "Ġprov ision", + "Ġlik ed", + "ri ent", + "Ġb ra", + "Tr ans", + "Ġmeet ings", + "Ġto x", + "Ġcon vent", + "Ġaut o", + "Ġrec ording", + "ĠSo ft", + "00 1", + "ĠR oll", + "Ġprogram ming", + "Ġp ic", + "Ġprov ed", + "Ġst ab", + "ĠA st", + "Ġca ption", + "ul ating", + "ĠAtt ack", + "Ġnew ly", + "Ġ199 7", + "f r", + "Ġdis cipl", + "ĠGree k", + "Ġed ition", + "ĠDo es", + "ĠB ox", + "if le", + "ack et", + "Ġpass es", + "Ġgu est", + "Ġac celer", + "it als", + "U D", + "Ġaut hent", + "ĠR est", + "ov al", + "t a", + "u ine", + "Ġarm or", + "ĠT own", + "Ġcomp at", + "Ġinc hes", + "Des pite", + "Ġass ign", + "he rent", + "Ġprep are", + "ĠM eg", + "oc key", + "Ġdep ends", + "Ġtrack s", + "w atch", + "Ġl ists", + "ĠN orthern", + "Ġal ter", + "re c", + "ĠE astern", + "Ġcond em", + "Ġevery where", + "? 
'", + "Ġaff ili", + "Ġf ought", + "\": {\"", + "Ġm ac", + "it arian", + "Ġsc ope", + "ĠA L", + "aw s", + "ar ms", + "Ġqu e", + "Ġenjoy ed", + "nes ota", + "Ġagg ressive", + "ĠSt ory", + "ĠI V", + "Ġrec ipe", + "Ġrare ly", + "ĠMed ical", + "val ue", + "ang el", + "ay ing", + "omet hing", + "Ġsub section", + "Ġs outhern", + "Ġfrequ ency", + "re te", + "roll ed", + "ult s", + "ĠN ic", + "Ġbeh alf", + "Ġsequ ence", + "ab et", + "Ġcontrovers ial", + "Ġcomp rom", + "Ġwork er", + "Ġmain ly", + "Ġal gorith", + "ĠM ajor", + "or ce", + "g ender", + "Ġorgan ized", + "Ġf ake", + "Ġconclud ed", + "ĠE D", + "ĠEx ec", + "r age", + "Ġch ances", + "ber ry", + "ĠTr ad", + "Ġconfig uration", + "Ġwithd raw", + "Ġf ro", + "ud es", + "ĠBro ther", + "ĠB rian", + "Ġtri es", + "Ġsam ples", + "Ġb id", + "ĠGold en", + "Ġphot ograph", + "if est", + "ĠD O", + "ĠPar liament", + "******** ********", + "R em", + "Ġcont est", + "Ġsign ing", + "p x", + "ĠZ eal", + "âĶĢ âĶĢ", + "E ar", + "Ġex it", + "Be fore", + "ĠCor por", + "n ull", + "mon th", + "Ġrac ial", + "ott ed", + "ĠV eg", + "ĠRe uters", + "Ġsw ord", + "ps on", + "ĠRom ney", + "a ed", + "Ġt rib", + "Ġin ner", + "Ġprot ocol", + "ĠB i", + "ĠM iami", + "ever al", + "p ress", + "Ġsh ipping", + "ĠAm endment", + "ĠHow ard", + "con nect", + "ĠD isc", + "ĠJ ac", + "iam ond", + "ĠThere fore", + "s es", + "ĠPrin cess", + "ĠUS B", + "ĠAn th", + "Ġsurve illance", + "Ġap olog", + "Ġ6 1", + "ow a", + "Ġf ulf", + "j s", + "Ġl uck", + "ust ed", + "Ġ §", + "n i", + "Ġant icip", + "em an", + "Ġwin ner", + "Ġsil ver", + "ll a", + "ic ity", + "Ġunus ual", + "Ġcr ack", + "Ġt ies", + "e z", + "Ġpract ical", + "Ġprov ince", + "ĠPl ace", + "Ġprior ity", + "IC E", + "Ġdescrib es", + "Ġbr anch", + "F orm", + "ask a", + "miss ions", + "b i", + "Ġp orn", + "ĠTur k", + "Ġent hus", + "Ġf ighters", + "Ġ0 8", + "ĠDet roit", + "Ġfound ation", + "av id", + "A re", + "Ġjud gment", + "cl ing", + "Ġsol ve", + "ĠDes ign", + "W here", + "hes is", + "ĠT ro", + "a fter", + "Ġne utral", + "ĠPalestin ian", + "ĠHolly wood", + "Ġadv is", + "ĠN on", + "y es", + "ol is", + "Ġrep utation", + "Ġsm ell", + "Ġb read", + "ĠB ul", + "ĠBe ach", + "Ġclaim ing", + "Ġgen etic", + "Ġtechn ologies", + "Ġupgr ade", + "row s", + "Ġdevelop er", + "ĠJ osh", + "ĠDis ney", + "erv ed", + "ip al", + "Ġun ex", + "Ġbare ly", + "t hen", + "ĠP ub", + "Ġill ness", + "et ary", + "ĠB al", + "Ġp atch", + "Ġbut t", + "Ġst upid", + "ĠD og", + "ĠD allas", + "f ront", + "ie ce", + "Ġprot ests", + "Ġch at", + "oen ix", + "Ġw ing", + "Ġpar liament", + "Ġ7 7", + "ose xual", + "Ġre nder", + "pt ions", + "ĠCo ast", + "os a", + "ĠG reg", + "h op", + "ĠMan agement", + "Ġbit coin", + "Ġrec over", + "Ġincor por", + "or ne", + "ĠUs ing", + "Ġpre ced", + "Ġthreat ened", + "Ġspirit ual", + "ĠE vent", + "ĠF red", + "Ġadvert ising", + "Ġimprove ments", + "ĠC ustom", + "Ġer rors", + "Ġsens itive", + "ĠN avy", + "Ġcre am", + "L ook", + "Ġex clusive", + "Ġcomp rehens", + "Ġde leg", + "Ġcon ce", + "Ġrem em", + "Ġstruct ures", + "Ġst ored", + "N D", + "Ġ1 000", + "U P", + "ĠB udd", + "A F", + "w oman", + "ĠAcad emy", + "ð Ł", + "se a", + "Ġtem porary", + "Ab out", + "es ters", + "Ġtick ets", + "Ġposs ess", + "in ch", + "o z", + "Ġl a", + "Ġcontract s", + "Ġun p", + "Ġc ig", + "ĠK at", + "ult ural", + "as m", + "Ġmount ain", + "ĠCapt ain", + "St ep", + "m aking", + "ĠSp ain", + "Ġequ ally", + "Ġl ands", + "at ers", + "Ġreject ed", + "er a", + "im m", + "ri x", + "C D", + "Ġtrans action", + "g ener", + "less ly", + "Ġ| |", + "Ġc os", + "ĠHen ry", + "Ġprov 
isions", + "Ġg ained", + "Ġdirect ory", + "Ġra ising", + "ĠS ep", + "ol en", + "ond er", + "Ġcon sole", + "in st", + "Ġb om", + "Ġunc ertain", + "1 50", + "ock ing", + "Ġmeas ured", + "Ġpl ain", + "Ġse ats", + "Ġd ict", + "S L", + "af e", + "Ġest imate", + "iz on", + "at hered", + "Ġcontribut ed", + "Ġep isodes", + "omm od", + "G r", + "AN T", + "Ġ6 9", + "G ener", + "Ġ2 50", + "vious ly", + "rog en", + "Ġterror ism", + "Ġmove ments", + "ent le", + "oun ce", + "ĠS oul", + "Ġpre v", + "ĠT able", + "act s", + "ri ors", + "t ab", + "Ġsuff er", + "Ġn erv", + "Ġmain stream", + "ĠW olf", + "Ġfranch ise", + "b at", + "Ġdem ands", + "Ġag enda", + "Ġdo zen", + "Ġclin ical", + "iz ard", + "ĠO p", + "t d", + "Ġvis ited", + "ĠPer haps", + "Ġact or", + "Ġde lic", + "Ġcont ribute", + "Ġin ject", + "ĠE s", + "ac co", + "Ġlist ening", + "Ġcon gress", + "epend ent", + "Ġprem ium", + "Ġ7 6", + "ĠIr ish", + "Ġass igned", + "ĠPh ys", + "Ġworld wide", + "Ġnarr ative", + "ot ype", + "m ont", + "b ase", + "ĠB owl", + "ĠAdminist ration", + "Ġrel ation", + "ĠE V", + "C P", + "Ġco vers", + "Ġ7 8", + "Ġcert ific", + "Ġgr ass", + "Ġ0 4", + "pir acy", + "ir a", + "Ġengine ering", + "ĠM ars", + "Ġun employ", + "ĠFore ign", + "st ract", + "Ġv en", + "Ġst eal", + "Ġrepl ied", + "Ġult imate", + "Ġtit les", + "d ated", + "Ġj oy", + "a us", + "Ġhy per", + "ak u", + "Ġoffic ially", + "ĠPro duct", + "Ġdifficult y", + "per or", + "Ġresult ed", + "rib ed", + "l ink", + "wh o", + "~~ ~~", + "ĠSpe ed", + "ĠV iet", + "W ind", + "ĠBar ack", + "Ġrestrict ions", + "ĠSh are", + "Ġ199 5", + "ition ally", + "Ġbeaut y", + "op t", + "Ġm aps", + "ĠC R", + "ĠN ation", + "ĠCru z", + "W ill", + "Ġelectric ity", + "Ġor g", + "Ġb urd", + "Ġviol ation", + "Ġus age", + "Ġper mit", + "ĠCh ron", + "ĠF ant", + "Ġn aturally", + "Ġ0 7", + "Ġth rown", + "ĠAw oken", + "Ġal ien", + "ĠHer o", + "ĠK ent", + "ĠR ick", + "ri ke", + "Ġp ace", + "}, {\"", + "G L", + "Ġpo ison", + "ĠT ower", + "Ġform al", + "al ysis", + "Ġgen uine", + "Ġk il", + "a ver", + "Ġproced ure", + "ĠPro p", + "intend o", + "ĠM ain", + "as ant", + "Ġtr ained", + "G ame", + "ĠL oad", + "ĠM A", + "Ġcru cial", + "Ġle ts", + "ĠF R", + "Ġch ampion", + "1 01", + "ĠCon ference", + "Ġwrit ers", + "Ġconnect ions", + "Ġo kay", + "ir ms", + "ĠR and", + "Ġenc ounter", + "ĠB uff", + "Ġachie ved", + "Ġche cks", + "isc ons", + "Ġassist ant", + "Ġwhen ever", + "ĠA ccess", + "ĠU r", + "b in", + "Ġcl ock", + "is p", + "op her", + "Ġb orrow", + "Ġm ad", + "Ġperson ality", + "on ly", + "IS T", + "ab ama", + "Ġg ains", + "Ġcommon ly", + "Ġter r", + "Ġhyp ot", + "Ġre ly", + "Ġt iss", + "iscons in", + "Ġrid ic", + "f unction", + "ĠO regon", + "Ġun com", + "r ating", + "el and", + "ĠN C", + "Ġm oon", + "ann on", + "Ġvulner able", + "ut ive", + "³³ ³³", + "ĠRad io", + "Ġw estern", + "se ct", + "ĠT ony", + "Ġocc urs", + "ĠO s", + "ĠH on", + "à Ń", + "Ġv essel", + "ĠScot land", + "Ġdiscrim ination", + "Ġsubsequ ent", + "st ring", + "Ġfant asy", + "ĠSh adow", + "Ġtest im", + "W E", + "it i", + "r as", + "Ġbo at", + "Ġmar ks", + "Ġord inary", + "Ġre n", + "Ġrepresent ative", + "Ġpet ition", + "Ġ7 3", + "Ġad venture", + "Ġign ore", + "ĠPhil adelphia", + "ĠS av", + "V P", + "Ġfact ory", + "Ġt asks", + "Ġdep ression", + "z ed", + "................ 
................", + "ĠSt orm", + "Ġc ogn", + "Ġelig ible", + "Ġredu cing", + "v ia", + "Ġ0 5", + "Ġstri king", + "Ġdoll ar", + "h o", + "O V", + "Ġinstr ument", + "Ġphilosoph y", + "ĠMo ore", + "ĠA venue", + "Ġrul ed", + "ĠFr ont", + "IN E", + "ĠM ah", + "Ġscen ario", + "ĠNAS A", + "Ġen orm", + "Ġdeb ut", + "Ġte a", + "T oday", + "Ġabs ence", + "S im", + "Ġh am", + "le ep", + "Ġt ables", + "ĠHe art", + "M I", + "K e", + "re qu", + "V D", + "m ap", + "Ġchair man", + "Ġp ump", + "Ġrapid ly", + "v i", + "Ġsubstant ial", + "E P", + "d es", + "ch ant", + "ili pp", + "ĠS anta", + "ri ers", + "anche ster", + "L oad", + "ĠC ase", + "Ġsa ving", + "Ġ7 4", + "ĠA FP", + "er ning", + "oun ced", + "ĠMin nesota", + "ĠW as", + "Ġrec ru", + "Ġassess ment", + "ĠB ron", + "U E", + "Ġdynam ic", + "Ġf urn", + "ul ator", + "Ġprop ag", + "h igh", + "Ġacc ommod", + "Ġst ack", + "ĠS us", + "w rit", + "Ġre ven", + "ĠGod d", + "ĠZeal and", + "ab s", + "Ġbr ut", + "Ġper pet", + "h ot", + "Ġhard ly", + "ĠB urn", + "ãĤ ¹", + "Ġst y", + "Ġtrans actions", + "Ġg ate", + "Ġsc reens", + "Ġsub mitted", + "Ġ1 01", + "Ġlangu ages", + "ugh t", + "em en", + "Ġfall s", + "Ġc oc", + "Ĥ ¬", + "Ġstri kes", + "p a", + "Ġdel iber", + "ĠI M", + "Ġrel ax", + "ann els", + "ĠSen ator", + "Ġext rem", + "Ġ} ,", + "ĠDe b", + "Ġbe ll", + "Ġdis order", + "c ut", + "Ġi OS", + "Ġl ocked", + "Ġem issions", + "Ġshort ly", + "\" ]", + "ĠJud ge", + "ĠS ometimes", + "Ġr ival", + "Ġd ust", + "Ġreach ing", + "F ile", + "¯¯ ¯¯", + "ino is", + "ĠJ ason", + "Ġs atell", + "are t", + "Ġst ations", + "Ġag ric", + "ĠTechn ology", + "com es", + "ĠUn fortunately", + "ĠChild ren", + "Ġappl ies", + "ast ed", + "Ġan ger", + "ail ability", + "ĠDam age", + "Ġcomp are", + "ĠStand ard", + "Ġaim ed", + "ĠB a", + "angu age", + "Ġreg ulation", + "Ġj ury", + "Ġair port", + "Ġse ctions", + "ĠPr ince", + "em ed", + "Ġmedic ine", + "Ġh itting", + "Ġsp ark", + "ol ves", + "Ġad s", + "St ate", + "Ġfood s", + "Ġrepl acement", + "Ġch icken", + "Ġlow est", + "Ġmind s", + "Ġinvol ves", + "u i", + "Ġarr ang", + "Ġproced ures", + "ĠWh ich", + "ivers ary", + "Ġb ills", + "Ġimprove ment", + "Ġin ev", + "Ġexpect ations", + "Ġintellect ual", + "Ġsp aces", + "Ġmechan ism", + "2 50", + "bre ak", + "ĠZ e", + "ĠT enn", + "ĠB alt", + "Ġbar rel", + "Ġstat ic", + "man n", + "Pol ice", + "Ġt ips", + "Ġhand ling", + "c us", + "od ed", + "il ton", + "ir y", + "Ġjournal ists", + "our se", + "Ġcom ic", + "Ġnom ine", + "IT Y", + "Ġvers us", + "Ġlo op", + "Ġsur f", + "ĠInd ust", + "ĠHun ter", + "Ġbelief s", + "is an", + "Ġset up", + "Ġbre w", + "im age", + "Ġcomput ers", + "f ol", + "} ,\"", + "ĠMed al", + "Ġtax p", + "Ġdisplay ed", + "Ġg rav", + "Ġf iscal", + "M on", + "ĠMos cow", + "ĠK ong", + "ĠCent re", + "Ġcamer as", + "ĠMr s", + "ĠH ay", + "Ġa ver", + "ĠK elly", + "p y", + "Ġrequire ment", + "Ġent itled", + "omb ie", + "Ġsh adow", + "ag ic", + "ĠA k", + "Ġel ite", + "Ġdiv ided", + "Ġhead ing", + "Ġcop ies", + "Ġloss es", + "Ġv it", + "k ed", + "ĠB ry", + "Ġan s", + "ĠSte am", + "Ġrep orter", + "he im", + "ĠIt em", + "Ġsuper ior", + "d on", + "ere nt", + "à ¶", + "Ġtherap y", + "Ġpe ak", + "ĠMod el", + "Ġl ying", + "Ġg am", + "z er", + "r itten", + "Ġrespons es", + "Ġconsider ation", + "ĠB ible", + "Ġl oyal", + "Ġinst ant", + "Ġp m", + "ĠFore st", + "à ¼", + "Ġext end", + "Ġconv icted", + "Ġfound er", + "Ġconv in", + "ĠO ak", + "che ck", + "Ġsch olars", + "p ed", + "Ġover se", + "T op", + "c ount", + "ĠAr k", + " ·", + "Ġ0 6", + "ĠL A", + "m d", + "ĠLat in", + "im ental", + "ĠC PU", + "Ġsubst 
ance", + "Ġminor ity", + "Ġmanufact uring", + "E r", + "ocol ate", + "Ġatt ended", + "ĠMan ager", + "r ations", + "Ġappreci ate", + "om y", + "GB T", + "id ency", + "B L", + "Ġguarant ee", + "pos ition", + "Ġo cean", + "clud e", + "Ġhead ed", + "Ġt ape", + "Ġlo ose", + "Ġlog ic", + "Ġpro ven", + "Ġsp ir", + "Ġad mit", + "is a", + "Ġinvestig ate", + "Ġ199 4", + "sy lv", + "ĠL ost", + "c est", + "Ġ7 1", + "Ġrequest ed", + "Ġwind ows", + "ĠPok é", + "ĠWith out", + "M et", + "Ġbehavi our", + "Ġread er", + "Ġh ung", + "ĠKe ep", + "Ġro les", + "Ġimplement ed", + "Ġbl ank", + "Ġserv es", + "ĠJ ay", + "Ġc ited", + "ĠF riend", + "prof it", + "ap on", + "Ġrep air", + "it em", + "arr ass", + "Ġcrit ics", + "ad i", + "ĠF ather", + "Ġsh out", + "Ġf ool", + "Ġ8 8", + "Ġprodu cing", + "Ġl ib", + "Ġround s", + "Ġcirc le", + "Ġpre par", + "Ġsub mit", + "Ġn ic", + "mor row", + "ãĥ «", + "U nder", + "Ġv ital", + "ater n", + "Ġpass word", + "Ġpublic ation", + "Ġprom inent", + "Ġspeak s", + "Ġb ars", + "Ġde eper", + "ĠM ill", + "port ed", + "Ġw id", + "Ġbut ter", + "Ġsm oking", + "Ġindic ates", + "K ey", + "rop ri", + "ĠF ile", + "all ing", + "ast ing", + "ĠR us", + "Ġad j", + "Ġ7 9", + "av al", + "Ġpres um", + "bur gh", + "on ic", + "Ġf ur", + "Ġpoll s", + "ik a", + "Ġsecond ary", + "Ġmon ster", + "ig s", + "ĠCur rent", + "E vent", + "Ġowners hip", + "end ar", + "Ġarri ve", + "ĠT ax", + "Ġn ull", + "ĠPri v", + "Ġth ro", + "Ġk iss", + "c at", + "Ġup set", + "ang le", + "it ches", + "ect or", + "olog ists", + "ĠGal axy", + "Ġcor ruption", + "Ġh int", + "ent er", + "ĠH ospital", + "Ġgreat ly", + "Ġbeg un", + "es y", + "Ġso il", + "ĠAnt on", + "Ġmain tenance", + "ãĥ ©", + "Ġdo zens", + "Ġhuman ity", + "ĠAl abama", + "Ġr om", + "w orth", + "ap ing", + "sylv ania", + "l ah", + "Ġg athered", + "G A", + "Ġattack ing", + "f ound", + "ĠSqu are", + "Ġar bit", + "ict ions", + "ĠW isconsin", + "Ġd ance", + "ĠS aint", + "arch y", + "Ġbase ball", + "Ġcontribut ions", + "Ġliter ature", + "Ġex ha", + "per ty", + "t est", + "Ġb ab", + "Ġcontain er", + "let ter", + "Ġfall en", + "Ġwebs ites", + "Ġbott le", + "ĠS ac", + "Ġbre ast", + "ĠP L", + "Ġveter an", + "Ġinterview s", + "ĠA le", + "Ġb anned", + "eng ers", + "ĠRev olution", + "in th", + "Ġconc erning", + "IV E", + "Ġexp enses", + "ĠMatt hew", + "ĠColumb ia", + "d s", + "ist ance", + "Ġent ity", + ".. 
.\"", + "Ġrel iable", + "Ġpar alle", + "ĠChrist ians", + "Ġopin ions", + "Ġin du", + "l ow", + "Ġcompet e", + "Ġth orough", + "Ġemploy ed", + "Ġestablish ment", + "ig en", + "ĠC ro", + "Ġlawy ers", + "ĠSt ation", + "T E", + "ĠL ind", + "ĠP ur", + "it ary", + "Ġeffic iency", + "âĢ IJ", + "ĠL y", + "Ġm ask", + "Ġdis aster", + "Ġag es", + "ER E", + "es is", + "ĠH old", + "Ġcas ual", + "b led", + "Ġen abled", + "ĠEn vironment", + "ĠInt elligence", + "i per", + "ĠM ap", + "ĠB E", + "Ġemer ged", + "is dom", + "Ġc abin", + "Ġregist ration", + "Ġfing ers", + "Ġro ster", + "Ġfram ework", + "ĠDo ctor", + "et ts", + "Ġtransport ation", + "Ġaware ness", + "H er", + "Ġattempt ing", + "O ff", + "ĠSt ore", + "ÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤ", + "ĠK now", + "Ġdef ence", + "Ġsc an", + "ĠT en", + "ĠCh air", + "ĠP H", + "ĠAtl anta", + "Ġfuck ing", + "Ġans wered", + "b n", + "ĠK ar", + "Ġcateg ories", + "Ġr ational", + "Ġc ust", + "Ġrob ot", + "Ġcorrect ly", + "Ġg if", + "Ġgraph ics", + "m ic", + "Ġground s", + "ĠO pp", + "i ate", + "Ġdist ributed", + "Ġsan ctions", + "Ġchalleng ing", + "ut o", + "Ġingred ients", + "Ġinv ited", + "Ġfound ed", + "ĠRe qu", + "d ed", + "Ġb owl", + "Ġbrother s", + "ĠH a", + "I O", + "Ġw ages", + "im ore", + "oc ial", + "Ġse ed", + "ative ly", + "Ġaddress es", + "ĠI owa", + "ab eth", + "Ġatt itude", + "is d", + "ch ild", + "Ġm ole", + "Ġdisco very", + "y ard", + "B r", + "Ġ8 2", + "Ġsuppl ies", + "ell ing", + "Ġdist ingu", + "C R", + "Ġre cept", + "Ġ vert", + "Ġsw im", + "b ec", + "d oor", + "ĠY eah", + "Ġg al", + "Ġinter act", + "ĠE SP", + "ĠC S", + "amp s", + "Ġconvin ced", + "Ġobject ive", + "Ġdis h", + "ĠPhot os", + "l ad", + "Ġdownt own", + "o il", + "in ction", + "Ġto morrow", + "ĠC OM", + "Ġsurv ival", + "sh ot", + "Ġsett lement", + "C ons", + "ĠX box", + "int erest", + "ĠS M", + "arg o", + "en ess", + "Ġeth nic", + "b ered", + "M in", + "ĠT ok", + "Ġinc ent", + "ĠComm and", + "Ġmain tained", + "Ġbreak s", + "br idge", + "at ar", + "ag g", + "ĠF inally", + "un icip", + "ĠO nt", + "le ft", + "Ġrecogn ition", + "Ġ* /", + "ĠP ers", + "Ġwe lf", + "Ġaddress ed", + "ĠK ansas", + "Ġvir us", + "Ġwhere as", + "Ġp apers", + "ram s", + "ĠMin istry", + "Ġple asure", + "Ġacqu ired", + "Ġd uration", + "j pg", + "Ġcal m", + "ĠN HL", + "Ġburn ing", + "Ġfold er", + "ick ed", + "ĠP y", + "ĠIll inois", + "Cl ass", + "ĠGodd ess", + "Ġperform ing", + "Ġwelf are", + "j ar", + "In ter", + "Ġl in", + "Ġenh ance", + "Ġnot ion", + "f are", + "yp es", + "ĠAre a", + "Ġcann abis", + "ĠDie go", + "f s", + "ĠM anchester", + "com m", + "in ite", + "Ġcover ing", + "ĠS ound", + "Ġ19 60", + "Ġ8 4", + "e lect", + "z ing", + "Ġcitiz en", + "Ġph ones", + "Ġr aid", + "Ġign ored", + "ĠOb ject", + "Ġu pload", + "c ard", + "Ġmod ified", + "Ġroom s", + "ia h", + "r ange", + "he ast", + "ach us", + "Ġsuggest ing", + "âĢ ĭ", + "gr ade", + "E l", + "Ġclot hing", + "Ġr h", + "ĠH an", + "un ity", + "en cing", + "ĠAust in", + "sec ution", + "t ra", + "d em", + "ĠQ ual", + "Ġhe aven", + "Ġst ages", + "Ġw edd", + "pl us", + "ific ial", + "ĠIm m", + "ĠH o", + "iet ies", + "Ġphr ase", + "Ġbr ill", + "act ory", + "Ġprov iders", + "Ġsil ence", + "Ġa er", + "ĠA I", + "ĠAd venture", + "Ġplatform s", + "Ġdemonstr ated", + "Ġinter f", + "ing ton", + "Ġr aces", + "Ġgr ade", + "ult ane", + "ĠTh rough", + "f alse", + "Ġb ow", + "ĠA B", + "Ġfl avor", + "Ġhistor ic", + "g ov", + "Ġcol our", + "Ġview ed", + "ĠEm ail", + "el come", + "Ġinter vention", + "Ġd iversity", + "Ġperiod s", + "Ġre verse", + "ĠV ery", + "Ġqu ote", + "ĠLe ft", + "th rough", + 
"Ġsc rew", + "Ġland ing", + "Ġp ill", + "Ġw et", + "Ġprot esters", + "Ġrepe at", + "av ed", + "er k", + "Ġsal ary", + "ĠPenn sylvania", + "St ill", + "Ġmay or", + "Ġkit chen", + "Ġfeat uring", + "ĠM useum", + "ĠT ournament", + "ĠF al", + "Ġser vers", + "U C", + "Ġany body", + "im g", + "ĠTr ade", + "ixt ure", + "the less", + "Ġfin ance", + "Ġcl osing", + "ĠPat ri", + "i ac", + "ab el", + "Ġ> >", + "or ous", + "Ġf irms", + "sc reen", + "un a", + "Ġemb arrass", + "ul se", + "Ġlet ting", + "Ġth rew", + "ile y", + "Ġch annels", + "l an", + "ĠVeg as", + "Ġse ar", + "Ġfant astic", + "ar re", + "uzz le", + "ĠD er", + "Th ose", + "Ġsw ing", + "Ġshe et", + "ind ex", + "co ver", + "og an", + "Ġvari ables", + "ĠTe ch", + "Ġsp oken", + "ac hel", + "ĠD a", + "ĠMount ain", + "Ġload ed", + "Ġfoot age", + "vers ion", + "Ġun l", + "ĠPh oenix", + "Ġthrow ing", + "Ġf iring", + "Ġtrack ing", + "Ġw idth", + "Ġstrugg ling", + "ro oms", + "ot ion", + "Ġmonth ly", + "ĠSer ver", + "Ġegg s", + "op en", + "M C", + "Ġ199 3", + "Ġh ired", + "Ġstay ed", + "ĠAll en", + "Ġst ro", + "Ġ9 8", + "st ep", + "ĠTurk ish", + "Ġfab ric", + "ist ing", + "ĠD om", + "Ġd ates", + "Ġpr on", + "Ġbasket ball", + "Ġl ucky", + "ĠArab ia", + "Ġassum ed", + "est y", + "Ġaff airs", + "Ġgl ad", + "ĠInd eed", + "ĠF A", + "ĠW ord", + "Ġjo ining", + "if ice", + "p read", + "ir ts", + "ĠSe lect", + "Ġpop ulations", + "aw are", + "Ġn ose", + "Ġcompl aints", + "st art", + "Ġsc oring", + "Th anks", + "Ġmin ing", + "Ġvisit ors", + "S H", + "Ġdam aged", + "Ġcharacter istics", + "ĠP ent", + "D C", + "Ġ8 3", + "ĠS ix", + "r ates", + "Ġfl ags", + "ĠB rew", + "d og", + "M ark", + "// //", + "Ġexec ution", + "Ġj oke", + "ph ones", + "Ġtestim ony", + "Ġob st", + "Q L", + "ĠC ut", + "Ġstud ied", + "ĠN intendo", + "ick et", + "ĠN BC", + "Ġl ad", + "ĠB ra", + "ĠM oh", + "Ġk ernel", + "Ġoverwhel ming", + "Ġag ed", + "Ġapplic able", + "ĠC ond", + "Ġroad s", + "ĠBl ock", + "m ade", + "od ge", + "Ġcomm ands", + "Ġoff ices", + "vel and", + "Ġt ut", + "Ġrece iver", + "ĠF ro", + "Ġsho pping", + "Ġi P", + "ĠSt re", + "ĠA BC", + "Ġentertain ment", + "ĠB ow", + "ort ed", + "M c", + "Ġread s", + "gr ad", + "ĠCol lect", + "Ġâ ĪĴ", + "ĠCap ital", + "eder ation", + "Ġemploy er", + "Ġinvolve ment", + "Ġanx iety", + "al ia", + "Ġro of", + "ĠAm ong", + "ĠDemocr at", + "Ġstat s", + "ĠV ill", + "Ġconst itutional", + "Ġrefer ring", + "itt y", + "Ġtack le", + "out ube", + "Ġback ed", + "ĠH ong", + "ĠBro ad", + "Ġe le", + "ĠO tt", + "Ġ199 2", + "h our", + "achus etts", + "C al", + "Ġdefe ated", + "Ġ8 1", + "es p", + "Ġseem ingly", + "w as", + "ĠJ enn", + "ĠK urd", + "Ġg ene", + "Ġdisc ount", + "R et", + "EC T", + "( );", + "Ġclub s", + "Ġs id", + "ĠM arsh", + "Che ck", + "Ġp p", + "ĠE ag", + "ides pread", + "Ġbe ings", + "F T", + "Ġintrodu ction", + "ĠCh ange", + "AR D", + "Ġ1 10", + "ad ows", + "ier ce", + "Ġme al", + "a uthor", + "ĠB ang", + "lah oma", + "Ġr anks", + "201 1", + "?? ??", + "m ax", + "Ġcoll apse", + "Ġop ens", + "Ġe cho", + "Ġs oph", + "Ġrac ist", + "Ġenorm ous", + "Ġw aves", + "Ġt ap", + "Ġcomprehens ive", + ". 
--", + "ĠR oy", + "Ġfarm ers", + "Rel ated", + "a ired", + "ron es", + "ĠC rim", + "Ġproport ion", + "Ġdesign s", + "Ġnegoti ations", + "Ġvirt ually", + "ĠBat man", + "Ġwar n", + "Ġlegit imate", + "m ate", + "Ġcon vention", + ", ,", + "net ic", + "ĠS D", + "Ġconsist ently", + "Ġcompens ation", + "Ġpunish ment", + "Ġy e", + "Ġt ie", + "ĠB ureau", + "ir lf", + "ĠB u", + "ĠA ren", + "ĠPh ilipp", + "Ġkn ife", + "Ġmem ories", + "ĠR oss", + "Ġang le", + "Ġ8 6", + "ĠTh under", + "Ġre nd", + "ĠT our", + "Ġcount s", + "s ung", + "ĠIm p", + "Ġeduc ational", + "Ġaccess ible", + "C OM", + "Ġd rew", + "y er", + "G l", + "am ine", + "OR T", + "O B", + "I B", + "m aster", + "Ġtri als", + "og y", + "h ar", + "ĠTr ust", + "Ġprefer red", + "irlf riend", + "ĠN ev", + "Ġb in", + "Ġc ow", + "P age", + "Ġsign ature", + "ĠB L", + "7 00", + "Ġret ired", + "Ġby tes", + "Ġneigh b", + "ĠLeg end", + "Ġdev ast", + "Ġsuspect ed", + "is ons", + "ĠPoké mon", + "sc ale", + "Ġcap abilities", + "Ġre vel", + "Ġche ese", + "d y", + "igr ant", + "Ġfail ing", + "b its", + "ĠHer oes", + "ĠG host", + "ĠS cient", + "Ġappoint ed", + "ur i", + "Ġinst itution", + "Ġexpand ed", + "g reg", + "Ġmonitor ing", + "Ġp odcast", + "Ġcoal ition", + "Ġ9 6", + "J o", + "Ġst olen", + "ĠS ab", + "Ġstop s", + "Ġhol iday", + "Ġint r", + "C ar", + "Bl ack", + "ĠL GBT", + "Ġwar ming", + "ĠAnd erson", + "Ġ8 9", + "Ġprodu cer", + "M ed", + "Ġaccur acy", + "ĠMar vel", + "iz abeth", + "ĠPat rick", + "m ony", + "Ġmin i", + "ac les", + "Ġover t", + "the y", + "Ġmembers hip", + "ĠV en", + "Ġex ch", + "Ġrem oval", + "ĠD ave", + "T Y", + "m ad", + "ĠF ind", + "Ġad equ", + "Ġe c", + "Ġte eth", + "Ġemot ion", + "Ġper m", + "Ġsole ly", + "d b", + "Ġextra ord", + "IG HT", + "c al", + "Ġgu idelines", + "Ġd ying", + "Ġsusp ended", + "ĠPrem ier", + "ĠAnth ony", + "el ve", + "Ġd ad", + "ĠE th", + "ĠFoot ball", + "Ġabandon ed", + "Ġ< <", + "Ġm arch", + "Ġhor ror", + "âĢ¦ \"", + "Ġchild hood", + "Ġcampaign s", + "Ġl unch", + "ĠAl bert", + "bl ock", + "âĸĪ âĸĪ", + "ound ing", + "Ġb one", + "or gan", + "ad ers", + "ĠFl ash", + "ĠDri ve", + "Ġton ight", + "Ġw ars", + "ĠF L", + "Ġform ation", + "con st", + "New s", + "Ġcom pe", + "or ious", + "ĠSt aff", + "Ġdiscuss ions", + "ĠProt ection", + "ĠJ am", + "Ġcrit eria", + "Ġinstall ation", + "Ġaccompl ish", + "iz za", + "Ġpub lisher", + "Ġresc ue", + "ĠT ry", + "U LL", + "ĠS om", + "ĠH op", + "ore t", + "th s", + "ord on", + "Ġp ocket", + "ĠIn v", + "Down load", + "ĠCr ime", + "Ġb ene", + "ĠGu ide", + "ĠAs sembly", + "Ġparam eters", + "I E", + "ĠAlex ander", + "Ġconc ert", + "ĠSc he", + "Ġsh oes", + "Ġvis iting", + "Ġrec all", + "Ġb ub", + "Ġr ural", + "Ġconc rete", + "ĠR os", + "N ext", + "R uss", + "Ġlo ans", + "ĠSh ield", + "Ġtre m", + "hem at", + "k g", + "ĠHar ris", + "is ition", + "ĠM ove", + "ĠF C", + "Ġf ate", + "ĠCh o", + "Ġt ired", + "Ġprinc ipal", + "h ist", + "ien ces", + "ath y", + "Ġse vent", + "Ġm ood", + "Ġstrateg ic", + "Ġdise ases", + "Ġfor um", + "Ġtem por", + "Ġhead quarters", + "P ar", + "ig e", + "fl ix", + "Ġgu itar", + "Ġ9 4", + "On ly", + "Ġrele ases", + "ro ph", + "================ ================", + "Ġ6 00", + "ĠContin ue", + "ig ate", + "ĠC rit", + "sy stem", + "Ġdis abled", + "Ġunex pected", + "ith ub", + "Ġuncle ar", + "ĠE st", + "Ġcontr ad", + "Ġstrateg ies", + "vent ures", + "Ġpass age", + "AM E", + "Ġimpro ving", + "Ġreve als", + "Ġdecre ase", + "ov a", + "Ġann oy", + "ĠSh ort", + "ĠL ibrary", + "Ġcy ber", + "n ell", + "ĠH ur", + "ĠC B", + "Ġphot ograp", + "U I", + "Ġs ed", + "G e", + "Ġ8 7", 
+ "Ġd iverse", + "Ġencour aged", + "Ġcons piracy", + "Ġbird s", + "Ġoper ator", + "Ġhand ful", + "Ġclass ified", + "? )", + "Ġdram atic", + "Ġinvestig ators", + "it o", + "Ġw idespread", + "ĠR oom", + "-------------------------------- --------------------------------", + "Ġcollect ive", + "Ġjournal ist", + "St ring", + "Ġtemper atures", + "il a", + "Ġgu id", + "Ġins pect", + "Ġmiss ile", + "ĠMay or", + "Ġman ual", + "Ġsim ultane", + "Ġrat ings", + "Ġsu ck", + "Ġ9 7", + "Ġunivers al", + "Ġph arm", + "Ġdis rupt", + "ian o", + "A V", + "Ġf t", + "Ġstat ist", + "old s", + "ĠWalk er", + "ph p", + "Ġunder t", + "ĠL as", + "ish op", + "nt il", + "res hold", + "ĠWhe ther", + "M s", + "Ġden y", + "ĠCl oud", + "Ġprov ider", + "Ġsurv iv", + "ĠUp date", + "h as", + "Ġmist akes", + "ch arge", + "pl ed", + "r ity", + "Ġn ode", + "ĠMass achusetts", + "ool s", + "lic ation", + "Ġf ails", + "em ale", + "or i", + "back s", + "Ġsh irt", + "Ġ' '", + "ĠN AT", + "Ġwat ers", + "els on", + "Ġe ase", + "Ġsc ar", + "Ġcont ents", + "m ind", + "Ġcont ribution", + "Ġsh r", + "Ġhand ed", + "Ġst ability", + "Ġtra ve", + "E m", + "Ġmir ror", + "12 3", + "Ġwe igh", + "Ġf iction", + "ou ver", + "ist ant", + "r ition", + "ĠF ed", + "Ġphys ically", + "Ġst ake", + "ĠArt icle", + "ĠAr c", + "ĠLew is", + "ĠM ind", + "Ġdemonstr ate", + "Ġprof its", + "v ision", + "om ic", + "ol id", + "Ġbatt les", + "Ġdri ves", + "Ġeas tern", + "ĠS ony", + "!! !", + "ar ation", + "v ard", + "ĠG L", + "port ation", + "Ġ9 2", + "Ġlaw makers", + "Ġprotect ing", + "ĠE PA", + "Ġy eah", + "Ġsh ame", + "ol ph", + "e ven", + "x it", + "Ġatt ach", + "Ġrepresent ing", + "Ġob s", + "ĠUt ah", + "iff s", + "ĠFre edom", + "à ³", + "A K", + "Ġinc idents", + "it age", + "Ġview ers", + "c d", + "Ġm ouse", + "Ġcl ar", + "Ġaccord ance", + "Ġb ot", + "c or", + "ĠSum mer", + "he ld", + "Ġinnoc ent", + "Ġiniti ative", + "ol s", + "________________ ________________", + "Ġsp ots", + "p ace", + "Ġconvent ional", + "Ġcorpor ations", + "Ġblock ed", + "H D", + "at tered", + "Ġref ers", + "Ġbu ck", + "ĠDig ital", + "12 0", + "Ġtop ics", + "T F", + "Ä ģ", + "br id", + "re ement", + "Ġunder lying", + "ĠM ember", + "Ġinvestig ating", + "Ġpregn ancy", + "Ġtouch down", + "ĠB and", + "ĠCall er", + "Ġinst ances", + "P P", + "w a", + "G ood", + "Ġ199 1", + "ĠC old", + "Ġfear s", + "Ġrem arks", + "Ĩ Ĵ", + "at al", + "Ġm it", + "Ġexper iments", + "i pt", + "Col or", + "ind u", + "Up date", + "Ġ9 3", + "A g", + "Ġ å", + "anc ouver", + "B oth", + "Ġjud ges", + "Ob ject", + "Ġst ere", + "umb n", + "Ġparticip ation", + "ĠSt ars", + "ĠJ ere", + "Ġweek ly", + "ĠB an", + "Ġconvers ations", + "ĠP itt", + "u z", + "ĠIndian a", + "ĠK ick", + "Ġinf ection", + "Ġhero es", + "Ġsett led", + "Ġstri p", + "Ġh al", + "Ġd ump", + "ĠS ci", + "Ġl es", + "Ġref erences", + "ĠU RL", + "ĠBr idge", + "Ġwant ing", + "For ce", + "Ġex clus", + "Me anwhile", + "m n", + "Ġg entle", + "m aker", + "sen al", + "ĠG ro", + "ou ri", + "ĠR ain", + "ĠAll iance", + "Ġl ift", + "el a", + "S D", + "ĠCle veland", + "Ġrank ed", + "Ġst adium", + "Ġdead ly", + "ä ¸", + "Ġr iding", + "ar ia", + "ĠAr mor", + "Ġdocument ation", + "ĠGree ce", + "ree k", + "Ġl ens", + "ĠS a", + "Ġg ross", + "ĠE mer", + "ag ers", + "ĠD ub", + "ĠR h", + "ĠAM D", + "Ġarri val", + "Ġdes ert", + "Ġsupp lement", + "ĠRes p", + "Ġkn ee", + "Ġmarg in", + "f ont", + "og g", + "201 0", + "ĠP ir", + "ĠP rom", + "iv als", + "Ġint ake", + "Ġdifferent ly", + "ug s", + "Ġb its", + "clud ed", + "Ġsearch ing", + "ĠD u", + "um ble", + "Ġfunction al", + "ĠBalt imore", 
+ "ĠC ould", + "Ġdes ired", + "Ġcirc uit", + "ĠL yn", + "ĠG O", + "ĠF alse", + "re pre", + "' :", + "alt ies", + "Ġmin im", + "Ġdro ve", + "ĠSh ould", + "Ġh ip", + "Ġpro s", + "Ġut ility", + "ĠN ature", + "ĠM ode", + "P resident", + "o pp", + "r at", + "form ance", + "Ġconcent ration", + "Ġf ont", + "ĠB ud", + "Ġam id", + "Ġre vers", + "ĠM L", + "B ar", + "Ġinter action", + "Ġjur isd", + "Ġspell s", + "d ep", + "f il", + "Ġcivil ians", + "ut ter", + "ĠCo oper", + "ĠBel ow", + "Ġent rance", + "Ġcon vert", + "Ġcontrovers y", + "ow ered", + "Ġcontr ary", + "Ġar c", + "ĠExec utive", + "ĠOffic er", + "Ġpack ages", + "Ġprog ressive", + "w idth", + "Ġreserv ed", + "v ol", + "ĠSam sung", + "Ġprint ed", + "Ġcent ers", + "Ġintrodu ce", + "ĠKenn edy", + "Ġodd s", + "Ġsure ly", + "Ġindepend ence", + "Ġpass engers", + "repre ne", + "ĠBe h", + "Ġl oves", + "ĠESP N", + "Ġfac ilit", + "Ġident ical", + "Ġdo ct", + "Ġpartners hip", + "con f", + "ĠH ide", + "Ġconf used", + "ĠC ow", + "M en", + "Ġw rest", + "ĠIraq i", + "Ġh oles", + "ĠStud ies", + "Ġpregn ant", + "h ard", + "Ġsign als", + "I X", + "Ġpull ing", + "Ġgrad uate", + "Ġnomine e", + "D ate", + "Ġper mitted", + "Ġâ Ĥ¬", + "ĠOk lahoma", + "St art", + "Ġauthor ized", + "Ġal arm", + "ĠC os", + "v an", + "Ġgener ations", + "c ular", + "Ġdr agon", + "ĠSoft ware", + "ĠEd ward", + "Ġcontro ller", + "S en", + "ge red", + "ĠV ik", + "Ġappro ached", + "Th ank", + "Ġcan ce", + "Ġform ula", + "ĠSm all", + "Ġweak ness", + "Ġr amp", + "it udes", + "j ud", + "Ġbrill iant", + "Ġacc us", + "s ource", + "Ġ8 00", + "ĠE vil", + "S w", + "Ġhom eless", + "we ek", + "i ens", + "r ics", + "ĠTh ird", + "T O", + "Ġorgan ic", + "Ġpresent ation", + "ag h", + "ĠDown load", + "v ation", + "Ġas sembly", + "or able", + "hold ers", + "ĠBern ie", + "ĠHel p", + "Ġt ong", + "ĠF ight", + "Ġbe ach", + "B ook", + "ĠL ic", + "Ġr ush", + "ĠR ound", + "ou p", + "ĠMar x", + "Ġcalcul ated", + "ĠDe vil", + "ĠSar ah", + "Ġoccasion ally", + "Ġbul let", + "Av ailable", + "g ate", + "Ġ9 1", + "Ġh osp", + "Ġprom ises", + "ĠH IV", + "ĠSt adium", + "ĠSt ock", + "ĠCorpor ation", + "g age", + "N G", + "ĠC redit", + "Ġs ne", + "ib l", + "Ġacc um", + "s uch", + "Ġterror ists", + "Ġconscious ness", + "ĠZ h", + "Ġdram a", + "ool a", + "pir ation", + "Ġlab our", + "ĠN in", + "Ġut ter", + "Ġdemocr atic", + "Ġass ass", + "il ation", + "Ġg est", + "Ġab road", + "Ġmet ab", + "Ġs orts", + "Ġfl av", + "U B", + "Ġm g", + "ĠNot hing", + "ĠO d", + "Ġmus ical", + "200 9", + "Ġdro ps", + "oc ated", + "ater al", + "0000 00", + "Ġg re", + "Ġequ ality", + "Ġburd en", + "Ġv ig", + "ĠLe ader", + "-------- ----", + "Ġcere mony", + "Ġf ighter", + "Ġact ors", + "Ġ æ", + "am an", + "F i", + "Ġal ign", + "put er", + "Ġe lder", + "ĠN SA", + "Ġrepresent ation", + "ĠOnt ario", + "IT H", + "usal em", + "Ġharass ment", + "itz er", + "Ġsy mp", + "Ġbox es", + "ĠD R", + "Ġman ifest", + "at re", + "Ġ ^", + "Ġd ies", + "le ton", + "Ġmiss ions", + "et he", + "Ġres olve", + "Ġfollow ers", + "Ġas c", + "Ġk m", + "l ord", + "am med", + "Ġsil ent", + "ĠAssoci ated", + "Ġtim ing", + "Ġprison ers", + "ĠK ings", + "ĠF ive", + "Ġtow er", + "Ġappro aches", + "Ġprecise ly", + "Ġb ureau", + "ĠM other", + "ĠI ss", + "Ġkey board", + "it ual", + "Ġfund ed", + "Ġstay ing", + "Ġpsych ological", + "Ġm ile", + "ĠLe on", + "ĠBar b", + "w ill", + "Ġw ider", + "ĠAtl antic", + "Ġt ill", + "ĠR ome", + "ro t", + "Ġaccomp an", + "Ġfl our", + "ac o", + "W orld", + "ĠExp ress", + "ĠY u", + "C or", + "Ġple ased", + "part y", + "Ġpoint ing", + "Ġinf lation", + "Ġro 
y", + "Ġ ),", + "ain er", + "Ġwedd ing", + "orm on", + "Ġrequ iring", + "Ġqual ified", + "Ġse gment", + "EN D", + "Ġs izes", + "e als", + "Ġcor rupt", + "ass ador", + "Ġcele b", + "Ġdream s", + "ĠM ess", + "Ġcheck ing", + "ĠV ersion", + "Ġprep aring", + "Ġact ively", + "ĠD iff", + "Ġl ux", + "ĠW inter", + "act eria", + "ĠN E", + "Ġdep uty", + "Ġtrans gender", + "Ġsum mary", + "Ġin her", + "er ies", + "ch ar", + "ĠY an", + "Ġkn ock", + "ĠP ath", + "Ġl ip", + "roll er", + "Ġimp ression", + "Ġcelebr ate", + "Ġsl ide", + "Ġgu ests", + "Ġcl ip", + "F S", + "Ġsav ings", + "Ġcapt ain", + "Ġleg acy", + "ĠDen ver", + "Ġw ounded", + "tab oola", + "AC T", + "Ġpurs ue", + "Ġo xy", + "Ġ q", + "Ġsem i", + "ĠN eed", + "ĠAff airs", + "Ġob sc", + "Ġcheck ed", + "Ġd ual", + "C ode", + "ĠM D", + "le m", + "ult y", + "Ġ ©", + "ĠEl izabeth", + "Ġcent uries", + "ard ed", + "s rc", + "Ġev ident", + "enn is", + "at in", + "Ġunemploy ment", + "ĠMar io", + "Ġint im", + "Ch rist", + "Ġbi ological", + "Ġsold ier", + "ĠAdd ed", + "Ġm ath", + "ĠG il", + "Ġbi as", + "Ġd ating", + "ĠO cean", + "Ġm ice", + "M us", + "h ire", + "ĠT es", + "Ser ver", + "lim ited", + "S ize", + "Ġmet ers", + "Ġrock et", + "es see", + "Ġcertific ate", + "ĠIran ian", + "AS S", + "Ġgr id", + "D ec", + "Ġro lling", + "com mun", + "ĠSwed en", + "b ury", + "Ġtiss ue", + "Ġrac ism", + "ĠL ocal", + "Ġmyster y", + "Ġexam ine", + "Ġst em", + "Ġs its", + "Ġhop ed", + "ot ing", + "Ġdial ogue", + "Ġpers u", + "W atch", + "l ay", + "M AN", + "Ġch ronic", + "ĠPort land", + "mark et", + "ĠS EC", + "Ġparalle l", + "Ġsc andal", + "Ġcar ries", + "Ġphenomen on", + "h uman", + "ack er", + "ĠO x", + "Ġretire ment", + "tain ment", + "ov ie", + "ĠG ear", + "Ġd uties", + "Ġdo se", + "Ġsc roll", + "M B", + "in f", + "Ġsa uce", + "Ġland scape", + "red dit", + "ĠChampions hip", + "ĠRed dit", + "al id", + "Ġco in", + "Ġover s", + "Ġpost ing", + "ab out", + "Ġf el", + "and y", + "Ġb old", + "Ġfocus ing", + "e ffect", + "G R", + "Ġde emed", + "Ġrecommend ations", + "Ġste pped", + "Ġvot er", + "ĠDe ep", + "ĠInst agram", + "Ġmoder ate", + "ĠMary land", + "Ġrestrict ed", + "ĠM B", + "ĠCh all", + "Ġto b", + "Ġc ir", + "ĠO cc", + "ĠE ver", + "Ġcoll aps", + "IN FO", + "= -", + "ĠP ict", + "ĠAcc ount", + "n c", + "Ġo ught", + "Ġex port", + "Ġdr unk", + "( '", + "Ġw ise", + "ĠM ort", + "ne cess", + "Ġan cest", + "ĠInc re", + "Ġfrequ ent", + "m ir", + "Ġinterpret ation", + "Ġdepend ent", + "Ġco ins", + "ĠB ol", + "V ideo", + "ĠJust in", + "Ġfat al", + "Ġcook ing", + "Ġconf usion", + "ip her", + "Ġcust ody", + "ĠMor gan", + "om ach", + "ĠGovern or", + "Ġrestaur ants", + "el ing", + "Ġacknowled ged", + "Ġthe r", + "Ġgen es", + "ch ing", + "He y", + "Ġtact ics", + "ĠMex ican", + "Ġv end", + "Ġhe s", + "qu er", + "Ġnot ing", + "ĠCamer on", + "Ġtarget ing", + "ro ck", + "Ġcred its", + "Ġemot ions", + "Ġrepresent atives", + "new s", + "Ġlegisl ative", + "Ġrem oving", + "Ġtweet ed", + "ĠCar ter", + "ĠF ixed", + "Ġfor cing", + "Ġspeak er", + "Ġm ales", + "ĠViet nam", + "l ined", + "Ġconcept s", + "Ġvo ices", + "o ir", + "ĠT rib", + "W he", + "ĠJer usalem", + "ĠS ant", + "Ġc ul", + "Ġl ady", + "ĠHaw ai", + "Ġar ts", + "ĠIn n", + "ĠMach ine", + "ĠEm peror", + "Ġsl ot", + "g ly", + "ĠPro cess", + "II I", + "Ġathlet es", + "ĠTem ple", + "ĠRep resent", + "Ġpres c", + "Ġt ons", + "Ġgold en", + "Ġp unch", + "ĠG R", + "iver pool", + "Ġen act", + "Ġlob by", + "Ġm os", + "Ġpick ing", + "Ġlif etime", + "Ġcogn itive", + "E ach", + "z o", + "Ġd ub", + "Ġcons ists", + "ol n", + "Ġf estival", + "am 
ous", + "Ġint ellig", + "w ords", + "ĠSm art", + "Ġde le", + "Ġl apt", + "Ġmag ical", + "ĠS in", + "b us", + "ur ities", + "igh th", + "ĠRub y", + "ĠS ure", + "ol ving", + "Ġj un", + "O ST", + "Ġimp osed", + "Ġast ron", + "Ġcor rel", + "ĠN S", + "ĠK it", + "ĠF uture", + "b urn", + "Ġimm une", + "oc us", + "Ġcour ses", + "ĠSt ring", + "Ġle an", + "Ġg host", + "Ġout comes", + "Ġexp ense", + "Ġevery day", + "Ġaccept able", + "A h", + "Ġequ ipped", + "Ġor ange", + "F R", + "ĠD utch", + "Th ough", + "ĠR ank", + "Q U", + "ĠRober ts", + "wh at", + "re nd", + "Ġdisapp ear", + "Ġsp awn", + "ĠL am", + "o is", + "Ġdes erve", + "Ġmin imal", + "Ġnerv ous", + "ĠW ould", + "Ġro ok", + "ĠV ancouver", + "Ġres ign", + "sh ire", + "ĠW orks", + "ĠB uild", + "Ġafford able", + "ĠG ary", + "ĠAren a", + "Ġh anging", + "Ġimpl ications", + "ĠS ong", + "Ġmain taining", + "Ġgu ards", + "C ON", + "Ġder ived", + "Ġexecut ed", + "Ġthe ories", + "Ġqu oted", + "ĠAnd re", + "og a", + "sel ess", + "in fo", + "ĠBel g", + "Ġt ears", + "ĠSur v", + "Ġbirth day", + "ig ious", + "im mer", + "Ġspect rum", + "Ġarchitect ure", + "Ġrec ruit", + "arm a", + "T able", + "Ġmon sters", + "ĠG ov", + "Ġdest ination", + "Ġattract ive", + "Ġf oss", + "ĠMore over", + "Ġpres ents", + "TH E", + "Ġrep ly", + "pt on", + "Ġc um", + "Ġdel ight", + "Ġaffect s", + "Ġdon ations", + "ĠT oy", + "ĠH im", + "M ENT", + "Ġover come", + "it ched", + "ĠFant asy", + "ĠH at", + "ĠBe ast", + "b ott", + "Ġinvestig ations", + "R un", + "Ġhun ting", + "d i", + "f und", + "Ġs essions", + "est yle", + "Ġport ray", + "oid s", + "Y eah", + "Ġcommun icate", + "Ġcom edy", + "ĠY ang", + "Ġbel t", + "ĠMar ine", + "Ġpredict ed", + "Pl ay", + "Ġimportant ly", + "Ġremark able", + "Ġelim inate", + "D avid", + "Ġb ind", + "V ID", + "Ġadvoc ates", + "ĠG aza", + "im p", + "D B", + "ĠN a", + "ĠSim ilar", + "I ES", + "Ġchar ity", + "v as", + "m ath", + "Ġâ ĸ", + "ok er", + "nd um", + "Ġcap s", + "ĠH al", + "2 000", + "e an", + "Ġfle et", + "Ġrec re", + "R ight", + "Ġsleep ing", + "ij ing", + "k ind", + "Ġdesign ated", + "à ¤", + "Ġanim ation", + "ke e", + "ĠInt rodu", + "Ġ/ >", + "Ġdelay ed", + "Ġtrem end", + "Ġcur ious", + "U se", + "Ġle ct", + "d am", + "Ġinnov ation", + "ĠPoint s", + "Ġload ing", + "Ġdisp ute", + "ct ic", + "ird s", + "ĠB Y", + "Ġn urs", + "ĠVal ue", + "ION S", + "ĠH um", + "Ġtem plate", + "m ers", + "Ġappear ances", + "ĠEnter tainment", + "Ġtransl ation", + "Ġsa ke", + "Ġbene ath", + "Ġin hib", + "Ġe uro", + "abet es", + "Ġstud ying", + "ĠM as", + "Ġper ceived", + "Ġexam ined", + "Ġe ager", + "Ġco aches", + "Ġim per", + "ch i", + "Ġprodu ces", + "\" ).", + "ĠEvery one", + "Ġm unicip", + "Ġg irlfriend", + "Ġh ire", + "ĠV ice", + "Ġsu itable", + "op y", + "Ġin equ", + "ĠD uke", + "f ish", + "f irst", + "ĠO bs", + "Ġinter ior", + "ĠBru ce", + "ĠR y", + "Ġanal ys", + "Ġconsider able", + "Ġfore cast", + "Ġf ert", + "ors hip", + "ĠD rug", + "ĠA LL", + ": \"", + "th ur", + "ĠM ail", + "Ġball ot", + "Ġinst antly", + "ĠCh annel", + "Ġp icks", + "Ġ198 9", + "Ġt ent", + "ol i", + "Ġcivil ian", + "b ling", + "ell o", + "b u", + "Ġin ch", + "Ġlog o", + "Ġcooper ation", + "Ġwal ks", + "Ġinvest ments", + "Ġimp rison", + "ĠF estival", + "ĠK y", + "Ġleg ally", + "Ġg ri", + "ch arg", + "S l", + "Ġthreat ening", + "du ction", + "fl ow", + "Ġdismiss ed", + "ibr aries", + "c ap", + "e le", + "ĠMc G", + "ĠHar vard", + "ĠConserv ative", + "ĠC BS", + "p ng", + "Ġro ots", + "ĠH aving", + "umb led", + "ĠF un", + "\\ /", + "ĠS earch", + "ple x", + "Ġdiscuss ing", + "Ġcontin u", + "ĠT ai", 
+ "ĠW ik", + "F ree", + "f it", + "Ġref use", + "Ġmanag ing", + "Ġsy nd", + "ip edia", + "w alk", + "Ġprofession als", + "Ġguid ance", + "Ġunivers ities", + "Ġas semb", + "unt u", + "F inally", + "AS E", + "ĠAut o", + "ĠH ad", + "Ġann iversary", + "L D", + "ĠD ur", + "ĠUlt imate", + "ih ad", + "pro duct", + "Ġtrans it", + "Ġrest ore", + "Ġexpl aining", + "Ġass et", + "Ġtransfer red", + "Ġbur st", + "ap olis", + "ĠMag azine", + "ĠC ra", + "ĠB R", + "gg ed", + "ĠH E", + "M ich", + "b et", + "ĠL ady", + "yl um", + "erv es", + "Ġme ets", + "wh ite", + "L og", + "Ġcorrespond ing", + "Ġins isted", + "G G", + "Ġsurround ed", + "Ġt ens", + "Ġl ane", + "Ġco inc", + "h ome", + "Ġexist ed", + "ect ed", + "ĠDou ble", + "lam m", + "Ġske pt", + "ex p", + "Ġper ception", + "ie v", + "ĠBe ing", + "o ft", + "Ġadop t", + ". :", + "] ;", + "Wind ows", + "Ġsatell ite", + "AS H", + "Ġinf ant", + "d escription", + "ĠMe anwhile", + "c m", + "oc a", + "ĠT reat", + "act or", + "Ġtob acco", + "ĠN orm", + "em ption", + "Ġfl esh", + "Ġj e", + "o op", + "ĠHe aven", + "Ġbe ating", + "an im", + "Ġgather ing", + "Ġcult iv", + "G O", + "ab e", + "ĠJon athan", + "ĠSaf ety", + "Ġbad ly", + "pro t", + "Ġcho osing", + "Ġcontact ed", + "Ġqu it", + "Ġdist ur", + "Ġst ir", + "Ġto ken", + "D et", + "ĠP a", + "Ġfunction ality", + "00 3", + "s ome", + "Ġlimit ations", + "Ġmet h", + "b uild", + "con fig", + "N T", + "re ll", + "ble m", + "ĠM om", + "Ġveter ans", + "ĠH u", + "Ġtrend s", + "are r", + "ĠG iven", + "ĠCa ption", + "m ay", + "AS T", + "Ġwond ering", + "ĠCl ark", + "n ormal", + "Ġsepar ated", + "Ġdes p", + "st ic", + "b rew", + "Ġrel ating", + "ĠN ik", + "ĠF arm", + "Ġenthus i", + "g ood", + "d eb", + "Ġactiv ist", + "Ġm art", + "Ġexplos ion", + "ĠEconom ic", + "L ink", + "Ġins ight", + "Ġconven ient", + "Ġcounter part", + "su pport", + "ĠV irt", + "ag en", + "ĠTenn essee", + "ĠSim on", + "ĠA ward", + "OC K", + "ĠF igure", + "Ġoverse as", + "Ġpr ide", + "ĠC as", + "n ote", + "m g", + "C urrent", + "Ġdispl ays", + "cont ent", + "Ġtravel ing", + "Ġhosp itals", + "ĠFin ancial", + "ĠP ast", + "Ġdefend ant", + "Ġstream ing", + "m ble", + "ĠBer lin", + "uk i", + "Ġdist ribut", + "Ġant ib", + "Ġch ocolate", + "ĠCast le", + "Ġinter rupt", + "ĠR ow", + "Ġconvers ion", + "Ġbug s", + "ĠR ather", + "li est", + "L Y", + "ĠJe an", + "com mon", + "ak h", + "Ġ1 30", + "ot ton", + "ĠDe an", + "Ġam endment", + "Ġgame play", + "ĠWar ren", + "od a", + "Ġhigh lights", + "Ġir re", + "ĠNAT O", + "Ġball s", + "Ġdemand ing", + "U RE", + "ĠL uke", + "F igure", + "st op", + "on ia", + "z one", + "iz ers", + "ĠW R", + "Ġaward ed", + "Ġregul atory", + "ĠH art", + "ĠS N", + "pl ing", + "Ġs our", + "ĠP ixel", + "us ive", + "Ġf et", + "ĠS ent", + "Ġautom atic", + "Ġf er", + "vern ment", + "ĠKh an", + "T ON", + "f ather", + "Ġextraord inary", + "th rop", + "ĠP ython", + "ĠG PU", + "Ġsex ually", + "Ġdesk top", + "it ivity", + "ĠAnton io", + "Ġo rient", + "Ġe ars", + "ob by", + "ous es", + "vertis ements", + "Ġmanufacture rs", + "ic ient", + "min ute", + "Ġconv iction", + "Ġg arden", + "p ublic", + "Ġsatisf ied", + "f old", + "O K", + "Ġin hab", + "ĠTh ink", + "Ġprogram me", + "Ġst omach", + "Ġcoord in", + "Ġh oly", + "Ġth reshold", + "Ġr het", + "Ġser ial", + "Ġemploy ers", + "ĠEvery thing", + "ra h", + "Ġb other", + "Ġbr ands", + "Val ue", + "ĠT ed", + "ĠPlan et", + "Ġp ink", + "ĠFurther more", + "s a", + "P E", + "re ck", + "ĠUS D", + "ot te", + "Ġ& &", + "Ġland ed", + "g ets", + "Ġprodu cers", + "Ġhealth care", + "Ġdomin ant", + "Ġdest ro", + "Ġam 
ended", + "ch ron", + "Ġf its", + "ĠSy d", + "ĠAuthor ity", + "AT CH", + "Ġfight s", + "ĠL LC", + "Ġ-- -", + "ĠCor p", + "Ġtox ic", + "spe cific", + "ĠC orn", + "ĠChe l", + "Ġtele phone", + "ĠP ant", + "Ġmyster ious", + "aun ch", + "od ox", + "med ia", + "Ġwitness es", + "ag u", + "Ġquestion ed", + "ĠBre xit", + "ĠRem ember", + "ene z", + "Ġend orse", + "iat ric", + "ĠId ent", + "Ġridic ulous", + "1 10", + "Ġpr ayer", + "Ġscient ist", + "Ġ19 50", + "ĠA qu", + "Ġunder ground", + "ĠU FC", + "m are", + "ĠL ater", + "w ich", + "Ġsubsc rib", + "Ġhost s", + "Ġer r", + "Ġgr ants", + "ant om", + "Ġsum mon", + "ear ly", + "ĠC lear", + "ĠPr im", + "Ġsusp ension", + "Ġguarant eed", + "app er", + "Ġr ice", + "ĠSe an", + "ĠSh in", + "Ġrefere ndum", + "Ġfl ed", + "r ust", + "Ġ3 60", + "ter y", + "Ġsh ocked", + "B R", + "ĠO il", + "ĠAll ah", + "Ġpart ly", + "Ġign or", + "Ġtrans mission", + "Ġhom osexual", + "ivers al", + "Ġhop efully", + "ãĤ ¤", + "Ġless on", + "L eg", + "Ġ ..", + "Y et", + "t able", + "app ropri", + "re tt", + "Ġbo ards", + "Ġincor rect", + "Ġb acteria", + "ar u", + "am ac", + "Ġsn ap", + ".' \"", + "Ġpar ad", + "t em", + "he art", + "Ġav ailability", + "Ġw isdom", + "Ġ( +", + "Ġpri est", + "ĠÂł ĠÂł", + "O pen", + "Ġsp an", + "Ġparam eter", + "Ġconv ince", + "Ġ( %)", + "r ac", + "Ġf o", + "Ġsafe ly", + "Ġconver ted", + "ĠOlymp ic", + "Ġres erve", + "Ġhe aling", + "ĠM ine", + "M ax", + "Ġin herent", + "ĠGra ham", + "Ġinteg rated", + "D em", + "Ġpip eline", + "Ġapp lying", + "Ġem bed", + "ĠCharl ie", + "Ġc ave", + "200 8", + "Ġcons ensus", + "Ġre wards", + "P al", + "ĠHT ML", + "Ġpopular ity", + "look ing", + "ĠSw ord", + "ĠAr ts", + "' )", + "Ġelect ron", + "clus ions", + "Ġinteg rity", + "Ġexclus ively", + "Ġgr ace", + "Ġtort ure", + "Ġburn ed", + "tw o", + "Ġ18 0", + "P rodu", + "Ġent reprene", + "raph ics", + "Ġg ym", + "ric ane", + "ĠT am", + "Ġadministr ative", + "Ġmanufacture r", + "Ġ vel", + "ĠN i", + "Ġisol ated", + "ĠMedic ine", + "Ġback up", + "Ġpromot ing", + "Ġcommand er", + "Ġfle e", + "ĠRus sell", + "Ġforg otten", + "ĠMiss ouri", + "Ġres idence", + "m ons", + "Ġrese mb", + "Ġw and", + "Ġmeaning ful", + "P T", + "Ġb ol", + "Ġhe lic", + "Ġwealth y", + "Ġr ifle", + "str ong", + "row ing", + "pl an", + "as ury", + "âĢ¦ .", + "Ġexpand ing", + "ĠHam ilton", + "Ġrece ives", + "S I", + "eat ures", + "ĠAn im", + "RE E", + "P ut", + "Ġbrief ly", + "ri ve", + "Ġstim ul", + "Ġ`` (", + "Ġ __", + "Ġch ip", + "Ġha z", + "Ġpri ze", + "ĠTh ings", + "AC E", + "ul in", + "d ict", + "ok u", + "Ġassoci ate", + "ock ets", + "y outube", + "St ory", + "ateg ory", + "Ġm ild", + "ail ing", + "ĠY e", + "O rig", + "ĠK a", + "or ig", + "Ġpropag anda", + "Ġan onymous", + "Ġstrugg led", + "Ġout rage", + "AT ED", + "ĠBe ijing", + "r ary", + "Ġle ather", + "Ġworld s", + "Ġbroad er", + "12 5", + "id al", + "ĠBet ter", + "Ġt ear", + "E xt", + "Ġpropos als", + "Ġit er", + "ĠSqu ad", + "Ġvol unt", + "m i", + "D id", + "ĠP u", + "p in", + "Ġspeak ers", + "Ġb orders", + "Ġfig ured", + "= '", + "Ġsimultane ously", + "aed a", + "Ġcharg ing", + "Ġur ged", + "Ġcon j", + "25 6", + "ĠG ordon", + "mer ce", + "Ġdocument ary", + "Sh are", + "it ol", + "ON E", + "ĠG arden", + "h att", + "ĠThom pson", + "ane ous", + "ap ore", + "Ġt anks", + "Ġless ons", + "tr ack", + "Ġout standing", + "Ġvolunte ers", + "Ġsp ray", + "Ġmanag ers", + "l arge", + "Ġcamp s", + "Ġart ificial", + "ĠR u", + "Ġb ags", + "th al", + "Ġcompat ible", + "ĠBl ade", + "Ġf ed", + "Ġarg ues", + "F I", + "Ġunf air", + "Ġcor n", + "Ġoff set", + "Ġdirect 
ions", + "Ġdisappoint ed", + "ĠCon vention", + "Ġview ing", + "M E", + "oc ity", + "Ġtown s", + "Ġlay ers", + "Ġro lled", + "Ġjump ed", + "Ġatt ribute", + "Ġun necess", + "inc oln", + "Ġsupp ose", + "ĠNet her", + "ch a", + "Ġbur ied", + "Ġsix th", + "B en", + "ress ing", + "OU R", + "Ġw ound", + "Ġcy cl", + "Ġmechan isms", + "Ġcongress ional", + "ĠE lement", + "Ġagre ements", + "Ġdec or", + "Ġclos est", + "ĠM it", + "Go ogle", + "} }", + "Ġm ixture", + "Ġflu id", + "S ign", + "ĠSch olar", + "Ġp ist", + "ask et", + "ab ling", + "Ġrac ing", + "he ro", + "ri el", + "ass y", + "Ġche aper", + "b en", + "Ġvert ical", + "amac are", + "ĠRead ing", + "g ments", + "Ġhelic op", + "Ġsacr ifice", + "ay a", + "p aren", + "V A", + "ĠL es", + "ĠStud io", + "Ġviol ations", + "ĠAn na", + "ac er", + "é ¾", + "ĠR at", + "ĠBe ck", + "ĠD ick", + "ĠA CT", + "Ġcomp osition", + "Ġtext ure", + "ĠO wn", + "Ġsmart phone", + "ĠN A", + "Ġfor b", + "im port", + "Ġdef ending", + "il st", + "re r", + "Ġo h", + "ĠJere my", + "Ġbank ing", + "cept ions", + "Ġrespect ive", + "/ .", + "Ġdr inks", + "ĠW i", + "Ġb ands", + "ĠL iverpool", + "Ġg rip", + "ĠB uy", + "Ġopen ly", + "Ġreview ed", + "per t", + "Ġver ify", + "ĠCo le", + "ĠW ales", + "M O", + "Ġun pre", + "Ġshel ter", + "ĠIm perial", + "Ġgu i", + "ĠD ak", + "Ġsuggest ions", + "Ġexplicit ly", + "Ġsl ave", + "Ġblock chain", + "Ġcompet ing", + "Ġprom ising", + "S ON", + "Ġsoc cer", + "Ġconst itution", + "4 29", + "Ġdist ract", + "ĠU ser", + "es ides", + "ĠMet hod", + "ĠTok yo", + "Ġaccompan ied", + "Cl ient", + "s ur", + "al og", + "Ġident ification", + "Ġinv asion", + "as ma", + "Ġindust ries", + "pp ers", + "Ġsub tle", + "ĠUn it", + "n atural", + "Ġsurv ived", + "Ġfl aw", + "ĺ ħ", + "ĠH oll", + "Ġdef icit", + "Ġtut orial", + "ĠCh ance", + "Ġarg uing", + "Ġcontem porary", + "Ġinteg ration", + "for ward", + "Ġt um", + "it is", + "Ġh iding", + "ĠD omin", + "ĠT an", + "ĠB uilding", + "ĠV in", + "Ġspokes person", + "ĠNot es", + "Ġemer ging", + "Ġprepar ation", + "Ġpro st", + "Ġsuspect s", + "Ġaut onom", + "D escription", + "Ġdeal t", + "ĠP ear", + "Ġstead y", + "Ġdecre ased", + "Ġso vere", + "ĠCl in", + "Ġgrad ually", + "ors es", + "ĠW AR", + "S erv", + "ãĤ ¢", + "h r", + "Ġd irty", + "ĠB arn", + "ĠB C", + "Ġd il", + "Ġcal endar", + "Ġcompl iance", + "Ġch amber", + "b b", + "Ġpass enger", + "ate ful", + "ĠT itle", + "ĠSyd ney", + "ĠG ot", + "Ġdark ness", + "Ġdef ect", + "Ġpack ed", + "ass ion", + "Ġgod s", + "Ġh arsh", + "IC K", + "le ans", + "Ġalgorith m", + "Ġoxy gen", + "Ġvis its", + "Ġbl ade", + "Ġkil omet", + "ĠKent ucky", + "Ġkill er", + "P ack", + "enn y", + "Ġdiv ine", + "Ġnom ination", + "be ing", + "Ġeng ines", + "Ġc ats", + "Ġbuff er", + "ĠPh ill", + "Ġtra ff", + "AG E", + "Ġtong ue", + "Ġrad iation", + "ere r", + "m em", + "ĠExpl icit", + "é¾ į", + "Ġcou ples", + "Ġphys ics", + "ĠMc K", + "Ġpolit ically", + "aw ks", + "ĠBl oom", + "Ġwor ship", + "e ger", + "ut er", + "ĠF O", + "Ġmat hemat", + "Ġsent enced", + "Ġdis k", + "ĠM arg", + "Ġ/ *", + "P I", + "Ġoption al", + "Ġbab ies", + "Ġse eds", + "ĠScott ish", + "Ġth y", + "] ]", + "ĠHit ler", + "P H", + "ng th", + "Ġrec overed", + "ing e", + "Ġpow der", + "Ġl ips", + "Ġdesign er", + "Ġdis orders", + "Ġcour age", + "Ġch aos", + "\" },{\"", + "Ġcar rier", + "b ably", + "H igh", + "ĠR T", + "es ity", + "l en", + "Ġrout es", + "u ating", + "F il", + "N OT", + "w all", + "s burgh", + "Ġeng aging", + "ĠJava Script", + "ore r", + "li hood", + "Ġun ions", + "ĠF ederation", + "ĠTes la", + "Ġcomple tion", + "ĠT a", + "Ġprivile 
ge", + "ĠOr ange", + "Ġne ur", + "paren cy", + "Ġb ones", + "Ġtit led", + "Ġprosecut ors", + "ĠM E", + "Ġengine er", + "ĠUn iverse", + "ĠH ig", + "n ie", + "o ard", + "Ġheart s", + "ĠG re", + "uss ion", + "Ġmin istry", + "Ġpen et", + "ĠN ut", + "ĠO w", + "ĠX P", + "in stein", + "Ġbul k", + "S ystem", + "ic ism", + "ĠMarket able", + "Ġpre val", + "Ġpost er", + "Ġatt ending", + "ur able", + "Ġlicens ed", + "ĠG h", + "et ry", + "ĠTrad able", + "Ġbl ast", + "à ¤", + "ĠTit an", + "ell ed", + "d ie", + "H ave", + "ĠFl ame", + "Ġprof ound", + "Ġparticip ating", + "Ġan ime", + "ĠE ss", + "Ġspec ify", + "Ġregard ed", + "ĠSpe ll", + "Ġs ons", + "own ed", + "Ġm erc", + "Ġexper imental", + "land o", + "h s", + "ĠDun geon", + "in os", + "Ġcomp ly", + "ĠSystem s", + "ar th", + "Ġse ized", + "l ocal", + "ĠGirl s", + "ud o", + "on ed", + "ĠF le", + "Ġconstruct ed", + "Ġhost ed", + "Ġsc ared", + "act ic", + "ĠIs lands", + "ĠM ORE", + "Ġbl ess", + "Ġblock ing", + "Ġch ips", + "Ġev ac", + "P s", + "Ġcorpor ation", + "Ġo x", + "Ġlight ing", + "Ġneighb ors", + "ĠU b", + "ar o", + "Ġbe ef", + "ĠU ber", + "F acebook", + "ar med", + "it ate", + "ĠR ating", + "ĠQu ick", + "Ġoccup ied", + "Ġaim s", + "ĠAdd itionally", + "ĠInt erest", + "Ġdram atically", + "Ġhe al", + "Ġpain ting", + "Ġengine ers", + "M M", + "ĠM ust", + "Ġquant ity", + "P aul", + "Ġearn ings", + "ĠPost s", + "st ra", + "ãĥ¼ ãĥ", + "Ġst ance", + "Ġdro pping", + "sc ript", + "Ġd ressed", + "M ake", + "Ġjust ify", + "ĠL td", + "Ġprompt ed", + "Ġscr ut", + "Ġspeed s", + "ĠGi ants", + "om er", + "ĠEd itor", + "Ġdescrib ing", + "ĠL ie", + "ment ed", + "Ġnow here", + "oc aly", + "Ġinst ruction", + "fort able", + "Ġent ities", + "Ġc m", + "ĠN atural", + "Ġinqu iry", + "Ġpress ed", + "iz ont", + "for ced", + "Ġra ises", + "ĠNet flix", + "ĠS ide", + "Ġout er", + "Ġamong st", + "im s", + "ows ki", + "Ġclim b", + "ne ver", + "Ġcomb ine", + "d ing", + "Ġcomp r", + "Ġsignific ance", + "Ġremem bered", + "ĠNev ada", + "ĠT el", + "ĠSc ar", + "ĠWar riors", + "ĠJ ane", + "Ġcou p", + "b as", + "Ġtermin al", + ", -", + "O H", + "Ġt ension", + "Ġw ings", + "ĠMy ster", + "�� ��", + "ĠUn like", + "val id", + "viron ments", + "ĠAl i", + "Ġn aked", + "book s", + "ĠM un", + "ĠG ulf", + "Ġd ensity", + "Ġdim in", + "Ġdesper ate", + "Ġpres idency", + "Ġ198 6", + "h y", + "IN D", + "Ġun lock", + "im ens", + "Ġhand led", + "ĠE b", + "Ġdisapp eared", + "Ġgen re", + "Ġ198 8", + "Ġdetermin ation", + "St ream", + "ik o", + "ap ters", + "Ġacknow ledge", + "J an", + "Ġcapital ism", + "P at", + "Ġ20 20", + "Ġpain ful", + "Ġcur ve", + "Ġbom bs", + "st orm", + "ĠMet al", + "en cer", + "ĠF ig", + "ĠA aron", + "anc hes", + "Ġins piration", + "Ġexha ust", + "t ains", + "ash i", + "Ġdesc ript", + "Ġr itual", + "ĠChel sea", + "Ġpromot ion", + "ĠH ung", + "ĠW ard", + "iv a", + "ĠE T", + "Ġto ss", + "all ow", + "ĠFranc is", + "D ep", + "Ġhapp iness", + "ĠGl ass", + "Ġbet a", + "Ġstreng then", + "N E", + "o a", + "Ġbutt ons", + "ĠMur ray", + "Ġkick ed", + "Qu est", + "ĠT alk", + "ĠS everal", + "ĠZ ero", + "Ġdr one", + "ul k", + "Ġc am", + "ĠM obile", + "Ġprevent ing", + "Ġret ro", + "ĠA x", + "Ġcru el", + "Ġflo at", + ". ),", + "Ġfil ing", + "ĠGr ant", + "ĠB or", + "Ġr ib", + "Ġchampions hip", + "ĠM erc", + "Ġsty les", + "Ġc ake", + "Ġbuild s", + "ĠS elf", + "io x", + "Ġep ic", + "oy d", + "B el", + "ĠSt ew", + ". 
(", + "ah u", + "ĠBe yond", + "Ġout s", + "Ġsol o", + "ĠT ree", + "Ġpres erve", + "Ġt ub", + "AR E", + "ro c", + "ĠIm pro", + "ĠW right", + "Ġbu nd", + "Ġtr aged", + "Ġoccas ional", + "b ian", + "Sec ond", + "r ons", + "Ġinter actions", + "form ed", + "s ing", + "Ġown s", + "Ġh ockey", + "Gener al", + "Ġlog ical", + "Ġexp end", + "Ġesc al", + "ĠGr iff", + "ĠC rown", + "ĠRes erve", + "Ġsto pping", + "Ġexc use", + "sec ond", + "Ġoper ated", + "Ġre aches", + "ĠMal ays", + "Ġpoll ution", + "ĠBrook lyn", + "Ġde lete", + "Ġhas h", + "Bl ock", + "ah a", + "âĢ ³", + "Ġsh orter", + "p iece", + "> >>", + "ĠM ormon", + "t or", + "Ġpartic les", + "ĠB art", + "ry ption", + "Ġad min", + "Ġsqu ee", + "VID IA", + "Ġcreat or", + "iam eter", + "ic ular", + "N BC", + "Ġgrab bed", + "Ġn odd", + "Ġr ated", + "Ġrot ation", + "Ġgr asp", + "Ġexcess ive", + "ĠE C", + "ĠWh it", + "Ġinvent ory", + "ault s", + "ĠF B", + "Ġe cosystem", + "Ġbill ions", + "Ġvent ure", + "n amed", + "Ġdef ender", + "out e", + "Inst ead", + "ir able", + "W ar", + "Ġassum ption", + "Ġb ite", + "Ġearth qu", + "t ail", + "sp ace", + "Ġgif ts", + "boy s", + "Ġinev itable", + "Ġstruct ural", + "Ġbenef icial", + "Ġcompe lling", + "h ole", + "erv ation", + "Ġco at", + "o j", + "inc arn", + "ĠY ears", + "Ġdetermin ing", + "Ġrhet oric", + "Ġbound aries", + "Ġwh ites", + "A nt", + "add y", + ") -", + "ra ham", + "eter min", + "Ġhar vest", + "ĠCon c", + "Ġlapt op", + "ĠM atch", + "Ġenjoy ing", + "cc a", + "oll ar", + "Ġtri ps", + "Ġadd iction", + "ĠS ak", + "Ġpow ered", + "Ġc ous", + "ĠRuss ians", + "ie re", + "Ġret rie", + "qu ality", + "Ġdiff er", + "Ġking dom", + "ĠL aur", + "ĠCap itol", + "Ġcon clusions", + "ĠAl tern", + "ĠN av", + "Ġtrans parent", + "B ER", + "G roup", + "ĠCom plete", + "Ġinf er", + "Ġint rig", + "Ġins ane", + "R O", + "oph ob", + "is en", + "qu al", + "Mich ael", + "Ġm useum", + "ĠP ope", + "Ġres et", + "r ative", + "f ive", + "Ġagg reg", + "itte es", + "osit ory", + "Ġcar b", + "ĠRec ord", + "Ġdec ides", + "ĠF ix", + "Ġexcept ions", + "ĠCommission er", + "un s", + "ĠEnvironment al", + "Ġlegend ary", + "ist ence", + "Ġtun nel", + "k m", + "Ġins ult", + "Ġt roll", + "Ġsh ake", + "Ġdet ention", + "qu es", + "ĠCh rome", + "ĠF iles", + "Ġsub t", + "Ġprospect s", + "Ġpro l", + "re nder", + "pro of", + "Ġperform ances", + "St r", + "Ġh ref", + "ern ame", + "Ġachieve ment", + "Ġf ut", + "F ull", + "ĠLe ban", + "go ogle", + "ãĥ Ī", + "amp a", + "May be", + "Ġproject ed", + "ĠE mb", + "Ġcol leg", + "Ġa wards", + "Ġâ Ķ", + "G old", + "ĠBl ake", + "ĠR aj", + "if ting", + "Ġp ending", + "Ġinst inct", + "Ġdevelop ments", + "Con nect", + "ĠM and", + "ĠW ITH", + "ĠPhilipp ines", + "prof ile", + "Ġalt ogether", + "ĠB und", + "ĠT D", + "oo oo", + "amp ed", + "ip h", + "Ġste am", + "Ġold est", + "Ġdet ection", + "ul pt", + "Ġ ç", + "ĠWay ne", + "200 6", + "f a", + "Ġcir cles", + "ĠF u", + "Ġdon ors", + "appropri ate", + "ĠDak ota", + "j amin", + "Ġmotiv ated", + "Ġpurch ases", + "ĠLouis iana", + "ĠS pl", + "Ġgl obe", + "Ġ10 5", + "z ip", + "c all", + "Ġdepart ments", + "Ġsustain able", + "10 5", + "ĠO P", + "if iers", + "Ġprevent ed", + "Ġinc omp", + "ĠComm ander", + "Ġdom inated", + "Ġ »", + "Ġinvest ed", + "Ġcomplex ity", + "Ġin cl", + "Ġens uring", + "Ġreal m", + "yn c", + "ĠInd ependent", + "r ained", + "ĠJ en", + "ĠFl ight", + "Ġat he", + "Ġspec ulation", + "ĠT E", + "oc ate", + "t ic", + "Ġpl aint", + "her ry", + "Ġto y", + "Ġ1 11", + "Ġpl ates", + "st atus", + "ĠIs a", + "Ġdev oted", + "C op", + "ĠE S", + "25 5", + "ur rency", + "M ain", 
+ "Ġsl aves", + "Ġpe pper", + "Ġqu otes", + "Ġce iling", + "ĠF ish", + "Ġtrans formation", + "Ġfra ction", + "Ġadvant ages", + "Ġto ile", + "Ġstun ning", + "Ġmo ist", + "bre aking", + "s i", + "ĠL ocation", + "ĠMed ium", + "Ġtext s", + "Ġu gly", + "Ġb io", + ". âĢĶ", + "ĠB ased", + "Ġtr ains", + "ĠW ing", + "ĠAn cient", + "ĠRec ords", + "ĠH ope", + "Spe cial", + "ades h", + "ob i", + "[ /", + "Ġtempor arily", + "V er", + "h u", + "os er", + "Ġover night", + "Ġm amm", + "ĠTre asury", + "ĠV enezuel", + "ĠMeg a", + "Ġt ar", + "Ġexpect s", + "bl ack", + "or ph", + "\\\\ \\\\", + "Ġaccept ance", + "Ġrad ar", + "s is", + "Ġjun ior", + "Ġfram es", + "Ġobserv ation", + "ac ies", + "P ower", + "ĠAdv anced", + "M ag", + "olog ically", + "ĠMe chan", + "Ġsent ences", + "Ġanaly sts", + "augh ters", + "force ment", + "Ġv ague", + "Ġcl ause", + "Ġdirect ors", + "Ġeval uate", + "Ġcabin et", + "M att", + "ĠClass ic", + "A ng", + "Ġcl er", + "ĠB uck", + "Ġresear cher", + "Ġ16 0", + "Ġpoor ly", + "Ġexperien cing", + "ĠP ed", + "ĠMan hattan", + "Ġfre ed", + "Ġthem es", + "ad vant", + "Ġn in", + "Ġpra ise", + "10 4", + "ĠLib ya", + "b est", + "Ġtrust ed", + "Ġce ase", + "Ġd ign", + "D irect", + "Ġbomb ing", + "Ġm igration", + "ĠSci ences", + "Ġmunicip al", + "ĠA verage", + "Ġgl ory", + "Ġreve aling", + "Ġare na", + "Ġuncertain ty", + "Ġbattle field", + "ia o", + "G od", + "Ġc inem", + "ra pe", + "el le", + "ap ons", + "Ġlist ing", + "Ġwa ited", + "Ġsp otted", + "ke ley", + "ĠAud io", + "e or", + "ard ing", + "idd ing", + "ig ma", + "ĠN eg", + "Ġl one", + "Ġ ----", + "ex e", + "d eg", + "Ġtrans f", + "Ġwas h", + "Ġsl avery", + "Ġexpl oring", + "ĠW W", + "ats on", + "Ġen cl", + "l ies", + "ĠC reek", + "Ġwood en", + "Man ager", + "ĠBr and", + "um my", + "ĠAr thur", + "Ġbureau cr", + "Ġbl end", + "ar ians", + "F urther", + "Ġsupposed ly", + "Ġwind s", + "Ġ19 79", + "Ġgrav ity", + "Ġanalys es", + "ĠTra vel", + "ĠV eter", + "Ġd umb", + "Ġaltern ate", + "g al", + "Ġconsum ed", + "Ġeffect iveness", + ".' '", + "Ġpath s", + "ond a", + "L A", + "ĠStr ong", + "Ġen ables", + "Ġesc aped", + "Ġ\" \"", + "Ġ1 12", + "Ġ198 3", + "Ġsm iled", + "Ġtend ency", + "F ire", + "Ġp ars", + "ĠR oc", + "Ġl ake", + "Ġf itness", + "ĠA th", + "ĠH orn", + "Ġh ier", + "Ġimp ose", + "m other", + "Ġp ension", + "ic ut", + "bor ne", + "ic iary", + ". 
_", + "ĠS U", + "Ġpol ar", + "is y", + "eng u", + "itial ized", + "AT A", + "w rite", + "Ġexerc ises", + "ĠD iamond", + "ot ypes", + "Ġharm ful", + "on z", + "Ġprint ing", + "st ory", + "Ġexpert ise", + "ĠG er", + "Ġtraged y", + "ĠF ly", + "Ġd ivid", + "amp ire", + "st ock", + "M em", + "Ġre ign", + "Ġun ve", + "Ġam end", + "ĠProp het", + "Ġmut ual", + "ĠF ac", + "Ġrepl acing", + "H ar", + "ĠCirc uit", + "Ġthro at", + "ĠSh ot", + "Ġbatter ies", + "Ġto ll", + "Ġaddress ing", + "ĠMedic aid", + "Ġp upp", + "ĠN ar", + "ol k", + "Ġequ ity", + "M R", + "ĠHis pan", + "ĠL arge", + "m id", + "D ev", + "Ġexp ed", + "Ġdem o", + "ĠMarsh all", + "erg us", + "Ġf iber", + "Ġdiv orce", + "ĠCre ate", + "Ġsl ower", + "ĠPark er", + "ĠStud ent", + "ĠTr aining", + "Ret urn", + "ĠT ru", + "Ġc ub", + "ĠRe ached", + "Ġpan ic", + "Ġqu arters", + "Ġre ct", + "Ġtreat ing", + "Ġr ats", + "ĠChristian ity", + "ol er", + "Ġsac red", + "Ġdecl are", + "ul ative", + "et ing", + "Ġdeliver ing", + "est one", + "Ġt el", + "ĠL arry", + "Ġmet a", + "ac cept", + "art z", + "ĠRog er", + "hand ed", + "Ġhead er", + "Ġtra pped", + "ĠCent ury", + "Ġkn ocked", + "ĠOx ford", + "Ġsurviv ors", + "b ot", + "Ġdemon stration", + "Ġd irt", + "Ġass ists", + "OM E", + "ĠD raft", + "ortun ate", + "fol io", + "pe red", + "ust ers", + "g t", + "ĠL ock", + "Ġjud icial", + "ver ted", + "Ġsec ured", + "out ing", + "ĠBook s", + "Ġhost ing", + "Ġlif ted", + "l ength", + "Ġj er", + "Ġwhe els", + "ĠR ange", + "umbn ails", + "Ġdiagn osis", + "te ch", + "ĠStew art", + "ĠP ract", + "Ġnation wide", + "Ġde ar", + "Ġoblig ations", + "Ġgrow s", + "Ġmand atory", + "Ġsusp icious", + "! '", + "A pr", + "G reat", + "Ġmort gage", + "Ġprosecut or", + "Ġeditor ial", + "ĠK r", + "Ġprocess ed", + "ung le", + "Ġflex ibility", + "Ear lier", + "ĠC art", + "ĠS ug", + "Ġfoc uses", + "Ġstart up", + "Ġbre ach", + "ĠT ob", + "cy cle", + "ãĢ Į", + "ro se", + "Ġb izarre", + "ãĢ į", + "Ġveget ables", + "$ $", + "Ġret reat", + "osh i", + "ĠSh op", + "ĠG round", + "ĠSt op", + "ĠHawai i", + "ĠA y", + "Per haps", + "ĠBe aut", + "uff er", + "enn a", + "Ġproduct ivity", + "F ixed", + "cont rol", + "Ġabs ent", + "ĠCamp aign", + "G reen", + "Ġident ifying", + "Ġreg ret", + "Ġpromot ed", + "ĠSe ven", + "Ġer u", + "ne ath", + "aug hed", + "ĠP in", + "ĠL iving", + "C ost", + "om atic", + "me ga", + "ĠN ig", + "oc y", + "Ġin box", + "Ġem pire", + "Ġhor izont", + "Ġbr anches", + "Ġmet aph", + "Act ive", + "ed i", + "ĠFil m", + "ĠS omething", + "Ġmod s", + "inc ial", + "ĠOrig inal", + "G en", + "Ġspir its", + "Ġear ning", + "H ist", + "Ġr iders", + "Ġsacr ific", + "M T", + "ĠV A", + "ĠS alt", + "Ġoccup ation", + "ĠM i", + "Ġdis g", + "lic t", + "Ġn it", + "Ġn odes", + "e em", + "ĠP ier", + "Ġhat red", + "ps y", + "ãĥ ī", + "Ġthe ater", + "Ġsophistic ated", + "Ġdef ended", + "Ġbes ides", + "Ġthorough ly", + "ĠMedic are", + "Ġbl amed", + "arent ly", + "Ġcry ing", + "F OR", + "pri v", + "Ġsing ing", + "ĠI l", + "Ġc ute", + "o ided", + "olit ical", + "ĠNe uro", + "å ¤", + "Ġdon ation", + "ĠEag les", + "ĠG ive", + "T om", + "Ġsubstant ially", + "ĠLic ense", + "ĠJ a", + "Ġg rey", + "ĠAn imal", + "ĠE R", + "ĠU nd", + "Ġke en", + "Ġconclud e", + "ĠMississ ippi", + "Eng ine", + "ĠStud ios", + "P ress", + "o vers", + "ll ers", + "Ġ3 50", + "ĠR angers", + "Ġr ou", + "ert o", + "E p", + "iss a", + "iv an", + "Ġse al", + "ĠReg ist", + "dis play", + "Ġwe aken", + "u um", + "ĠComm ons", + "ĠS ay", + "Ġcult ures", + "Ġl aughed", + "Ġsl ip", + "Ġtreat ments", + "iz able", + "m art", + "ĠR ice", + "Ġbe ast", + 
"Ġob esity", + "ĠLa ure", + "ig a", + "Wh ich", + "hold er", + "Ġelder ly", + "Ġp ays", + "Ġcompl ained", + "Ġc rop", + "Ġpro c", + "Ġexplos ive", + "ĠF an", + "ĠAr senal", + "A uthor", + "ef ul", + "Ġme als", + "Ġ( -", + "id ays", + "Ġimag ination", + "Ġann ually", + "Ġm s", + "as ures", + "H ead", + "ik h", + "m atic", + "Ġboy friend", + "ĠCom puter", + "Ġb ump", + "Ġsur ge", + "ĠCra ig", + "ĠKir k", + "D el", + "medi ate", + "Ġscen arios", + "ĠM ut", + "ĠSt ream", + "Ġcompet itors", + "Ù Ħ", + "ĠStan ford", + "ĠRes ources", + "az ed", + "b age", + "Ġorgan is", + "ĠRe lease", + "Ġsepar ately", + "Ġha bits", + "Ġmeasure ments", + "ĠCl ose", + "Ġaccomp any", + "Ġg ly", + "Ġt ang", + "ĠR ou", + "Ġplug in", + "Ġcon vey", + "ĠChall enge", + "oot s", + "j an", + "Ġcur s", + "ĠRel ations", + "ke eper", + "Ġapproach ing", + "p ing", + "Spe aking", + "Ġarrang ement", + "ĠV I", + "are ttes", + "Ġaffect ing", + "Ġperm its", + "b ecause", + "Ġu seless", + "ĠH us", + "!! !!", + "Ġdestro ying", + "Un fortunately", + "Ġfasc inating", + "S em", + "Ġelect oral", + "Ġtrans parency", + "ĠCh aos", + "Ġvolunte er", + "Ġstatist ical", + "Ġactiv ated", + "ro x", + "We b", + "H E", + "ĠHamp shire", + "is ive", + "M ap", + "Ġtr ash", + "ĠLaw rence", + "st ick", + "C r", + "Ġr ings", + "EX T", + "Ġoper ational", + "op es", + "D oes", + "ĠEv ans", + "Ġwitness ed", + "P ort", + "Ġlaunch ing", + "ec onom", + "w ear", + "ĠPart icip", + "um m", + "cul es", + "ĠR AM", + "ĠT un", + "Ġass ured", + "Ġb inary", + "Ġbet ray", + "Ġexpl oration", + "ĠF el", + "Ġad mission", + "it ated", + "S y", + "Ġav oided", + "ĠSim ulator", + "Ġcelebr ated", + "ĠElect ric", + "¥ ŀ", + "Ġcl uster", + "itzer land", + "he alth", + "L ine", + "ĠN ash", + "at on", + "Ġsp are", + "Ġenter prise", + "ĠD IS", + "clud es", + "Ġfl ights", + "Ġreg ards", + "Ġà Ĺ", + "h alf", + "Ġtr ucks", + "Ġcontact s", + "Ġunc ons", + "ĠCl imate", + "Ġimm ense", + "N EW", + "oc c", + "ect ive", + "Ġemb od", + "Ġpat rol", + "Ġbes ide", + "Ġv iable", + "Ġcre ep", + "Ġtrig gered", + "ver ning", + "Ġcompar able", + "q l", + "Ġg aining", + "ass es", + "Ġ( );", + "ĠG rey", + "ĠM LS", + "s ized", + "Ġpros per", + "\" ?", + "Ġpoll ing", + "Ġsh ar", + "ĠR C", + "Ġfire arm", + "or ient", + "Ġf ence", + "Ġvari ations", + "g iving", + "ĠP i", + "osp el", + "Ġpled ge", + "Ġc ure", + "Ġsp y", + "Ġviol ated", + "Ġr ushed", + "Ġstro ke", + "ĠBl og", + "sel s", + "ĠE c", + ",' '", + "Ġp ale", + "ĠColl ins", + "ter ror", + "ĠCanad ians", + "Ġt une", + "Ġlabor atory", + "Ġn ons", + "t arian", + "Ġdis ability", + "ĠG am", + "Ġsing er", + "al g", + "ĠSen ior", + "Ġtrad ed", + "ĠWar rior", + "Ġinf ring", + "ĠFrank lin", + "Ġstr ain", + "ĠSwed ish", + "Ġsevent h", + "ĠB enn", + "ĠT ell", + "Ġsynd rome", + "Ġwond ered", + "id en", + "++ ++", + "ig o", + "Ġpur ple", + "Ġjournal ism", + "Ġreb el", + "Ġf u", + "bl og", + "Ġinv ite", + "ren cies", + "ĠCont act", + "Is rael", + "ĠCont ent", + "Ġche er", + "Ġbed room", + "ĠEngine ering", + "ĠQue ens", + "Ġd well", + "ĠPlay Station", + "ĠD im", + "ĠCol on", + "l r", + "Ġoper ates", + "Ġmotiv ation", + "US A", + "ast ered", + "C ore", + "ĠTr uth", + "ol o", + "OS E", + "ĠMem ory", + "Ġpred ec", + "Ġan arch", + "Ġ19 20", + "ĠY am", + "à ¨", + "b id", + "Ġgr ateful", + "Ġexc itement", + "Ġtre asure", + "Ġlong est", + "ct ive", + "Ġdes erves", + "Ġreserv es", + "Ġcop s", + "ĠOtt awa", + "ĠEgypt ian", + "ank ed", + "Ġart if", + "Ġhypot hesis", + ": /", + "Ġpurch asing", + "Ġlove ly", + "H P", + "Ġdiv ide", + "Ġstrict ly", + "Ġquestion ing", + "Ġtaxp 
ayers", + "ĠJ oy", + "Ġroll s", + "ĠHe avy", + "Ġp orts", + "Ġmag netic", + "Ġinf lamm", + "Ġbr ush", + "t ics", + "â ĪĴ", + "Ġbott les", + "pp y", + "Ġp add", + "ãĤ ¯", + "m illion", + "Ġdevast ating", + "Ġcomp iled", + "Ġmed ication", + "Ġtw elve", + "ĠPer ry", + "Sp ace", + "im b", + "y our", + "Ġle aked", + "ĠT ar", + "Ġun ity", + "Ġinfect ed", + "Ġtravel ed", + "ID E", + "ĠMc Donald", + "t xt", + "ĠPr inc", + "Ġinter ven", + "ĠTai wan", + "ĠP ow", + "Ġbe aring", + "ĠTh read", + "Ġz ones", + "iz ards", + "un ks", + "Ch apter", + "ll or", + "Ġ ·", + "Ġw ounds", + "Ġdisc retion", + "Ġsucceed ed", + "ik ing", + "Ġicon ic", + "C all", + "Ġscreen ing", + "ĠM is", + "ict s", + "Ġmin isters", + "Ġsepar ation", + "Pl ayer", + "Ġb ip", + "Ġbel oved", + "Ġcount ing", + "ĠE ye", + "ar ound", + "ing ing", + "Ġtable t", + "Ġoff ence", + "in ance", + "h ave", + "ĠInf o", + "ĠNin ja", + "Ġprotect ive", + "ĠC ass", + "M ac", + "ĠQual ity", + "N orth", + "Ġ ic", + "ĠCub a", + "ĠChron icle", + "ĠPro perty", + "Ġfast est", + "ot os", + "ĠG erm", + "OW N", + "Ġbo om", + "ĠStan ley", + "ergus on", + "Ġcle ver", + "Ġent ers", + "m ode", + "ter ior", + "ĠS ens", + "Ġlin ear", + "AR K", + "Ġcomp aring", + "Ġpure ly", + "Ġsaf er", + "ĠPot ter", + "Ġc ups", + "R T", + "Ġgl uc", + "Ġatt ributed", + "Ġdu pl", + "ĠP ap", + "Ġprec ious", + "Ġp a", + "iction ary", + "ĠT ig", + "ĠTo o", + "ol utions", + "st an", + "Ġrob ots", + "Ġlob b", + "Ġstat ute", + "Ġprevent ion", + "w estern", + "16 0", + "ĠAct ive", + "ĠMar ia", + "h al", + "N one", + "ell ar", + "ĠK B", + "ĠPart ners", + "ĠSing le", + "ĠFollow ing", + "ang o", + "ac ious", + "Ġth ou", + "Ġk g", + "Ġinflu ential", + "ĠFriend s", + "S ur", + "ain ted", + "Ġfor ums", + "Ġst arter", + "Ġcitizens hip", + "ĠE lection", + "on ge", + "ot ation", + "os ph", + ";; ;;", + "ut ical", + "p ur", + "ere n", + "Ġaccus ations", + "bit ious", + "ab bit", + "ĠOr d", + "Post ed", + "ir k", + "Ġsens itivity", + "ic he", + "ĠAm y", + "ĠF ab", + "Ġsum mit", + "Ġped est", + "Ġrub ber", + "Ġagric ultural", + "Ġcan cel", + "A E", + "Ġin aug", + "Ġcont am", + "Ġfirm ly", + "i w", + "st age", + "ĠK an", + "Ġt ier", + "Ġinv ention", + "Ġtransl ated", + "ĠR ules", + "B ox", + "Tw itter", + "ID S", + "Ġp izza", + "Ġdeb ug", + "ĠD rop", + "v s", + "Ġh orses", + "b ig", + "Ġb oring", + "Ġh ood", + "ĠMcC ain", + "at ched", + "ĠBro s", + "Ġsk ip", + "Ġess ay", + "st at", + "ĠLeg ends", + "Ġam munition", + "au c", + "Ġshoot er", + "Ġun h", + "Ġsuppl ied", + "Ġgener ic", + "ĠS K", + "ib an", + "yr ics", + "Ġ25 5", + "Ġclim bing", + "Form er", + "Ġfl ip", + "Ġjump ing", + "Ġfrust ration", + "ĠTer ry", + "Ġneighborhood s", + "Ġmed ian", + "be an", + "Ġbr ains", + "Follow ing", + "Ġsh aped", + "Ġdraw s", + "Ġal tered", + "J ack", + "Ġrecip es", + "Ġsk illed", + "we alth", + "ach i", + "e lection", + "Ġbehavi ors", + "de als", + "ĠU ntil", + "F e", + "Ġdecl aration", + "mar ks", + "ĠBet ween", + "cel ona", + "Ġres on", + "Ġbub ble", + "Am ong", + "Ġim perial", + "G S", + "Ġfemin ist", + "200 5", + "ĠK yle", + "Ġaccount ing", + "ĠTe le", + "ĠT yr", + "Ġconnect ing", + "Ġre hab", + "ĠP red", + "s im", + "Ġmeant ime", + "Ġphys ician", + "M W", + "ĠCamp bell", + "ĠBr andon", + "Ġcontribut ing", + "ĠR ule", + "ĠWe ight", + "ĠN ap", + "Ġinter active", + "Ġv ag", + "Ġhel met", + "ĠCom b", + "f our", + "Ġsh ipped", + "Ġcomple ting", + "ĠP D", + "PD ATE", + "Ġspread ing", + "Ġsc ary", + "erv ing", + "ĠG as", + "Ġfr ank", + "s chool", + "Ġrom antic", + "Ġstab il", + "R ob", + "Ġaccur ately", + "Ġac ute", + 
"ĠH ann", + "Ġsymbol s", + "Ġcivil ization", + "ĠA W", + "Ġlight ning", + "Ġcons iders", + "Ġven ue", + "Ġ ×", + "Ġo ven", + "ĠS F", + "h is", + "Ġn u", + "ĠLear n", + "Ġpe oples", + "Ġst d", + "Ġsle e", + "Ġs lic", + "ĠStat istics", + "Ġcor ners", + "ĠB aker", + "Ġ: )", + "ment ation", + "ol ver", + "Ġlaugh ing", + "ĠT odd", + "ond e", + "ĠH ills", + "Ġn uts", + "ĠW oman", + "pl ane", + "Ġl iver", + "ĠIn side", + "S orry", + "Ġagre es", + "Ġfund ament", + "ĠF isher", + "Ġa uction", + "Ġthread s", + "gl as", + "ĠBas ic", + "ĠN at", + "Ġlack ing", + "Ġceleb ration", + "j u", + "Ġs illy", + "E uro", + "Ġt att", + "ight y", + "cont rolled", + "T est", + "ĠSing h", + "Ġr age", + "Ġrh yth", + "o ffic", + "ĠPh antom", + "Ġhead lines", + "Ġrespond ing", + "ĠMor ning", + "Ġvit amin", + "Ġboot s", + "ĠS ite", + "al in", + "p i", + "Ġvir al", + "ĠU C", + "D ER", + "ĠSe x", + "Ġst ocks", + "c urrent", + "Ġch urches", + "ĠR are", + "ĠMur phy", + "Ġden ial", + "ĠG aming", + "Ġtou g", + "Ġn ick", + "Ġm akers", + "ĠRon ald", + "Ġgener ous", + "ĠD oc", + "ĠMor ris", + "Ġtransform ed", + "ĠN ormal", + "Ġ10 4", + "ĠKick starter", + "ĠUp on", + "On line", + "ĠI RS", + "Ġw rap", + "Ġl oving", + "Ġarri ves", + "ĠD ue", + "Ġhe ter", + "ĠM ade", + "Ġrent al", + "Ġbelong s", + "Ġatt orneys", + "Ġcro ps", + "Ġmat ched", + "ul um", + "ol ine", + "10 9", + "Ġdis par", + "Ġbuy ers", + "ĠCam bridge", + "Ġeth ics", + "rou ps", + "Ġjust ified", + "Ġmarg inal", + "Ġrespect ed", + "win ning", + "Ġnodd ed", + "ĠSer ge", + "ĠForm er", + "C raft", + "######## ########", + "ĠWar ner", + "Ġd ash", + "et e", + "Ġent ert", + "ĠE scape", + "out heast", + "Ġkn ees", + "ĠB omb", + "Ġr ug", + "P ass", + "Ġatt itudes", + "go vernment", + "ĠPri or", + "Ġqual ities", + "Ġnot ification", + "ĠPh one", + "l ie", + "Ġanticip ated", + "ĠCom bat", + "ĠBar ry", + "Ġ198 2", + "Us ers", + "on er", + "Ġcomput ing", + "ĠConnect icut", + "Ġless er", + "Ġpe ers", + "ĠC u", + "Ġtechn ically", + "Ġsub mission", + "ĠUn iversal", + "Ġman ually", + "our ge", + "Ġrespond ents", + "ĠB TC", + "ĠH ost", + "Ġf are", + "ĠB ird", + "Ġrece ipt", + "al so", + "Ġj ack", + "Ġagric ulture", + "Ġsk ull", + "Ġ! 
=", + "Ġpass ive", + "ĠC I", + "Ġsoc ieties", + "Ġremind ed", + "Ġinter ference", + "B uy", + "Ġâ ľ", + "g on", + "Ġscrut iny", + "ĠW itch", + "Ġconduct ing", + "Ġ ãĥ", + "Ġexch anges", + "ĠMit chell", + "Ġinhab it", + "Ġtw ist", + "B D", + "Ġwhere ver", + "group on", + "Ġj okes", + "ĠBen jamin", + "ĠR andom", + "fr ame", + "ĠL ions", + "Ġhighlight ed", + "ĠArk ansas", + "E nt", + "Ġp ile", + "Ġpre lim", + "g s", + "mind ed", + "Ġfel ony", + "ĠG A", + "ĠL uck", + "Ġpract ically", + "ĠB os", + "Ġact ress", + "D am", + "ĠB ou", + "Ġvis a", + "Ġembed ded", + "Ġhy brid", + "Ġear liest", + "Ġsoon er", + "s ocial", + "ĠH A", + "Ġste ep", + "Ġdis advant", + "Ġexplo it", + "ĠE gg", + "ĠUlt ra", + "Ġnecess ity", + "L ocal", + "ie ge", + "Ġd ated", + "Ġmass es", + "Ġsubsc ription", + "pl ess", + "Ġan onym", + "Ġpresum ably", + "Bl ue", + "The ir", + "asket ball", + "ĠPhil ip", + "Ġcom ed", + "load ed", + "r ane", + "Ġref lection", + "Ch ina", + "Ġext ends", + "Ġform ing", + "Ġund ers", + "200 1", + "Ġgr at", + "Ġconcent rations", + "Ġins ulin", + "Ġsec ular", + "Ġwh ilst", + "Ġwin ners", + "Ad vertisements", + "Ġdeliber ately", + "ĠWork ing", + "Ġs ink", + "et ics", + "d ale", + "Ġmand ate", + "Ġg ram", + "Ġvac ation", + "Ġwarn ings", + "ri pp", + "ĠTH AT", + "Ġcomment ary", + "Ġint u", + "Ġa est", + "Ġreason ing", + "Ġbreak down", + "ĠZ ombie", + "Ġ-- >", + "ĠPolit ical", + "c ott", + "Ġthr ust", + "Ġtechn ological", + "Ġdec iding", + "Ġtraff icking", + "L ong", + "W elcome", + "pr ising", + "ĠCommun ications", + "Ġend ors", + "Ġsw ift", + "Ġmetab ol", + "co ins", + "res a", + "ĠHT TP", + "Ġen roll", + "ĠH appy", + "us r", + "int age", + "Ġ[ \"", + "u ably", + "ĠM aterial", + "Ġrepe al", + "Se pt", + "k h", + "ĠMod i", + "Ġunder neath", + "ĠI L", + "sh ore", + "Ġdiagn osed", + "ace utical", + "Ġsh ower", + "au x", + "ĠSw itch", + "ĠStre ngth", + "Ġj ihad", + "n ational", + "Ġtra uma", + "uss y", + "on i", + "Ġcons olid", + "Ġcal ories", + "ĠF lynn", + "ag ged", + "16 8", + "ĠP ink", + "Ġfulf ill", + "Ġch ains", + "Ġnot ably", + "ĠA V", + "L ife", + "ĠCh uck", + "m us", + "ĠUr ban", + "ĠH end", + "Ġdep osit", + "ĠS ad", + "Ġaff air", + "OR K", + "ie val", + "ĠF DA", + "Ġt rop", + "ĠOver all", + "Ġvirt ue", + "Ġsatisf action", + "au nd", + "Ġl un", + "ĠSw itzerland", + "ĠOper ation", + "pro cess", + "Ġsh ook", + "Ġcount ies", + "le ased", + "ĠCharl otte", + "1 12", + "Ġtrans cript", + "Ġre dd", + "p ush", + "ĠHe y", + "ĠAn alysis", + "[ \"", + "Ġaltern atives", + "ard less", + "Ġele ph", + "Ġpre jud", + "ĠLe af", + "H aving", + "ĠH ub", + "Ġexpress ions", + "ĠVol ume", + "Ġshock ing", + "ĠRed s", + "Ġread ily", + "Ġplan ets", + "ad ata", + "Ġcollaps ed", + "ĠMad rid", + "Ġir rit", + "i pper", + "ĠEn c", + "ĠW ire", + "Ġbu zz", + "ĠG P", + "ash a", + "Ġaccident ally", + "ur u", + "Ġfrust rated", + "ĠS A", + "Ġhung ry", + "ĠH uff", + "Ġlab els", + "ant o", + "ĠE P", + "Ġbar riers", + ") |", + "ĠBer keley", + "ĠJ ets", + "Ġp airs", + "ĠL an", + "J ames", + "ĠB ear", + "Ġhum or", + "ĠLiber ty", + "Ġmagn itude", + "Ġag ing", + "ĠM ason", + "Ġfriends hip", + "umb ling", + "Ġemer ge", + "Ġnewsp apers", + "Ġam bitious", + "ĠRich ards", + "atern al", + "Ġ198 1", + "Ġcook ies", + "Ġsc ulpt", + "Ġpur suit", + "L ocation", + "Ġscript s", + "p c", + "Ġarrang ements", + "Ġd iameter", + "Ġl oses", + "am ation", + "Ġl iqu", + "ĠJ ake", + "aret te", + "Ġunderstand s", + "ĠZ en", + "v m", + "Ġappro ve", + "Ġw ip", + "Ġult ra", + "Ġint end", + "ĠD I", + "asc ular", + "Ġst ays", + "ĠK or", + "ĠK l", + "Ġinvest ing", + 
"L a", + "Ġbelie ving", + "b ad", + "m outh", + "Ġtaxp ayer", + "ãĥ ĥ", + "ĠQue bec", + "Ġl ap", + "ĠSw iss", + "d rop", + "Ġdr ain", + "ir i", + "et c", + "ft en", + "ĠN ex", + "Ġst raw", + "Ġscream ing", + "Ġcount ed", + "Ġdam aging", + "Ġamb assador", + "cent ury", + "Ġpro x", + "Ġarrest s", + "u v", + "il ateral", + "ĠCh arg", + "Ġpresc ribed", + "Ġindepend ently", + "Ġf ierce", + "ĠB aby", + "Ġb rave", + "Ġsu its", + "= >", + "Ġbas eline", + "ĠR ate", + "Ġis lands", + "Ġ( (", + "g reen", + "ix els", + "Ġname ly", + "ĠVill age", + "th an", + "am y", + "V ersion", + "g mail", + "ential s", + "ĠS ud", + "ĠMel bourne", + "Ġarri ving", + "Ġquant um", + "e ff", + "rop olitan", + "T ri", + "Ġfun eral", + "ĠI R", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "ĠC ob", + "it ably", + "Ġt urb", + "Ġcomb o", + "Re view", + "Ġdeploy ment", + "u ity", + "ĠB ott", + "Ġinv isible", + "Ġrender ing", + "Ġunl ocked", + "Ġa qu", + "ĠVlad imir", + "Ġp ad", + "ĠBr ain", + "ĠLeg acy", + "dr agon", + "ĠKurd ish", + "Ġsound ed", + "Ġdet ained", + "ĠD M", + "g ary", + "Ġd aughters", + "Ġdistur bing", + "uk a", + "ĠPar ad", + "Ġt ast", + "Ġunf ortunate", + "Ġu l", + "em in", + "Ġattend ance", + "tr l", + "Ġpar ks", + "ĠMem orial", + "ĠAl ice", + "oth y", + "gu ard", + "ĠD ise", + "ĠSh an", + "ĠFor um", + "R ich", + "Ġshif ted", + "ue z", + "Ġl ighter", + "ĠMag n", + "Ġc od", + "S ch", + "ham mad", + "P ub", + "3 50", + "ĠP okemon", + "Ġprot otype", + "Ġun re", + "B ase", + "ĠStud ents", + "ĠRep ly", + "ĠCommun ist", + "Ġg au", + "ĠTy ler", + "I Z", + "Ġparticip ated", + "Ġsup rem", + "ĠDet ails", + "Ġvessel s", + "ro d", + "Ġt ribe", + "ke ep", + "Ġassum ptions", + "Ġp ound", + "Ġcr ude", + "ĠAv ailable", + "Ġswim ming", + "Ġin clusion", + "Ġadv ances", + "c ulation", + "Ġconserv ation", + "Ġover d", + "ĠBuff alo", + "Art icle", + "ed ge", + "Ġaw a", + "ĠMad ison", + "Ġsid ew", + "Ġcat ast", + "ĠK rist", + "uc le", + "ĠHigh way", + "ĠTer ror", + "Ġactiv ation", + "Ġuncons cious", + "ĠSat an", + "ĠSus an", + "ill ery", + "Ġarr anged", + "i op", + "Ġrum ors", + "ur ring", + "th ink", + "ĠKe ith", + "ĠK ind", + "Ġavoid ing", + "by n", + "n ut", + "ĠSpe aker", + "r us", + "n ames", + "Ġgu ilt", + "ĠOlymp ics", + "Ġsa il", + "ĠM es", + "lev ant", + "ĠColumb us", + "a ft", + "C ity", + "S outh", + "ĠHar vey", + "ĠP un", + "S everal", + "Ġment ally", + "Ġimp ress", + "m ount", + "ĠUb untu", + "âĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶ", + "ĠSuper man", + "ĠMP s", + "Ġintent ions", + "ĠR acing", + "Ġlike lihood", + "Ġ2 40", + "T otal", + "Ġto ys", + "ĠW atson", + "Ġur ge", + "L ear", + "ĠP aper", + "Ġoccur ring", + "ĠB eng", + "ĠC ert", + "Ġst ones", + "T im", + "ĠTw in", + "z b", + "ĠD ynam", + "Ġpolit ician", + "k ens", + "ĠEnter prise", + "UT ERS", + "Ġab ol", + "Ġref resh", + "Ġarbit rary", + "pe ction", + "Ġtrou bles", + "Ġ} );", + "t v", + "Ġpil ots", + "Ġdist ribute", + "Ġaud it", + "Ġp ause", + "orig inal", + "Ġr ivals", + " £", + "F ig", + "T L", + "ab il", + "ry ing", + "L in", + "ion ed", + "l on", + "Ġf ancy", + "Ġcr ashed", + "Ġt ract", + "Ġshe d", + "Ġcons ume", + "B ased", + "down load", + "in it", + "Ġvolt age", + "Int rodu", + "Ġcondem ned", + "ĠFin ance", + "res pect", + "Ġex cluded", + "Ġestablish ing", + "her ic", + "Ġher itage", + "Ġspect acular", + "Ġun st", + "ĠSnow den", + "ĠL ane", + "S an", + "Ġprotect ions", + "st ruction", + "inc inn", + "Ġmac ro", + "C ustom", + "ios ity", + "Ġes p", + "Ġfunction ing", + "Ġm ush", + "Ġp uzzle", + "Ġeth ical", + "M al", + "Ġgo verning", + "ĠF erguson", + "Ġrest ored", + "Ġst 
ressed", + "ĠCoun ter", + "ĠK as", + "cl ip", + "AN S", + "Ġse iz", + "U K", + "by ss", + "old own", + "ap i", + "Ġperman ently", + "oun ters", + "W est", + "Th rough", + "L ight", + "at oes", + "Ġne at", + "Ġc ord", + "ure r", + "Ġsevere ly", + "ĠA ven", + "Ġinter rog", + "Ġtri ple", + "G iven", + "N umber", + "Ġar ise", + "Ġs her", + "pl ant", + "Ġfl ower", + "ĠC ou", + "Ġat e", + "Ġnew er", + "b ul", + "Ġmean while", + "ĠL air", + "Ġadjust ment", + "ĠCop yright", + "Ġd ivers", + "i ological", + "Ġgam ers", + "o at", + "Ġhistor ically", + "Ġanal og", + "Ġlong time", + "Ġpres cription", + "ĠM ist", + "ĠHy per", + "ĠM aine", + "ĠDe ity", + "Ġmulti pl", + "ĠRe incarn", + "ĠH yd", + "ĠP ic", + "S il", + "r ants", + "ĠC ris", + ". ;", + "( {", + "epend ence", + "Ġrec y", + "ate ur", + "Ġqu ad", + "Ġgl ob", + "Ġcon ced", + "te am", + "Ġcapital ist", + "ĠL ot", + "Ġroy al", + "ĠCy ber", + "Ġblack s", + "met ic", + "ri v", + "ĠD anny", + "Ġsp o", + "ĠR O", + "Ġanim ated", + "rypt ed", + "ĠDep uty", + "Ġrend ered", + "F E", + "Ġstre ak", + "Ġcloud s", + "ĠDou g", + "~~~~ ~~~~", + "Ġdisc our", + "ĠVe h", + "Ġpsych ology", + "ĠJ ourney", + "Ġcry stal", + "ĠFro st", + "Ġsuspic ion", + "Ġrel ate", + "or us", + "ĠC rypt", + "ĠN VIDIA", + "com ed", + "ut ing", + "incinn ati", + "Ġvulner ability", + "ost ic", + "Ġisol ation", + "Ġcool ing", + "ĠCoal ition", + "Ġ1 19", + "F our", + "ĠDe al", + "Ġâ ī", + "se mble", + "ram ent", + "ĠBar celona", + "Ġ10 2", + "Ġcoc aine", + "ocaly pse", + "F eb", + "ogen ic", + "Ġmut ation", + "Ġcrypt oc", + "ĠK el", + "ĠG it", + "a is", + "Ġs isters", + "AN K", + "Ġactiv ate", + "T er", + "Ġd read", + "yl on", + "Ġprop ri", + "A ust", + "ĠDef ault", + "Ġout door", + "Ġshe er", + "ce ive", + "Ġg ently", + "Ð ¾", + "Pro gram", + "Ġâ ĨĴ", + "Ġve gan", + "ĠCr us", + "Ġrespons ibilities", + "ĠH R", + "OL D", + "Ġprev ents", + "Ġst iff", + "ĠW ere", + "Ġathlet ic", + "ĠSc ore", + "Ġ) :", + "Ġcolumn s", + "ĠL oc", + "av ailable", + "ĠF ram", + "ĠS essions", + "Ġcompan ion", + "Ġpack s", + "14 0", + "ĠKn ights", + "Ġf art", + "Ġstream s", + "Ġsh ore", + "Ġapp eals", + "ĠPer formance", + "h aul", + "ĠSt ra", + "ĠN ag", + "10 3", + "ĠTrans portation", + "B B", + "E v", + "z an", + "P ublic", + "Ġtw in", + "uls ion", + "M ult", + "Ġelect ro", + "Ġstat ue", + "ation ally", + "ĠN ort", + "Ġins pection", + "/ *", + "ig ue", + "Ġcomp assion", + "ĠT ales", + "ĠSte in", + "ĠSc reen", + "ĠB ug", + "ĠL ion", + "g irl", + "Ġwithdraw al", + "Ġobject ives", + "Ġblood y", + "Ġprelim inary", + "Ġj acket", + "Ġdim ensions", + "ĠC ool", + "ĠOcc up", + "Ġw reck", + "Ġdoub led", + "ank ing", + "Ġ19 75", + "Ġglass es", + "ĠW ang", + "pro v", + "P ath", + "connect ed", + "ĠMult i", + "ĠNor way", + "agon ist", + "Ġfe ared", + "Ġtouch ing", + "Ġarg uably", + "¯¯¯¯ ¯¯¯¯", + "ĠNC AA", + "che m", + "Ġsp at", + "ĠW WE", + "ĠC el", + "ig ger", + "Ġattack er", + "ĠJo in", + "ob ject", + "ett a", + "Ġelim inated", + "d et", + "Ġdest ruct", + "ĠLuc as", + "ct uary", + "18 0", + "ĠBr ady", + "ĠBl ues", + "B ay", + "au kee", + "Ġtim eline", + "Ġdeleg ates", + "w ritten", + "uff icient", + "Ġsh apes", + "Cop yright", + "ou ble", + "serv ice", + "Ġp ione", + "Ġcolleg es", + "Ġrow s", + "Ġsp ite", + "Ġassess ed", + "3 60", + "Ġle ase", + "Ġconfident ial", + "ck er", + "ĠMan ning", + "ĠV oice", + "Ġse aled", + "Ġcalcul ate", + "N O", + "ĠAss istant", + "Ġteen ager", + "ul ent", + "ather ine", + "Ġm ock", + "Ġd iamond", + "Ġf est", + "Ġsw itched", + "Ġres ume", + "ĠPu erto", + "Ġl anes", + "ir ation", + "ĠSimilar ly", 
+ "Ġro d", + "ĠS el", + "ĠPal ace", + "ĠLim ited", + "e ous", + "Ġvar iant", + "Ġw ard", + "Ġ) )", + "Sh ow", + "OO K", + "A lex", + "ĠN ep", + "br is", + "ĠWik ipedia", + "Ġexcept ional", + "Ġman ages", + "ĠD raw", + "Ag ain", + "Ġco pper", + "ut t", + "Ġex ports", + "Ġport folio", + "Ġelev ated", + "R ated", + "ĠOther wise", + "ĠT act", + "ĠShe l", + "ĠT X", + "\" âĢĶ", + "Ġres ur", + "ĠW a", + "ven ant", + "Ġmon etary", + "pe ople", + "E mail", + "Ġfif ty", + "ĠS weet", + "ĠMalays ia", + "Ġconf using", + "ĠR io", + "ud a", + "uten ant", + "\" );", + "Ġpra ised", + "Ġvol umes", + "t urn", + "Ġm ature", + "Ġnon profit", + "Ġpassion ate", + "ĠPriv ate", + "Ġ10 3", + "Ġdesc end", + "ç ¥ŀ", + "uff y", + "head ed", + "Whe ther", + "ri en", + "ze ch", + "be it", + "Ġch rom", + "ĠMc M", + "Ġd ancing", + "Ġe leg", + "ĠNot iced", + "11 5", + "Ġadvoc acy", + "ENT S", + "amb ling", + "ĠMin or", + "ĠF inn", + "Ġprior ities", + "Ġthere of", + "ĠSt age", + "ĠRog ers", + "Ġsubst itute", + "ĠJ ar", + "ĠJeff erson", + "Ġlight ly", + "10 2", + "ĠL isa", + "u its", + "ys ical", + "Ġshif ts", + "Ġd rones", + "Ġwork place", + "Ġres id", + "ens ed", + "ah n", + "Ġpref erences", + "ser ver", + "Ġdeb ates", + "d oc", + "ĠGod s", + "Ġhelicop ter", + "Ġhon our", + "Ġconsider ably", + "ed ed", + "ĠF emale", + "ĠAn ne", + "Ġre un", + "ĠF ace", + "ĠHall ow", + "ĠBud get", + "Ġcondem n", + "Ġt ender", + "Pro f", + "ocr atic", + "ĠTurn er", + "ĠAg ric", + "Ġ19 76", + "Ġa pt", + "d isc", + "ĠF ighter", + "ĠA ur", + "Ġgar bage", + "in put", + "ĠK arl", + "ĠOl iver", + "ĠL anguage", + "k n", + "N on", + "ĠCl ar", + "Ġtrad itions", + "Ġad vertisement", + "ĠS or", + "Ġarch ive", + "Ġvill ages", + "7 50", + "Ġimplement ing", + "w aukee", + "Ġdiet ary", + "Ġswitch ing", + "Rep ublic", + "Ġvel ocity", + "Ġc it", + "ĠA wards", + "Ġfin ancing", + "Ġlast ed", + ") ]", + "Ġrem inder", + "P erson", + "Ġprec ision", + "Ġdesign ers", + "ĠF ried", + "ĠB order", + "Ġtr agic", + "Ġw ield", + "Ġiniti atives", + "ĠT ank", + "w er", + "Ġjo ins", + "R o", + "in ery", + "Ġar row", + "Ġgener ating", + "found er", + "Ġsear ches", + "Ġrandom ly", + "A ccess", + "Ġb atch", + "Ġp osed", + "l at", + "Ġpursu ing", + "as a", + "Ġtest ified", + "form ing", + "ĠSh ar", + "w iki", + "ĠE ither", + "S ometimes", + "Ġsen ators", + "ĠJohn ny", + "ĠTal iban", + "ĠG PS", + "\":\" /", + "ãģ® å", + "Ġanaly zed", + "ĠRub io", + "ĠMove ment", + "op ard", + "ii i", + "St and", + "f ight", + "Ġign oring", + "i ang", + "ĠG N", + "so ever", + "ĠST AT", + "Ġref using", + "Ġswe at", + "Ġb ay", + "P ORT", + "ir med", + "ak y", + "Ġdis pro", + "Ġlabel ed", + "Ġ10 8", + "H ello", + "Ġple asant", + "ab a", + "Ġtri umph", + "Ġab oard", + "Ġinc om", + "ĠC row", + "le tt", + "Ġfol k", + "Ġch ase", + "` `", + "ĠBr us", + "Ġte ens", + "c ue", + "Ġter rain", + "h yd", + "il ight", + "OR Y", + "Su pport", + "ew s", + "ll i", + "rain ts", + "ĠC and", + "Ġab used", + "ach ment", + "l arg", + "B as", + "ĠC ancer", + "Ġ19 78", + "Ġsupp orter", + "ac cess", + "ĠTer min", + "ĠT ampa", + "ĠAN Y", + "Ġnew est", + "ĠCrim inal", + "ed u", + "Ġ19 30", + "Ġadm its", + "Ġend e", + "Ġfail ures", + "ur ate", + "ful ness", + "cy cl", + "ĠSub ject", + "Ġinf inite", + "th ree", + "W A", + "p it", + "ĠInst all", + "R ad", + "ili ation", + "G M", + "Ġcontin ent", + "Ġaccommod ate", + "ĠCl ay", + "Ġp up", + "ĠF unction", + "Ġham mer", + "ĠAlbert a", + "Ġrev ised", + "Ġminor ities", + "Ġmeasure ment", + "Con nell", + "Ġdis able", + "ĠM ix", + "In cre", + "Ġfor k", + "ĠR osen", + "Ġimpl ies", + "umb 
lr", + "AN G", + "Ġprote ins", + "Ġagg ression", + "Ġfacilit ate", + "S N", + "Ġilleg ally", + "u er", + "Ġacad em", + "Ġp uzz", + "ĠSh ift", + "p ay", + "oll o", + "Ġaud iences", + "B uild", + "Ġno ble", + "Ġsynt ax", + "â ĺħ", + "Ġbe am", + "ĠB ed", + "ĠA ld", + "Ġorig ins", + "v ideo", + "Ġ19 77", + "ĠAss ault", + "Ġgar age", + "Te am", + "Ġver dict", + "Ġd war", + "ĠVirt ual", + "e vent", + "Ke ep", + "Ġsent iment", + "Ġwild life", + "sh irt", + "Ġb urg", + "Ġrecommend ation", + "rep resent", + "Ġgall ery", + "own ers", + "Ġsch olar", + "Ġconven ience", + "ĠSw ift", + "Ġconv inc", + "C ap", + "Ġwar fare", + "ĠVis ual", + "Ġconst itute", + "Ġab ort", + "ĠWe ather", + "ĠLook ing", + "ĠH em", + "Ġmart ial", + "Ġinc oming", + "et ition", + "Ġtoler ance", + "ĠCre ated", + "Ġfl ows", + "ĠE lder", + "Ġsoul s", + "Ġf oul", + "ĠP ain", + "ĠC AN", + "Ġ2 20", + "b c", + "he nd", + "Ġgen ius", + "R eal", + "ĠW r", + "omet er", + "p ad", + "Ġlim iting", + "ĠS i", + "ĠL ore", + "ĠAd ventures", + "Ġvar ied", + "D isc", + "f in", + "ĠPerson al", + "Ch ris", + "Ġinv ented", + "Ġd ive", + "ĠR ise", + "Ġo z", + "ĠCom ics", + "Ġexp ose", + "ĠRe b", + "let ters", + "s ite", + "im ated", + "Ġh acking", + "Ġeduc ated", + "ĠNob ody", + "Ġdep ri", + "Ġincent ive", + "ãĤ ·", + "Ġovers ight", + "Ġtrib es", + "ĠBelg ium", + "Ġlicens ing", + "our t", + "Produ ct", + "ah l", + "ĠG em", + "Ġspecial ist", + "Ġc ra", + "ann ers", + "ĠCor byn", + "Ġ19 73", + "RE AD", + "Ġsum mar", + "Ġover look", + "ĠApp lication", + "Ġin appropriate", + "Ġdownload ed", + "Q ue", + "ĠB ears", + "Ġth umb", + "ĠChar acter", + "ĠReincarn ated", + "ĠS id", + "Ġdemonstr ates", + "s ky", + "ĠBloom berg", + "ĠAr ray", + "ĠRes ults", + "ĠFour th", + "ĠED T", + "ĠO scar", + "c end", + "Ġ10 6", + "ĠN ULL", + "ĠH ERE", + "m atch", + "ĠBr un", + "Ġgluc ose", + "ie g", + "eg u", + "Ġcert ified", + "Ġrel ie", + "Ġhuman itarian", + "Ġpr ayers", + "K ing", + "Ġn an", + "h ou", + "10 8", + "ul u", + "Ġrenew able", + "Ġdistingu ish", + "Ġd ense", + "ĠV ent", + "ĠPack age", + "ĠB oss", + "Ġedit ors", + "Ġm igr", + "T ra", + "ĠPet ers", + "ĠAr ctic", + "200 4", + "ĠC ape", + "Ġloc ally", + "Ġlast ing", + "Ġhand y", + ". 
).", + "P an", + "ĠR ES", + "Ind ex", + "Ġt ensions", + "Ġformer ly", + "Ġide ological", + "Ġsens ors", + "Ġdeal ers", + "Ġdef ines", + "S k", + "Ġproceed s", + "Ġpro xy", + "az ines", + "ĠB ash", + "ĠP ad", + "ĠC raft", + "eal ous", + "Ġshe ets", + "omet ry", + "J une", + "cl ock", + "T T", + "ĠThe atre", + "ĠB uzz", + "Ġch apters", + "Ġmill enn", + "Ġd ough", + "ĠCongress ional", + "Ġimag ined", + "av ior", + "Ġclin ic", + "Ġ19 45", + "Ġhold er", + "ro ot", + "oles ter", + "Ġrest art", + "B N", + "ĠHam as", + "ĠJ ob", + "Ġor b", + "Ġr am", + "Ġdiscl ose", + "Ġtransl ate", + "Ġimm igrant", + "Ġannoy ing", + "Ġtreat y", + "an ium", + "ĠTe a", + "ĠLeg ion", + "Ġcrowd s", + "ĠB ec", + "ĠA er", + "oh yd", + "B ro", + "Look ing", + "Ġl bs", + "Ġagg ress", + "Ġse am", + "Ġinter cept", + "ĠM I", + "mer cial", + "act iv", + "ĠC it", + "Ġdim ension", + "Ġconsist ency", + "Ġr ushing", + "ĠDou glas", + "Ġtr im", + "Inst all", + "ick er", + "Ġsh y", + "10 6", + "Ġment ions", + "pe lled", + "ĠT ak", + "c ost", + "Ġclass room", + "Ġfort une", + "dri ven", + "Ġun le", + "ĠWhe el", + "Ġinvest or", + "ĠM asters", + "k it", + "Ġassoci ations", + "ĠEv olution", + "op ing", + "us cript", + "Ġprov incial", + "ĠWal ter", + "av i", + "S O", + "Ġun limited", + "Eng lish", + "ĠC ards", + "ĠEb ola", + "ne red", + "Ġreven ge", + "Ġout right", + "um per", + "Ġf itting", + "ĠSol id", + "Ġform ally", + "Ġproblem atic", + "Ġhaz ard", + "Ġenc ryption", + "Ġstraight forward", + "ĠA K", + "Ġp se", + "ĠOr b", + "ĠCh amber", + "ĠM ak", + "Cont ents", + "Ġloyal ty", + "Ġl yrics", + "ĠSy m", + "Ġwel comed", + "Ġcook ed", + "Ġmon op", + "Ġn urse", + "Ġmis leading", + "Ġe ternal", + "Ġshif ting", + "Ġ+ =", + "V is", + "Ġinst itutional", + "ill ary", + "Ġp ant", + "VER T", + "ĠA CC", + "ĠEn h", + "Ġinc on", + "ĠRE UTERS", + "Ġdon ated", + "âĢ¦âĢ¦ âĢ¦âĢ¦", + "In tern", + "Ġexhib it", + "Ġt ire", + "ĠR ic", + "ĠCh ampion", + "ĠMu hammad", + "N ING", + "ĠSoc cer", + "Ġmob ility", + "Ġvary ing", + "ĠM ovie", + "Ġl ord", + "o ak", + "F ield", + "Ġve ctor", + "us ions", + "Ġsc rap", + "Ġen abling", + "m ake", + "T or", + ". 
*", + "| |", + "ĠWe bsite", + "ĠN PC", + "Ġsocial ist", + "ĠBill y", + "ĠAdd itional", + "Ġc argo", + "Ġfar ms", + "ĠSo on", + "ĠPri ze", + "Ġmid night", + "Ġ9 00", + "se en", + "ĠSp ot", + "Ġshe ep", + "Ġspons ored", + "ĠH i", + "ĠJ ump", + "Ġ19 67", + "Micro soft", + "ĠAg ent", + "Ġch arts", + "d ir", + "Ġadj acent", + "Ġtr icks", + "Ġman ga", + "Ġex agger", + "/ >", + "foot ball", + "ĠF CC", + "G C", + "ĠT ier", + "and ra", + "OU ND", + "% ),", + "Ġfru its", + "V C", + "ĠA A", + "R ober", + "Ġmid st", + "â Ĺ", + "ank a", + "Ġlegisl ature", + "ĠNe il", + "Ġtour ists", + "\" \"", + "ĠWar ning", + "ĠNever theless", + "ĠOffic ial", + "ĠWh atever", + "Ġm old", + "Ġdraft ed", + "Ġsubst ances", + "Ġbre ed", + "Ġt ags", + "ĠT ask", + "Ġver b", + "Ġmanufact ured", + "com ments", + "ĠPol ish", + "Pro v", + "Ġdetermin es", + "Ob ama", + "k ers", + "Ġutter ly", + "Ġse ct", + "sc he", + "ĠG ates", + "ĠCh ap", + "Ġal uminum", + "Ġz ombie", + "ĠT ouch", + "ĠU P", + "Ġsatisf y", + "Ġpred omin", + "asc ript", + "Ġelabor ate", + "Ġ19 68", + "Ġmeas uring", + "ĠV ari", + "any ahu", + "Ġs ir", + "ul ates", + "id ges", + "ick ets", + "ĠSp encer", + "T M", + "oub ted", + "Ġpre y", + "Ġinstall ing", + "ĠC ab", + "re ed", + "re ated", + "Su pp", + "Ġwr ist", + "ĠK erry", + "10 7", + "ĠK le", + "ĠR achel", + "Ġc otton", + "ĠA RE", + "ĠE le", + "Cont rol", + "Ġload s", + "ĠD od", + "an as", + "b one", + "Ġclass ical", + "ĠReg ional", + "ĠInt eg", + "V M", + "Ġdes ires", + "Ġaut ism", + "support ed", + "ĠM essage", + "Ġcomp act", + "writ er", + "Ġ10 9", + "ĠHur ricane", + "c ision", + "Ġcy cles", + "Ġdr ill", + "Ġcolle ague", + "Ġm aker", + "G erman", + "Ġmist aken", + "S un", + "ĠG ay", + "Ġwhat soever", + "Ġsell s", + "ĠA irl", + "l iv", + "ĠO ption", + "Ġsol ved", + "Ġse ctors", + "Ġhorizont al", + "Ġequ ation", + "ĠSk ill", + "ĠB io", + "g ement", + "ĠSn ap", + "ĠLeg al", + "Ġtradem ark", + "Ġmake up", + "Ġassemb led", + "Ġsa ves", + "ĠHallow een", + "ĠVer mont", + "ĠFR OM", + "Ġfar ming", + "ĠP odcast", + "accept able", + "ĠHig her", + "Ġas leep", + "ull ivan", + "Ġrefere n", + "ĠLe v", + "Ġbul lets", + "ok o", + "H C", + "Ġst airs", + "Ġmain tains", + "ĠL ower", + "ĠV i", + "Ġmar ine", + "Ġac res", + "Ġcoordin ator", + "ĠJ oh", + "Ġcounterpart s", + "ĠBrother s", + "Ġind ict", + "b ra", + "Ġch unk", + "Ġc ents", + "H ome", + "ĠMon th", + "Ġaccording ly", + "if les", + "ĠGerm ans", + "ĠSy n", + "H ub", + "Ġey eb", + "âĶĢâĶĢ âĶĢâĶĢ", + "Ġr anges", + "ĠHoll and", + "ĠRob ot", + "f c", + "M ike", + "Ġpl asma", + "Ġsw ap", + "Ġath lete", + "ĠR ams", + ",' \"", + "Ġinfect ions", + "Ġcor rid", + "Ġv ib", + "Ġpat ches", + "Ġtradition ally", + "Ġrevel ation", + "Ġswe ep", + "Ġgl ance", + "Ġin ex", + "200 3", + "ĠR aw", + "work ing", + "os ures", + "ĠD at", + "ĠLyn ch", + "Ġle verage", + "ĠRe id", + "Ġcorrel ation", + "ian ces", + "av ascript", + "Ġrep ository", + "ret ty", + "Ġ19 72", + "24 0", + "Ġo un", + "p ol", + "ĠRe ed", + "Ġtact ical", + "is ite", + "App le", + "ĠQu inn", + "Ġrap ed", + "ill o", + "Euro pe", + "Ġalgorith ms", + "ĠRod rig", + "i u", + "Ġill um", + "Ġf ame", + "Ġintrodu cing", + "Ġdel ays", + "ĠRaid ers", + "Ġwh istle", + "Ġnovel s", + "ĠRe ally", + "Ġder iv", + "Ġpublic ations", + "ĠNe ither", + "ĠCom merce", + "Ġa ston", + "l anguage", + "Not es", + "ĠR oth", + "ĠF ear", + "Ġm ate", + "Ġpar ade", + "ĠQ B", + "Ġman eu", + "ĠC incinnati", + "m itting", + "Ġwa ist", + "ĠR ew", + "Ġdisc ont", + "Ð °", + "Ġst aring", + "Ġal ias", + "Ġsec urities", + "Ġtoile t", + "ĠJ edi", + "Ġun law", + "v ised", 
+ "//// ////", + "] (", + "ĠWe iss", + "Ġpre st", + "ĠComp an", + "Ġmem o", + "ĠGr ace", + "J uly", + "ĠEl ite", + "cent er", + "ĠSt ay", + "Ġgal axy", + "Ġto oth", + "ĠS ettings", + "Ġsubject ed", + "ãĤ ¦", + "Ġline back", + "Ġretail ers", + "ĠW ant", + "Ġd angers", + "A ir", + "Ġvolunt ary", + "ew ay", + "Ġinterpret ed", + "ot ine", + "à §", + "Ġp el", + "Serv ice", + "ĠEvent ually", + "Ġcare ers", + "Ġthreat en", + "Ġmem or", + "ĠBrad ley", + "anc ies", + "s n", + "ĠUn known", + "N ational", + "Ġsh adows", + "ail and", + "ĠD ash", + "Every one", + "izz ard", + "M arch", + "= (", + "Ġpull s", + "Ġstr anger", + "Ġback wards", + "ĠBern ard", + "imens ional", + "Ġch ron", + "Ġtheoret ical", + "k top", + "Ġw are", + "ĠInvest ig", + "ĠIn iti", + "ĠOper ations", + "o ven", + "oc ide", + "* /", + "Ġfl ames", + "ĠC ash", + "sh it", + "Ġc ab", + "ĠAn aly", + "ĠSe ah", + "Ġdefin ing", + "Ġorder ing", + "Ġimm un", + "Ġpers istent", + "AC H", + "Russ ian", + "m ans", + "Ġh ind", + "Ġphot ography", + " ©", + "Ġh ug", + "Ġ10 7", + "ĠH ence", + "i ots", + "ude au", + "Ġsubsid ies", + "Ġroutine ly", + "ĠDev ice", + "it ic", + "Ġdisg ust", + "land er", + "Ġ19 40", + "Ġassign ment", + "ĠB esides", + "w ick", + "ĠD ust", + "us c", + "struct ed", + "11 1", + "de velop", + "Ġf ond", + "Ġinter section", + "Ġdign ity", + "Ġcommission er", + "With out", + "re ach", + "Ġcart oon", + "Ġsc ales", + "ãĥ Ń", + "F IG", + "Ġsurve ys", + "ĠIndones ia", + "Ġart work", + "Ġun ch", + "Ġcy cling", + "un ct", + "au er", + "or ate", + "ĠOb viously", + "Ġcharacter ized", + "fe ld", + "Ġaff irm", + "Ġinn ings", + "Ġ é", + "Ġal iens", + "Ġcl oth", + "et ooth", + "ĠC ertain", + " §", + "Ġdig est", + "k now", + "ĠX L", + "Ġpredict ions", + "Ġd in", + "W AR", + "Ġafter math", + "Ex ample", + "ĠSu ccess", + "ĠTh r", + "IG N", + "Ġmin er", + "B us", + "Ġcl arity", + "heim er", + "ĠO UT", + "ĠS end", + "ĠCirc le", + "ĠD iet", + "Ġpron ounced", + "Ġcreat ors", + "Ġearthqu ake", + "atter y", + "ge ons", + "Ġo d", + "Ġlay ing", + "or p", + "U lt", + "pro ject", + "Ġunder min", + "Ġsequ el", + "S am", + "ĠDark ness", + "Ġre ception", + "b ull", + "Y S", + "ĠV ir", + "Ġsequ ences", + "ĠCo in", + "Ġout fit", + "ĠW ait", + "1 19", + "Ġdel ivers", + ".... 
..", + "Ġbl own", + "ĠE sc", + "ĠM ath", + "per m", + "ĠU l", + "Ġgl im", + "Ġfac ial", + "Ġgreen house", + "Ġto kens", + "/ -", + "ĠAnn ual", + "ĠON E", + "Ġteen age", + "ĠPhys ical", + "ĠL ang", + "ĠC elt", + "Ġsu ed", + "ivid ually", + "Ġpat ience", + "ch air", + "reg ular", + "Ġa ug", + "in v", + "ex cept", + "ĠL il", + "Ġn est", + "f d", + "s um", + "ĠCh ase", + "Russ ia", + "ĠJenn ifer", + "Ġoff season", + "Over all", + "F ore", + "Ġr iot", + "A ud", + "form er", + "Ġdefend ers", + "ĠC T", + "iot ic", + "rib ly", + "Ġautom ated", + "Ġpen is", + "Ġins ist", + "Ġdi agram", + "ĠS QL", + "ĠG arc", + "Ġw itch", + "cl ient", + "ier ra", + "am bers", + "Ġrec ount", + "f ar", + "V ery", + "oster one", + "Ġappreci ated", + "ĠPer fect", + "S ection", + "Ġd oses", + "oca ust", + "Ġcost ly", + "Ġg rams", + "ĠSh i", + "Ġwrest ling", + "Ġ19 71", + "Ġtro phy", + "Ġn erve", + "ĠK az", + "ĠExper ience", + "Ġpled ged", + "Ġplay back", + "Ġcreat ivity", + "by e", + "Ġattack ers", + "Ġhold ers", + "ĠCo ach", + "ĠPh D", + "Ġtransf ers", + "Ġcol ored", + "ĠH indu", + "Ġd rown", + "Ġlist ened", + "ĠW A", + "ias m", + "P O", + "Ġappeal ing", + "Ġdiscl osed", + "ĠCh icken", + "ag ging", + "Ġple aded", + "Ġnav igation", + "ĠReturn s", + "Ġ[ [", + "R OR", + "E A", + "Ġphotograp her", + "ĠR ider", + "ipp ers", + "Ġsl ice", + "Ġe rect", + "Ġhe d", + "iss ance", + "ĠVik ings", + "ur ious", + "Ġapp et", + "oubted ly", + "Ch ild", + "Ġauthent ic", + "o os", + "ĠM aking", + "Ġannoun cing", + "Ġb od", + "Ġmet er", + "ĠN ine", + "ĠR ogue", + "Ġwork force", + "Ġrenew ed", + "Ġorganis ations", + "ac s", + "P LE", + "Sh ort", + "Ġcomp ounds", + "ĠVis it", + "Ġen velop", + "ear th", + "Ġsupport ive", + "gg le", + "ĠBrus sels", + "ĠGu ild", + "Cre ate", + "RE L", + "Ġaver aged", + "Ġ19 69", + "ri ages", + "Ġlength y", + "Ġforg ot", + "O kay", + "ĠE rd", + "Ġdeal er", + "Ġrec ession", + "D D", + "Ġdesper ately", + "Ġhun ger", + "Ġst icks", + "Ġm ph", + "ĠF aith", + "Ġintention ally", + "Ġdem ol", + "ue ller", + "ĠS ale", + "Ġde bris", + "s pring", + "Ġle ap", + ">> >>", + "Ġcontain ers", + "se lling", + "rane an", + "atter ing", + "Ġcomment ed", + "ĠC M", + "on ut", + "Ġwood s", + "es pecially", + "Ġorgan ize", + "iv ic", + "ĠWood s", + "ang a", + "s qu", + "Ġm aj", + "am on", + "Ġax is", + "Ġ19 74", + "ĠDen mark", + "Ġwar rior", + "ĠP and", + "Ġout lined", + "ĠB O", + "ins ula", + "z illa", + "eb ook", + "Ġd are", + "Ġsear ched", + "Ġnav igate", + "S n", + "writ ing", + "Ġun ited", + "J apan", + "ĠHe brew", + "Ġfl ame", + "Ġrel ies", + "Ġcatch ing", + "ĠSh o", + "Ġimprison ment", + "Ġp ockets", + "Ġclos ure", + "ĠF am", + "t im", + "ade qu", + "Act ivity", + "Ġrecru iting", + "ĠW ATCH", + "ĠArgent ina", + "d est", + "Ġapolog ize", + "or o", + "Ġlack s", + "Ġtun ed", + "ĠGriff in", + "Ġinf amous", + "Ġcelebr ity", + "ss on", + "Ġ ----------------------------------------------------------------", + "ĠIs is", + "ĠDis play", + "Ġcred ibility", + "Ġeconom ies", + "Ġhead line", + "ĠCow boys", + "Ġind ef", + "Ġl ately", + "Ġincent ives", + "but ton", + "ĠM ob", + "A ut", + "Ġres igned", + "ĠO m", + "c amp", + "Ġprof iles", + "Ġsche mes", + "olph ins", + "ay ed", + "Cl inton", + "en h", + "ĠY ahoo", + "Ġab st", + "Ġan k", + "su its", + "Ġw ished", + "ĠMar co", + "udd en", + "Ġsp here", + "ĠB ishop", + "Ġincorpor ated", + "ĠPl ant", + "11 4", + "Ġh ated", + "p ic", + "Ġdon ate", + "Ġl ined", + "Ġbe ans", + "Ġsteal ing", + "Ġcost ume", + "Ġsher iff", + "Ġfor ty", + "Ġint act", + "Ġadapt ed", + "Ġtrave lling", + "b art", + "Ġnice 
ly", + "Ġdri ed", + "Ġsc al", + "os ity", + "NOT E", + "ĠB h", + "ĠBron cos", + "ĠI gn", + "Ġint imate", + "Ġchem istry", + "Ġopt imal", + "D eb", + "ĠGener ation", + "Ġ] ,", + "ich i", + "ĠW ii", + "ĠYOU R", + "vent ions", + "W rite", + "Ġpop ul", + "un ning", + "ĠW or", + "V ol", + "Ġqu een", + "head s", + "K K", + "Ġanaly ze", + "op ic", + "ear chers", + "Ġd ot", + "leg raph", + "ast ically", + "Ġupgr ades", + "Ġca res", + "Ġext ending", + "Ġfree ze", + "Ġin ability", + "Ġorg ans", + "Ġpret end", + "Ġout let", + "11 3", + "ol an", + "ĠM all", + "ul ing", + "t alk", + "Ġexpress ing", + "ĠAl ways", + "ĠBe gin", + "f iles", + "Ġlic enses", + "% %", + "ĠM itt", + "Ġfil ters", + "ĠMil waukee", + "G N", + "Ġunf old", + "M o", + "Ġnut rition", + "pp o", + "B o", + "Ġfound ing", + "Ġunder mine", + "Ġeas iest", + "ĠC zech", + "ĠM ack", + "Ġsexual ity", + "ĠN ixon", + "W in", + "ĠAr n", + "ĠK in", + "ãĤ £", + "ic er", + "Ġfort un", + "Ġsurf aces", + "agh d", + "Ġcar riers", + "ĠP ART", + "ĠT ib", + "Ġinter val", + "Ġfrust rating", + "ĠSh ip", + "ĠAr med", + "ff e", + "Ġbo ats", + "ĠAb raham", + "in is", + "Ġsu ited", + "th read", + "i ov", + "ab ul", + "ĠVenezuel a", + "Ġto m", + "su per", + "Ġcast le", + "alth ough", + "iox ide", + "ec hes", + "Ġevolution ary", + "Ġnegoti ate", + "Ġconfront ed", + "Rem ember", + "Ġ17 0", + "S uch", + "Ġ9 11", + "m ult", + "ĠA byss", + "ur ry", + "ke es", + "spe c", + "ĠBarb ara", + "Ġbelong ing", + "Ġvill ain", + "ist ani", + "Ġaccount able", + "Ġport ions", + "ĠDe cl", + "U r", + "ĠK ate", + "g re", + "Ġmag azines", + "UC K", + "Ġregul ate", + "om on", + "ĠAl most", + "Ġover view", + "Ġsc ram", + "Ġl oot", + "ĠF itz", + "Ġcharacter istic", + "ĠSn ake", + "s ay", + "ĠR ico", + "Ġtra it", + "ĠJo ined", + "au cus", + "Ġadapt ation", + "ĠAirl ines", + "Ġarch ae", + "ĠI de", + "Ġb ikes", + "Ġliter ary", + "Ġinflu ences", + "ĠUs ed", + "C reat", + "Ġple a", + "ĠDef ence", + "ĠAss ass", + "Ġp ond", + "UL T", + ") \"", + "Ġeval uated", + "Ġob taining", + "Ġdem ographic", + "Ġvig il", + "ale y", + "Ġsp ouse", + "ĠSeah awks", + "resp ons", + "ĠB elt", + "um atic", + "Ġr ises", + "run ner", + "ĠMichel le", + "Ġpot ent", + "r ace", + "ĠP AC", + "F ind", + "olester ol", + "IS S", + "ĠIntrodu ced", + "ress es", + "ign ment", + "O s", + "ĠT u", + "ĠDe x", + "ic ides", + "Ġspark ed", + "ĠLaur a", + "ĠBry ant", + "Ġsm iling", + "ĠNex us", + "Ġdefend ants", + "ĠCat al", + "Ġdis hes", + "sh aped", + "Ġpro long", + "m t", + "( $", + "ãĢ Ĥ", + "Ġcalcul ations", + "ĠS ame", + "Ġp iv", + "H H", + "Ġcance lled", + "Ġgr in", + "Ġterrit ories", + "ist ically", + "C ome", + "ĠP arent", + "Pro ject", + "Ġneg lig", + "ĠPriv acy", + "Ġam mo", + "LE CT", + "olute ly", + "ĠEp ic", + "Ġmis under", + "w al", + "Apr il", + "m os", + "path y", + "ĠC arson", + "Ġalbum s", + "ĠE asy", + "Ġpist ol", + "< <", + "Ġ\\ (", + "t arget", + "hel p", + "Ġinter pre", + "cons cious", + "ĠH ousing", + "ĠJ oint", + "12 7", + "Ġbe ers", + "s cience", + "ĠFire fox", + "effect ive", + "ĠC abin", + "ĠO kay", + "ĠApp lic", + "Ġspace craft", + "ĠS R", + "ve t", + "ĠStr ange", + "S B", + "Ġcor ps", + "iber al", + "e fficient", + "Ġpreval ence", + "Ġeconom ists", + "11 8", + "Th read", + "ord able", + "OD E", + "ĠC ant", + "=- =-", + "if iable", + "ĠA round", + "Ġpo le", + "Ġwilling ness", + "CL A", + "ĠK id", + "Ġcomple ment", + "Ġsc attered", + "Ġin mates", + "Ġble eding", + "e very", + "Ġque ue", + "ĠTr ain", + "Ġh ij", + "Ġme lee", + "ple ted", + "Ġdig it", + "Ġg em", + "offic ial", + "Ġlif ting", + "Ð µ", + "Re 
qu", + "it utes", + "Ġpack aging", + "ĠWork ers", + "h ran", + "ĠLeban on", + "ol esc", + "Ġpun ished", + "ĠJ uan", + "Ġj am", + "ĠD ocument", + "Ġm apping", + "ic ates", + "Ġinev itably", + "Ġvan illa", + "ĠT on", + "Ġwat ches", + "Ġle agues", + "Ġiniti ated", + "deg ree", + "port ion", + "Ġrec alls", + "Ġru in", + "Ġm elt", + "I AN", + "Ġhe m", + "Ex p", + "Ġb aking", + "ĠCol omb", + "at ible", + "Ġrad ius", + "pl ug", + "ĠI F", + "et ically", + "Ġf ict", + "H ER", + "ĠT ap", + "atin um", + "Ġin k", + "Ġco h", + "ĠW izard", + "b oth", + "te x", + "Ġsp ends", + "ĠCurrent ly", + "ĠP it", + "Ġneur ons", + "ig nt", + "Ġr all", + "Ġbus es", + "b uilding", + "Ġadjust ments", + "Ġc ried", + "ibl ical", + "att ed", + "ĠZ ion", + "ĠM atter", + "Ġmed itation", + "ĠD ennis", + "Ġour s", + "ĠT ab", + "Ġrank ings", + "ort al", + "Ġad vers", + "Ġsur render", + "ĠG ob", + "ci um", + "om as", + "im eter", + "Ġmulti player", + "Ġhero in", + "Ġoptim istic", + "Ġindic ator", + "ĠBr ig", + "Ġgro cery", + "Ġapplic ant", + "ĠRock et", + "v id", + "Ex ception", + "p ent", + "Ġorgan izing", + "Ġenc ounters", + "ĠT OD", + "Ġjew el", + "S ave", + "ĠChrist ie", + "Ġhe ating", + "Ġl azy", + "ĠC P", + "Ġcous in", + "Con fig", + "Ġreg ener", + "Ġne arest", + "Ġachie ving", + "EN S", + "th row", + "ĠRich mond", + "ant le", + "200 2", + "Ġan ten", + "b ird", + "13 3", + "Ġn arc", + "r aint", + "un ny", + "ĠHispan ic", + "ourn aments", + "Ġprop he", + "ĠTh ailand", + "ĠT i", + "Ġinject ion", + "Ġinher it", + "rav is", + "Ġmed i", + "Ġwho ever", + "ĠDE BUG", + "G P", + "ĠH ud", + "C ard", + "p rom", + "Ġp or", + "Ġover head", + "L aw", + "Ġviol ate", + "Ġhe ated", + "Ġdescript ions", + "Ġachieve ments", + "ĠBe er", + "ĠQu ant", + "W as", + "Ġe ighth", + "ĠI v", + "Ġspecial ized", + "U PDATE", + "ĠD elta", + "P op", + "J ul", + "ĠAs k", + "oph y", + "Ġnews letters", + "ĠT ool", + "Ġg ard", + "ĠConf eder", + "ĠGM T", + "ĠAb bott", + "Ġimm unity", + "ĠV M", + "Is lam", + "Ġimpl icit", + "w d", + "Ġ19 44", + "rav ity", + "omet ric", + "Ġsurv iving", + "ur ai", + "ĠPr ison", + "Ġr ust", + "ĠSk etch", + "Ġbe es", + "ĠThe ory", + "Ġmer it", + "T ex", + "ch at", + "Ġm im", + "Ġpast e", + "ĠK och", + "Ġignor ance", + "ĠSh oot", + "Ġbas ement", + "Un ited", + "ĠAd vis", + "he ight", + "Ġf oster", + "Ġdet ain", + "in formation", + "Ġne ural", + "' ;", + "Ġprov es", + "all ery", + "Ġinv itation", + "um bers", + "Ġc attle", + "Ġbicy cle", + "z i", + "Ġconsult ant", + "Ġap ology", + "ĠT iger", + "Ġ12 3", + "99 9", + "Ġind ividually", + "r t", + "ig ion", + "ĠBrazil ian", + "Ġdist urb", + "Ġentreprene urs", + "Ġfore sts", + "cer pt", + "pl ates", + "p her", + "clip se", + "Ġtw itter", + "Ġac ids", + "ograph ical", + "h um", + "ĠB ald", + "if ully", + "Ġcomp iler", + "ĠD A", + "Ġdon or", + "as i", + "Ġtrib al", + "l ash", + "ĠCon fig", + "Ġapplic ants", + "Ġsal aries", + "13 5", + "Put in", + "ĠF ocus", + "ir s", + "Ġmisc onduct", + "ĠH az", + "Ġeat en", + "M obile", + "Mus lim", + "ĠMar cus", + "v iol", + "Ġfavor able", + "Ġst ub", + "ad in", + "ĠH ob", + "Ġfaith ful", + "Ġelectron ics", + "Ġvac uum", + "w ait", + "back ed", + "econom ic", + "d ist", + "Ġten ure", + "Ġsince re", + "ĠT ogether", + "ĠW ave", + "Ġprog ression", + "Ġden ying", + "Ġdist ress", + "br aska", + "th ird", + "Ġmix ing", + "Ġcolon ial", + "Ġpriv ately", + "Ġun rest", + "atern ity", + "Ġprem ises", + "ant i", + "greg ation", + "Ġlic ence", + "ĠH ind", + "ĠSam uel", + "Ġconvinc ing", + "ĠA ce", + "ĠR ust", + "ĠNet anyahu", + "Ġhand les", + "ĠP atch", + "orient ed", 
+ "ah o", + "ĠG onz", + "Ġhack ers", + "claim er", + "Ġcustom s", + "ĠGr an", + "f ighters", + "Ġl uc", + "Ġman uscript", + "aren thood", + "Ġdev il", + "Ġwar riors", + "Ġoff enders", + "Will iam", + "Ġhol idays", + "Ġnight mare", + "Ġle ver", + "iff erent", + "St at", + "Ġexhib ition", + "put ed", + "ĠP ure", + "Ġal pha", + "Ġenthus iasm", + "ĠRepresent atives", + "E AR", + "ĠT yp", + "Ġwhe at", + "ĠAl f", + "Ġcor rection", + "Ġev angel", + "AT T", + "M iss", + "Ġs oup", + "Ġimpl ied", + "par am", + "Ġsex y", + "ĠL ux", + "Ġrep ublic", + "p atch", + "ab lish", + "Ġic ons", + "Ġfather s", + "ĠG ET", + "ĠCar ib", + "Ġregul ated", + "ĠCo hen", + "ĠBob by", + "Ġn er", + "Ġb ent", + "vent ory", + "ĠAl ong", + "ĠE ST", + "ĠWall ace", + "Ġmurd ers", + "r ise", + "ke ll", + "ĠCommon wealth", + "Ġn asty", + "et a", + "ĠM IT", + "Ġadminist ered", + "Ġgenuine ly", + "Ed itor", + "n ick", + "Ġhyd ro", + "**************** ****************", + "ĠB le", + "Ġfin es", + "Ġg orge", + "aus ible", + "r h", + "Ġapp le", + "ment ioned", + "Ġro pe", + "ot yp", + "H R", + "Ġdisappoint ing", + "Ġc age", + "n ik", + "Ġdoub ts", + "ĠF REE", + "print s", + "ĠM UST", + "Ġvend ors", + "ĠIn qu", + "Ġliber als", + "Ġcontract or", + "Ġup side", + "child ren", + "Ġtrick y", + "Ġregul ators", + "charg ed", + "l iter", + "Ġ ***", + "Ġreb ell", + "l ang", + "Ġloc als", + "Ġphys icians", + "Ġhe y", + "ar se", + "t m", + "ĠLe x", + "Ġbehavior al", + "success ful", + "F X", + "Ġbr ick", + "ov ic", + "Ġcon form", + "Ġreview ing", + "Ġins ights", + "Ġbi ology", + "ĠRem ove", + "ĠExt ra", + "Ġcomm itting", + "indu ced", + "ignt y", + "ig m", + "Ġat omic", + "Comm on", + "ĠE M", + "ĠP ere", + "ĠIt ems", + "e h", + "Ġpres erved", + "ĠH ood", + "Ġprison er", + "Ġbankrupt cy", + "Ġg ren", + "us hes", + "Ġexplo itation", + "Ġsign atures", + "Ġfin an", + "] ,\"", + "ĠM R", + "Ġme g", + "rem lin", + "Ġmusic ians", + "Ġselect ing", + "Ġexam ining", + "IN K", + "l ated", + "H i", + "Ġart ic", + "Ġp ets", + "Ġimp air", + "ĠM AN", + "Ġtable ts", + "in clude", + "R ange", + "Ġca ut", + "Ġlog s", + "Ġmount ing", + "Ġun aware", + "Ġdynam ics", + "ĠPalest ine", + "ĠQu arter", + "ĠPur ple", + "Ġm a", + "ĠIm port", + "Ġcollect ions", + "ci ation", + "Ġsuccess or", + "Ġcl one", + "Ġaim ing", + "Ġposs essed", + "Ġstick ing", + "Ġsh aking", + "Ġloc ate", + "ĠH ockey", + "T urn", + "17 0", + "Ġfif teen", + "ĠHar rison", + "Ġcontinu ously", + "ĠT C", + "ĠVal ent", + "ĠRes cue", + "Ġby pass", + "am ount", + "Ġm ast", + "Ġprotect s", + "Ġart istic", + "Ġsomet ime", + "Ġsh oe", + "Ġshout ed", + "ific ant", + "et itive", + "ĠReg ister", + "ĠJ in", + "Ġconcent rated", + "ling ton", + "on ies", + "Ġgener ator", + "yr im", + "ĠAr men", + "Ġclear ing", + "id o", + "ĠT W", + "al ph", + "Ġlad ies", + "H ard", + "Ġdial og", + "Ġinput s", + "æ ľ", + "Ġpos es", + "Ġsl ots", + "ĠPrem ium", + "Ġle aks", + "Ġboss es", + "Ġ11 3", + "c ourse", + "A cc", + "ĠNew ton", + "ĠAust ria", + "ĠM age", + "Ġte aches", + "ab ad", + "Ġwe ars", + "Ġc yl", + "Ġcur se", + "ĠS ales", + "ĠW ings", + "Ġp sy", + "Ġg aps", + "ĠIce land", + "ĠP interest", + "Ġland lord", + "Ġdefin itions", + "ĠK er", + "Ġsufficient ly", + "ĠP ence", + "ĠArch itect", + "Ġsur pass", + "Ġ11 4", + "Ġsuper hero", + "ĠDise ase", + "Ġpri ests", + "ĠC ulture", + "Ġdefin itive", + "Ġsecret ly", + "ĠD ance", + "inst all", + "ch ief", + "ĠJess ica", + "W ould", + "Up dated", + "Ġlock er", + "ĠK ay", + "Ġmem orial", + "è ¦", + "f at", + "Ġdis gu", + "Ġflav ors", + "ĠBase ball", + "ĠRes istance", + "Ġk icks", + "Ġen v", 
+ "Ġteen agers", + "D ark", + "ĠC AR", + "Ġh alt", + "ĠL G", + "ĠGab riel", + "Ġfe ver", + "Ġs atur", + "Ġm all", + "Ġaffili ate", + "ĠS leep", + "ĠSpe cific", + "ĠV el", + "Ġj ar", + "ĠSac red", + "ĠEd wards", + "ĠA CL", + "Ġret ained", + "ĠG iant", + "Ġlim itation", + "in ces", + "Ġref usal", + "ĠT ale", + "ĠBut ler", + "Ġacc idents", + "ĠC SS", + "Ġimport ed", + "ĠCop y", + "Î ±", + "ER T", + "z el", + "Ġdiv isions", + "h ots", + "ĠAl b", + "ĠD S", + "Load er", + "W ashington", + "at isf", + "ĠCreat ive", + "\\ .", + "ĠAut om", + "red ict", + "Ġrecept or", + "ĠCarl os", + "Met hod", + "ok a", + "Ġmal icious", + "Ġste pping", + ", [", + "ĠD ad", + "Ġatt raction", + "ĠEffect s", + "ĠPir ate", + "ĠC er", + "ĠIndust ry", + "ĠR ud", + "Ġchar ter", + "Ġd ining", + "Ġins ists", + "Ġconfig ure", + "Ġ( #", + "ĠSim ple", + "ĠSc roll", + "UT C", + "17 5", + "ĠK on", + "Ġmarket place", + "Ġ ãĤ", + "Ġref res", + "Ġg ates", + "er red", + "ĠP od", + "Ġbeh ave", + "Fr ank", + "n ode", + "Ġendors ed", + "he tt", + "as ive", + "ĠHom eland", + "Ġr ides", + "ĠLe ave", + "er ness", + "Ġflood ing", + "A FP", + "Ġris en", + "Ġcontin ually", + "Ġun anim", + "ĠCont ract", + "ĠP as", + "Ġgu ided", + "ĠCh ile", + "b d", + "Ġsu cc", + "pt ic", + "Ġcomm ittees", + "ĠL uther", + "ĠAny one", + "Ġs ab", + "12 4", + "Ġp ixel", + "ĠB ak", + "ĠT ag", + "ĠBenn ett", + "En ter", + "sm all", + "ĠPresident ial", + "Ġp ul", + "Ġcontr ace", + "arch ive", + "Ġcoast al", + "ĠK ids", + "19 2", + "âĢ ²", + "ick y", + "ING TON", + "Ġw olf", + "ĠSt alin", + "T ur", + "id get", + "am as", + "ĠUn less", + "Ġspons or", + "Ġmor ph", + "ĠCho ose", + "Ġrun ner", + "Ġun bel", + "Ġm ud", + "ĠMan a", + "Ġdub bed", + "Ġg odd", + "ure rs", + "wind ow", + "Ġrel ied", + "Ġcelebr ating", + "os c", + "Ġ13 5", + "Ġlobb ying", + "Ġincom plete", + "Ġrestrict ion", + "Ġinc ap", + "it us", + "Ġexpect ation", + "ĠAp ollo", + "Ġint ens", + "Ġsyn c", + "G H", + "Ġmanip ulation", + "B Y", + "Ġspe ar", + "Ġbre asts", + "Ġvol can", + "il ia", + "M aterial", + "Ġform ats", + "ĠB ast", + "Ġparliament ary", + "Ġsn ake", + "Ġserv ants", + "ĠTr udeau", + "ĠGr im", + "ĠArab ic", + "ĠSC P", + "ĠBoy s", + "st ation", + "Ġprospect ive", + "ord e", + "in itialized", + "Ġb ored", + "AB LE", + "Ġaccess ed", + "Ġtax i", + "ĠShe ll", + "aid en", + "urs ed", + "in ates", + "ĠIns urance", + "ĠPet e", + "Sept ember", + "6 50", + "Ġad ventures", + "ĠCo ver", + "Ġt ribute", + "Ġsk etch", + "Ġem power", + "Ġ Ø", + "ĠGl enn", + "ĠD aw", + "= \\\"", + "ĠPolit ics", + "Ġgu ides", + "Ġd ioxide", + "ĠG ore", + "ĠBr ight", + "ĠS ierra", + "Ġval ued", + "c ond", + "Ġpo inter", + "Se lect", + "Ġrisk y", + "Ġabsor b", + "im ages", + "Ġref uses", + "Ġbon uses", + "__ _", + "Ġh ilar", + "ĠF eatures", + "2 20", + "ĠCollect or", + "F oot", + "Ġ19 64", + "cul us", + "Ġd awn", + "Ġwork out", + "ĠL O", + "Ġphilosoph ical", + "ĠSand y", + "ĠYou th", + "Ġl iable", + "A f", + "bl ue", + "Ġovert urn", + "less ness", + "ĠTrib une", + "ĠIn g", + "Ġfact ories", + "Ġcat ches", + "Ġpr one", + "Ġmat rix", + "Ġlog in", + "Ġin acc", + "Ġex ert", + "s ys", + "Ġneed le", + "ĠQ ur", + "Ġnot ified", + "ould er", + "t x", + "Ġremind s", + "Ġpublisher s", + "Ġn ort", + "Ġg it", + "Ġfl ies", + "ĠEm ily", + "Ġflow ing", + "ĠAl ien", + "ĠStr ateg", + "Ġhard est", + "Ġmod ification", + "AP I", + "ĠM Y", + "Ġcr ashes", + "st airs", + "n umber", + "Ġur ging", + "ch annel", + "ĠFal con", + "Ġinhabit ants", + "Ġterr ifying", + "Ġutil ize", + "Ġban ner", + "Ġcig arettes", + "Ġsens es", + "ĠHol mes", + "Ġpract ition", + 
"ĠPhill ips", + "ott o", + "Ġcomp ile", + "Mod el", + "ĠK o", + "Ġ[ ]", + "Americ ans", + "ĠTer ms", + "Ġmed ications", + "ĠAn a", + "Ġfundament ally", + "ĠNot ice", + "Ġwe aker", + "Ġ 0000", + "Ġgar lic", + "Ġout break", + "Ġeconom ist", + "ĠB irth", + "Ġobst acles", + "ar cer", + "ĠOr thodox", + "Ġplace bo", + "ĠC rew", + "asp berry", + "ĠAng els", + "Ġdis charge", + "Ġdestruct ive", + "11 7", + "ĠR ising", + "Ġd airy", + "l ate", + "Ġcoll ision", + "ĠTig ers", + "ean or", + "ocument ed", + "ĠIn valid", + "Ġd ont", + "ĠL iter", + "ĠV a", + "Ġhyd rogen", + "Ġvari ants", + "ĠBrown s", + "Ġ19 65", + "Ġind igenous", + "Ġtrad es", + "Ġremain der", + "Ġswe pt", + "ĠImp act", + "Ġred ist", + "Ġun int", + "grad uate", + "ãĥ ķ", + "ĠW ILL", + "ãģ® ç", + "ĠCrit ical", + "Ġf isher", + "Ġv icious", + "Ġrevers ed", + "Y ear", + "ĠS ox", + "Ġshoot ings", + "Ġfil ming", + "Ġtouchdown s", + "ai res", + "m el", + "Ġgrand father", + "Ġaffect ion", + "ing le", + "Ġover ly", + "Add itional", + "Ġsup reme", + "ĠGr ad", + "Ġsport ing", + "Ġmer cy", + "ĠBrook s", + "ount y", + "Ġperform s", + "Ġtight ly", + "Ġdem ons", + "Ġkill ings", + "Ġfact ion", + "ĠNov a", + "aut s", + "Ġund oubtedly", + "ar in", + "Ġunder way", + "ra k", + "Ġl iv", + "ĠReg ion", + "Ġbrief ing", + "s ers", + "cl oud", + "ĠM ik", + "us p", + "Ġpred iction", + "az or", + "Ġport able", + "ĠG and", + "Ġpresent ing", + "Ġ10 80", + " »", + "ush i", + "ĠSp ark", + "there um", + "Ġjust ification", + "ĠN y", + "Ġcontract ors", + "ming ham", + "ĠSt yle", + "å ħ", + "ĠChron icles", + "ĠPict ure", + "Ġprov ing", + "Ġw ives", + "set t", + "Ġmole cules", + "ĠFair y", + "Ġconsist ing", + "Ġp ier", + "al one", + "in ition", + "Ġn ucle", + "j son", + "Ġg otta", + "Ġmob il", + "Ġver bal", + "ar ium", + "Ġmon ument", + "uck ed", + "Ġ25 6", + "T ech", + "mine craft", + "ĠTr ack", + "Ġt ile", + "Ġcompat ibility", + "as is", + "Ġs add", + "Ġinstruct ed", + "ĠM ueller", + "Ġle thal", + "Ġhorm one", + "Ġor che", + "el se", + "Ġske let", + "Ġentert aining", + "Ġminim ize", + "ag ain", + "Ġunder go", + "Ġconst raints", + "Ġcig arette", + "ĠIslam ist", + "Ġtravel s", + "ĠPant hers", + "l ings", + "C are", + "Ġlaw suits", + "ur as", + "Ġcry st", + "Ġlow ered", + "Ġaer ial", + "Ġcomb inations", + "Ġha un", + "Ġch a", + "Ġv ine", + "Ġquant ities", + "Ġlink ing", + "b ank", + "Ġso y", + "B ill", + "ĠAngel a", + "Ġrecip ient", + "ĠProt est", + "Ġs ocket", + "Ġsolid arity", + "Ġâ Ĩ", + "m ill", + "Ġvar ies", + "ĠPak istani", + "Dr agon", + "Ġun e", + "Ġhor izon", + "³³³³ ³³³³", + "Ġprov inces", + "Ġfrank ly", + "Ġenact ed", + "not es", + "[ '", + "Ġ19 2", + "ocr acy", + "Ġendorse ment", + "Ġover time", + "Tr ue", + "L ab", + "lic ted", + "ĠD NC", + "Ġbe ats", + "ĠJam ie", + "15 2", + "ĠIN T", + "Cont act", + "Ġaccount ed", + "h ash", + "ĠPack ers", + "p ires", + "Ġles bian", + "Ġamend ments", + "Ġhop eful", + "ĠFin land", + "Ġspot light", + "Ġconfig ured", + "Ġtrou bled", + "Ġg aze", + "ĠCal gary", + "Ġrel iability", + "Ġins urg", + "sw er", + "b uy", + "ĠSk in", + "Ġp ixels", + "Ġhand gun", + "Ġpar as", + "Ġcateg or", + "ĠE L", + "ĠRe x", + "Ind eed", + "Ġkind a", + "Ġconj unction", + "ĠBry an", + "ĠMan ufact", + "y ang", + "Pl us", + "S QL", + "ish ment", + "Ġdom inate", + "Ġn ail", + "Ġo ath", + "Ġeru pt", + "ĠF ine", + "it bart", + "ĠCh ip", + "ĠAb d", + "ĠN am", + "Ġbuy er", + "Ġdiss ent", + "Le aks", + "Cont in", + "Ġr ider", + "ĠSome one", + "Ġill usion", + "c in", + "ĠBoe ing", + "Ġin adequ", + "ov ation", + "i ants", + "Ġreb uild", + "4 50", + "ĠDest iny", + "S 
W", + "ĠT ill", + "H it", + "ia z", + "ĠBang l", + "acher s", + "ĠRe form", + "Ġse gments", + "Ġsystem atic", + "d c", + "ĠConserv atives", + "Ġport al", + "h or", + "ĠDragon bound", + "Ġdrag ged", + "om o", + "Ġthe e", + "ad vert", + "ĠRep orts", + "ĠE t", + "Ġbarrel s", + "Aug ust", + "Ġcompar isons", + "Ġhe x", + "Ġan throp", + "\" [", + "bor ough", + "ab i", + "Ġpict ured", + "play ing", + "ĠAdd ress", + "ĠMir ror", + "Sm ith", + "Ġt ires", + "ĠN PR", + "AA AA", + "Ġclass ification", + "ĠTh an", + "ĠH arm", + "ĠR A", + "Ġreject ion", + "min ation", + "Ġr anged", + "ĠF alls", + "D I", + "H ost", + "ãĤ ´", + "ĠEx ample", + "list ed", + "th irds", + "Ġsaf egu", + "br and", + "Ġprob able", + "Can ada", + "IT ION", + "ĠQ aeda", + "Ġch ick", + "Ġimport s", + "h it", + "l oc", + "W W", + "Ġble w", + "Ġany time", + "Ġwh oles", + "ik ed", + "Ġcal culation", + "cre ate", + "ĠO ri", + "Ġupgr aded", + "Ġapp ar", + "ut ory", + "ĠM ol", + "B rit", + "ĠJ ong", + "IN AL", + "ĠStart ing", + "Ġd ice", + "urt le", + "Ġre lying", + "cl osure", + "Ġprof itable", + "Ġsl aughter", + "ĠMan ual", + "c aster", + "Ġ\" $", + "Ġfe ather", + "ĠSim ply", + "ie ves", + "Ġdeter ior", + "ĠPC I", + "Ġst amp", + "Ġfl aws", + "Ġsh ade", + "ham mer", + "Ġpass port", + "Ġcont ing", + "am el", + "Ġobser vers", + "Ġneg lect", + "ĠR B", + "ĠBrother hood", + "Ġskept ical", + "f amily", + "us k", + "Ġemotion ally", + "â Ļ", + "ĠBet a", + "ason able", + "id ity", + "ĠM ul", + "Ġkick ing", + "ĠC arm", + "oll ah", + "VERT IS", + "ĠAt hen", + "Ġlad der", + "ĠBul let", + "å £", + "00 01", + "ĠWild life", + "ĠM ask", + "ĠN an", + "R ev", + "Ġun acceptable", + "leg al", + "Ġcrowd ed", + "ag i", + "ĠC ox", + "j e", + "Ġmor ality", + "Ġfu els", + "Ġc ables", + "Ġman kind", + "ĠCarib bean", + "Ġanch or", + "Ġby te", + "ĠO ften", + "ĠO z", + "Ġcraft ed", + "Ġhistor ian", + "ĠW u", + "Ġtow ers", + "ĠCitiz ens", + "Ġhel m", + "Ġcred entials", + "Ġsing ular", + "ĠJes se", + "Ġtack les", + "Ġcont empt", + "Ġa fore", + "ĠSh adows", + "Ġn il", + "Ġur gent", + "app le", + "bl ood", + "Ġv on", + "Ġoff line", + "Ġbreat he", + "Ġj umps", + "Ġirre levant", + "ox ic", + "om al", + "import ant", + "J im", + "Ġgl oves", + "arm ing", + "dep th", + "Ġtal ents", + "ook ie", + "ĠS B", + "Ġpal m", + "uff s", + "est a", + "IG H", + "Ġcan on", + "ĠVer izon", + "ĠP le", + "Ġcou pled", + "vel t", + "Ġfundra ising", + "ĠGet ting", + "ĠD LC", + "Ġmathemat ical", + "ĠH S", + "ĠCard inals", + "te lling", + "Ġspons ors", + "Ġ Ï", + "ĠBull s", + "op tion", + "Ġprop ose", + "Ġmem orable", + "Ġembr aced", + "Ġdecl ining", + "He alth", + "ed a", + "Ġ} ;", + "Ġsp am", + "m ile", + "Ġpit cher", + "ĠE ight", + "Ġcar ing", + "ut ic", + "ro le", + "Ġair line", + "ernand ez", + "ĠAth let", + "Ġcert ification", + "ux e", + "rig er", + "Ġem pir", + "Ġsens ation", + "Ġdis m", + "Ġb olt", + "Ġev olve", + "H ouse", + "Ġconsult ation", + "ĠD uty", + "Ġtou ches", + "ĠN athan", + "Ġf aint", + "h ad", + "\" (", + "ĠCons umer", + "ĠExt reme", + "Ġ12 7", + "ĠHer m", + "ĠSac rament", + "iz oph", + "Ġanx ious", + "ul ously", + "Ġsoc ially", + "ĠU TC", + "Ġsol ving", + "ĠLet ter", + "Hist ory", + "ed uc", + "Pr ice", + ") );", + "Ġrel oad", + "am ic", + "Ġp ork", + "Ġdisc ourse", + "Ġt ournaments", + "ai ro", + "ĠK ur", + "ĠCost a", + "Ġviol ating", + "Ġinterf ere", + "Ġrecre ational", + "uff le", + "Ġspe eches", + "Ġneed ing", + "Ġremem bers", + "Ġcred ited", + "n ia", + "f ocused", + "amer a", + "Ġb ru", + "um bs", + "ĠCub an", + "Ġpreced ing", + "Ġnons ense", + "ac ial", + "Ġsmart 
phones", + "ĠSt ories", + "S ports", + "ĠEmer gency", + "oun cing", + "ef ined", + "Ġb er", + "Ġconsult ing", + "Ġm asters", + "he astern", + ".\" [", + "ĠRun ning", + "Ġsus cept", + "ĠF eng", + "Americ a", + "pr ises", + "st itial", + "ĠWeek ly", + "ĠGreat er", + "mod ules", + "if ter", + "G raphics", + "ul er", + "Ġwho lly", + "Ġsupp ress", + "Ġconce aled", + "Ġhapp ily", + "Ġaccept s", + "ĠEn joy", + "Ġr ivers", + "ĠEx cept", + "2 25", + "ĠN HS", + "ĠMc Connell", + "Ġp ussy", + "fer red", + "ut able", + "Ġatt ain", + "Ġ> =", + "Ġdepos its", + "roph ic", + "Ġnot orious", + "ĠSh aw", + "il itation", + "Ġepid emic", + "all ic", + "Ġsmall est", + "ov ich", + "Ġaccess ories", + "per ties", + "Ġsur plus", + "ĠMe ch", + "Ġamb ig", + "ĠImm igration", + "Ġch im", + "ev al", + "Ġpract icing", + "ĠMyster y", + "Ġdom ains", + "ĠSil icon", + "app s", + "Ġkilomet ers", + "e a", + "ĠSm ash", + "Ġwarrant y", + "Ġn ost", + "s il", + "re v", + "J on", + "ĠDub lin", + "Ġtast es", + "Ġb out", + "g reat", + "er ror", + "Ġsw itches", + "ĠB apt", + "D O", + "ok i", + "Ġsour ced", + "pro du", + "Ġattach ment", + "ĠIss ue", + "ĠQuest ion", + "Jo in", + "Ġf itted", + "Ġunlaw ful", + "^ ^", + "ere k", + "Ġauthent ication", + "Ġst ole", + "Ġaccount ability", + "l abel", + "S earch", + "Ġal beit", + "atic an", + "fund ed", + "ĠAdd ing", + "ĠI Q", + "Ġsub mar", + "l it", + "a que", + "ĠLear ning", + "Ġint eger", + "M aster", + "ĠCh rom", + "Ġprem ier", + "O p", + "ĠLi u", + "Ġbl essed", + "ĠGl obe", + "ĠResp onse", + "Ġlegit im", + "ĠMer kel", + "Ġdispos al", + " ´", + "Ġgau ge", + "pe at", + "Ġindu ced", + "Ġquestion able", + "arth y", + "ĠV it", + "ĠF eed", + "U ntil", + "U t", + "worth y", + "R Y", + "ĠH erald", + "ĠHam mer", + "Ġmed al", + "ĠR ivers", + "ĠH ack", + "Ġclar ify", + "Ġtrack ed", + "Ġautonom ous", + "Ġten ant", + "ĠQ atar", + "er ie", + "Ġgr im", + "ĠMon itor", + "Ġresist ant", + "ĠSpe c", + "ĠWell s", + "N AS", + "14 8", + "Ġmin ers", + "iot ics", + "Ġmiss es", + "11 6", + "g ian", + "g it", + "ĠE yes", + "p res", + "Ġgrad uated", + "Ġang el", + "Ġsyn chron", + "Ġefficient ly", + "Ġtrans mitted", + "H arry", + "Ġglob ally", + "EN CE", + "ĠMont ana", + "r aged", + "ĠPre vention", + "Ġp iss", + "ĠL l", + "Ġshe lf", + "ĠB JP", + "ĠTest ament", + "ĠL ate", + "ik er", + "ĠH app", + "ĠJul ian", + "h all", + "Ġsp ont", + "Ġshut down", + "Ġincons istent", + "Ġsubscrib ers", + "Ġske leton", + "ĠNe braska", + "Ġins pire", + "ĠV oid", + "F eed", + "Ġang les", + "ĠSpr ings", + "Ġbench mark", + "Ġvacc ines", + "izoph ren", + "se xual", + "uff ed", + "Ġsh ine", + "ĠK ath", + "Ġgest ure", + "ine a", + "Ġr ip", + "Ġopp ression", + "Ġcons cience", + "b t", + "ĠL um", + "Ġinc idence", + "ĠF a", + "w r", + "Ġmin eral", + "ĠSp urs", + "alk y", + "Ġth under", + "Ġop io", + "Be ing", + "ĠPal m", + "Ġwas ted", + "Ġl b", + "i aries", + "ĠIniti ative", + "Ġcur ric", + "Ġmark er", + "ĠMc L", + "Ġext ensions", + "ĠP v", + "ĠAr ms", + "Ġoffer ings", + "Ġdef enses", + "Ġvend or", + "Ġcontrad ict", + "ĠCol in", + "Ġredd it", + "Ġper ipher", + "12 2", + "Ġs ins", + "E dit", + "IC T", + "So ft", + "ĠSh ah", + "Ġadministr ator", + "ĠT rip", + "Ġporn ography", + "Ġtu ition", + "in ence", + "ĠPro gress", + "Ġcat alog", + "Ġsu ite", + "Ġh ike", + "Ġreprodu ctive", + "eng ine", + "Ġd rought", + "ĠNo ah", + "Ġ2 30", + "Ġd ude", + "Ġrelax ed", + "Ġpart ition", + "Ġparticip ant", + "Ġtel esc", + "Ġfe as", + "ĠF F", + "own er", + "Ġswe eping", + "Ġl enses", + "Ġmatch up", + "ĠRe pl", + "ourn als", + "Ġcred ible", + "Ġgrand mother", + 
"Ġther mal", + "Ġsubscrib ing", + "Ġident ities", + "col m", + "U CT", + "Ġreluct ant", + "us ers", + "ĠC ort", + "Ġassist ed", + "OS S", + "ATION S", + "IS H", + "Ġpharm aceutical", + "ic able", + "ad ian", + "ĠSon ic", + "ĠF ury", + "ĠM ong", + "A H", + "ĠPsych ology", + "Ġph osph", + "Ġtreat s", + "Ń Ķ", + "Ġstead ily", + "ĠHell o", + "Ġrel ates", + "Ġcl ue", + "Ex pl", + "a uth", + "Ġrev ision", + "Ġe ld", + "os ion", + "Ġbr on", + "14 4", + "ri kes", + "Ġmin es", + "Ġblank et", + "ĠF ail", + "el ed", + "ĠIm agine", + "ĠPl anned", + "a ic", + "Re quest", + "M ad", + "ĠHor se", + "ĠEag le", + "Ġcap ac", + "15 7", + "Ġl ing", + "ĠN ice", + "ĠP arenthood", + "min ster", + "og s", + "ens itive", + "Not hing", + "Ġcar n", + "F in", + "ĠP E", + "Ġr ifles", + "ĠL P", + "S and", + "Ġgui Active", + "Ġtour ist", + "C NN", + "Ġunve iled", + "Ġpredec essor", + "} {", + "u ber", + "Ġoff shore", + "Ġopt ical", + "ĠR ot", + "ĠPear l", + "et on", + "Ġst ared", + "Ġfart her", + "at ility", + "cont in", + "ĠG y", + "ĠF oster", + "ĠC oc", + "ri ents", + "Ġdesign ing", + "ĠEconom y", + "ON G", + "W omen", + "ĠN ancy", + "er ver", + "Ġmas cul", + "Ġcasual ties", + "Ġ2 25", + "ĠS ullivan", + "ĠCh oice", + "Ġa ster", + "w s", + "Ġhot els", + "Ġconsider ations", + "Ġcou ch", + "ĠSt rip", + "ĠG n", + "Ġmanip ulate", + "l ied", + "Ġsynt hetic", + "Ġassault ed", + "Ġoff enses", + "ĠDra ke", + "Ġim pe", + "Oct ober", + "ĠHer itage", + "h l", + "ĠBl air", + "Un like", + "Ġg rief", + "Ġ4 50", + "Ġopt ed", + "Ġresign ation", + "il o", + "Ġver se", + "ĠT omb", + "Ġu pt", + "Ġa ired", + "ĠH ook", + "ĠML B", + "Ġassum es", + "out ed", + "ĠV ers", + "Ġinfer ior", + "Ġbund le", + "ĠD NS", + "ograp her", + "Ġmult ip", + "ĠSoul s", + "Ġillust rated", + "Ġtact ic", + "Ġdress ing", + "Ġdu o", + "Con f", + "Ġrel ent", + "Ġc ant", + "Ġscar ce", + "Ġcand y", + "ĠC F", + "Ġaffili ated", + "Ġspr int", + "yl an", + "ĠGarc ia", + "Ġj unk", + "Pr int", + "ex ec", + "C rit", + "Ġport rait", + "ir ies", + "ĠOF F", + "Ġdisp utes", + "W R", + "L ove", + "ãģ Ħ", + "ĠRe yn", + "Ġh ipp", + "op ath", + "Ġflo ors", + "ĠFe el", + "Ġwor ries", + "Ġsett lements", + "ĠP os", + "Ġmos que", + "Ġfin als", + "Ġcr ushed", + "ĠPro bably", + "ĠB ot", + "ĠM ans", + "ĠPer iod", + "Ġsovere ignty", + "Ġsell er", + "Ġap ost", + "Ġam ateur", + "Ġd orm", + "Ġconsum ing", + "Ġarm our", + "ĠRo ose", + "Ġint ensive", + "Ġelim inating", + "ĠSun ni", + "ĠAle ppo", + "j in", + "Ġadv ise", + "p al", + "ĠH alo", + "Ġdes cent", + "Ġsimpl er", + "Ġbo oth", + "ST R", + "L ater", + "ĠC ave", + "== =", + "Ġm ol", + "Ġf ist", + "Ġshot gun", + "su pp", + "Ġrob bery", + "E ffect", + "Ġobsc ure", + "ĠProf essional", + "Ġemb assy", + "Ġmilit ant", + "Ġinc arcer", + "Ġgener ates", + "Ġlaun ches", + "Ġadministr ators", + "Ġsh aft", + "Ġcirc ular", + "Ġfresh man", + "ĠW es", + "ĠJo el", + "ĠD rew", + "ĠDun can", + "ĠApp arently", + "s ight", + "ĠIntern al", + "ĠInd ividual", + "ĠF E", + "Ġb ore", + "ĠM t", + "Ġbroad ly", + "ĠO ptions", + "ount ain", + "ip es", + "ĠV ideos", + "20 4", + "Ġh ills", + "Ġsim ulation", + "Ġdisappoint ment", + "it an", + "ĠLabor atory", + "Ġup ward", + "Ġbound ary", + "Ġdark er", + "h art", + "Ġdomin ance", + "C ong", + "ĠOr acle", + "ĠL ords", + "Ġscholars hip", + "ĠVin cent", + "ed e", + "ĠR ah", + "Ġencour ages", + "ro v", + "Ġqu o", + "Ġprem ise", + "ĠCris is", + "ĠHol ocaust", + "Ġrhyth m", + "Ġmet ric", + "cl ub", + "Ġtransport ed", + "Ġn od", + "ĠP ist", + "Ġancest ors", + "ĠFred er", + "th umbnails", + "ĠC E", + "ON D", + "Ph il", + "ven ge", + 
"ĠProduct s", + "cast le", + "Ġqual ifying", + "ĠK aren", + "VERTIS EMENT", + "Ġmight y", + "Ġexplan ations", + "Ġfix ing", + "D i", + "Ġdecl aring", + "Ġanonym ity", + "Ġju ven", + "ĠN ord", + "ĠDo om", + "ĠAct ually", + "O k", + "ph is", + "ĠDes ert", + "Ġ11 6", + "I K", + "ĠF M", + "Ġinc omes", + "V EL", + "ok ers", + "Ġpe cul", + "Ġlight weight", + "g ue", + "Ġacc ent", + "Ġincre ment", + "ĠCh an", + "Ġcompl aining", + "ĠB aghd", + "Ġmidfield er", + "Ġover haul", + "Pro cess", + "ĠH ollow", + "ĠTit ans", + "Sm all", + "man uel", + "ĠUn ity", + "ĠEv ents", + "S ty", + "Ġdispro portion", + "n esty", + "en es", + "ĠC od", + "Ġdemonstr ations", + "ĠCrim son", + "ĠO H", + "Ġen rolled", + "Ġc el", + "ĠBre tt", + "Ġa ide", + "Ġhe els", + "Ġbroad band", + "Ġmark ing", + "Ġw izard", + "ĠN J", + "ĠChief s", + "Ġingred ient", + "Ġd ug", + "ĠSh ut", + "urch ase", + "end or", + "Ġfar mer", + "ĠGold man", + "12 9", + "15 5", + "Or der", + "Ġl ion", + "i ably", + "Ġst ain", + "ar ray", + "ilit ary", + "ĠFA Q", + "Ġexpl oded", + "ĠMcC arthy", + "ĠT weet", + "ĠG reens", + "ek ing", + "l n", + "ens en", + "Ġmotor cycle", + "Ġpartic le", + "Ġch olesterol", + "B ron", + "Ġst air", + "Ġox id", + "Ġdes irable", + "ib les", + "Ġthe or", + "for cing", + "Ġpromot ional", + "ov o", + "b oot", + "ĠBon us", + "raw ling", + "Ġshort age", + "ĠP sy", + "Ġrecru ited", + "Ġinf ants", + "Ġtest osterone", + "Ġded uct", + "Ġdistinct ive", + "Ġfirm ware", + "bu ilt", + "14 5", + "Ġexpl ored", + "Ġfact ions", + "Ġv ide", + "Ġtatt oo", + "Ġfinan cially", + "Ġfat igue", + "Ġproceed ing", + "const itutional", + "Ġmis er", + "Ġch airs", + "gg ing", + "ipp le", + "Ġd ent", + "Ġdis reg", + "ç Ķ", + "st ant", + "ll o", + "b ps", + "aken ing", + "Ġab normal", + "ĠE RA", + "å£ «", + "ĠH BO", + "ĠM AR", + "Ġcon cess", + "Ġserv ant", + "Ġas pir", + "l av", + "ĠPan el", + "am o", + "Ġprec ip", + "Ġrecord ings", + "Ġproceed ed", + "Ġcol ony", + "ĠT ang", + "ab lo", + "Ġstri pped", + "Le ft", + "to o", + "Ġpot atoes", + "Ġfin est", + "% ).", + "Ġc rap", + "ĠZ ach", + "ab ases", + "ĠG oth", + "Ġbillion aire", + "w olf", + "Ġsan ction", + "S K", + "Ġlog ged", + "P o", + "ey ed", + "un al", + "Ġcr icket", + "Ġarm ies", + "Ġunc overed", + "Cl oud", + "ó n", + "Ġreb ounds", + "Ġm es", + "O per", + "P ac", + "Ġnation ally", + "Ġinsert ed", + "p ict", + "Ġgovern ance", + "Ð ¸", + "Ġprivile ges", + "G ET", + "Ġfavor ites", + "im ity", + "Ġlo ver", + "the m", + "em pl", + "Ġgorge ous", + "An n", + "Ġsl ipped", + "Ġve to", + "B ob", + "Ġsl im", + "u cc", + "ĠF ame", + "udden ly", + "Ġden ies", + "ĠM aur", + "Ġdist ances", + "Ġw anna", + "t ar", + "ĠS ER", + "Ġâ Ī", + "Ġle mon", + "at hetic", + "Ġlit eral", + "Ġdistingu ished", + "Ġansw ering", + "G I", + "Ġrelig ions", + "ĠPhil os", + "ĠL ay", + "Ġcomp os", + "ire ments", + "ĠK os", + "ine z", + "roll ing", + "Ġyoung est", + "and ise", + "ĠB orn", + "Ġalt ar", + "am ina", + "ĠB oot", + "v oc", + "Ġdig ging", + "Ġpress ures", + "Ġl en", + "26 4", + "Ġassass ination", + "ĠBir mingham", + "ĠMy th", + "Ġsovere ign", + "ĠArt ist", + "ĠPhot ograph", + "Ġdep icted", + "Ġdisp ens", + "orth y", + "Ġamb ul", + "int eg", + "ĠC ele", + "ĠTib et", + "Ġhier archy", + "Ġc u", + "Ġpre season", + "ĠPet erson", + "Ġcol ours", + "Ġworry ing", + "Ġback ers", + "ĠPal mer", + "ĠÎ ¼", + "Ġcontribut or", + "Ġhear ings", + "Ġur ine", + "Ġ Ù", + "ourge ois", + "Sim ilar", + "ĠZ immer", + "s omething", + "ĠUS C", + "Ġstrength s", + "ĠF I", + "Ġlog ging", + "As ked", + "ĠTh ai", + "in qu", + "ĠW alt", + "Ġcrew s", + "it 
ism", + "3 01", + "Ġshar ply", + "um ed", + "Ġred irect", + "r ators", + "In f", + "ĠWe apons", + "Ġte asp", + "19 99", + "L ive", + "ĠEs pecially", + "ĠS ter", + "ĠVeter ans", + "Ġint ro", + "other apy", + "Ġmal ware", + "Ġbre eding", + "Ġmole cular", + "ĠR oute", + "ĠCom ment", + "oc hem", + "Ġa in", + "Se ason", + "Ġlineback er", + "Ä «", + "ĠEconom ics", + "es ar", + "ĠL ives", + "ĠEm ma", + "Ġk in", + "ĠTer rit", + "Ġpl anted", + "ot on", + "ĠBut ter", + "ĠSp ons", + "P ER", + "Ġdun geon", + "Ġsymb olic", + "Ġfil med", + "Ġdi ets", + "Ġconclud es", + "Ġcertain ty", + "ĠForm at", + "Ġstr angers", + "form at", + "ĠPh ase", + "Ġcop ied", + "Ġmet res", + "ld a", + "ĠUs ers", + "Ġdeliber ate", + "Ġwas hed", + "ĠL ance", + "im ation", + "Ġimpro per", + "ĠGen esis", + "ick r", + "ĠK ush", + "Ġreal ise", + "Ġembarrass ing", + "alk ing", + "b ucks", + "Ġver ified", + "Ġout line", + "year s", + "ĠIn come", + "20 2", + "Ġz ombies", + "F inal", + "ĠMill enn", + "Ġmod ifications", + "ĠV ision", + "ĠM oses", + "ver b", + "iter ranean", + "ĠJ et", + "Ġnav al", + "ĠA gg", + "Ġur l", + "Ġvict ories", + "Ġnon etheless", + "Ġinj ust", + "ĠF act", + "ç ļ", + "Ġins ufficient", + "re view", + "face book", + "Ġnegoti ating", + "Ġguarant ees", + "im en", + "uten berg", + "Ġg ambling", + "Ġcon gr", + "Load ing", + "Ġnever theless", + "Ġpres idents", + "ĠIndust rial", + "Ġ11 8", + "Ġp oured", + "ĠT ory", + "Ġ17 5", + "Ġ: =", + "Sc ott", + "ange red", + "T ok", + "Ġorgan izers", + "M at", + "ĠG rowth", + "Ġad ul", + "Ġens ures", + "Ġ11 7", + "é¾į å", + "Ġmass acre", + "Ġgr ades", + "be fore", + "AD VERTISEMENT", + "ĠSl ow", + "ĠM MA", + "âĢĶ \"", + "ĠV atican", + "Q aeda", + "Ġo we", + "66 66", + "ĠS orry", + "ĠGr ass", + "Ġbackground s", + "Ġexha usted", + "Ġcl an", + "Ġcomprom ised", + "ĠE lf", + "ĠIsa ac", + "ens on", + "In vest", + "IF A", + "Ġinterrupt ed", + "ãĥī ãĥ©", + "Ġtw isted", + "ĠDrag ons", + "M ode", + "ĠK remlin", + "Ġfert il", + "he res", + "ph an", + "ĠN ode", + "f ed", + "ĠOr c", + "Ġunw illing", + "C ent", + "Ġprior it", + "Ġgrad uates", + "Ġsubject ive", + "Ġiss uing", + "ĠL t", + "Ġview er", + "Ġw oke", + "Th us", + "bro ok", + "Ġdep ressed", + "Ġbr acket", + "ĠG or", + "ĠFight ing", + "Ġstri ker", + "Rep ort", + "ĠPortug al", + "Ġne o", + "w ed", + "19 9", + "Ġflee ing", + "sh adow", + "ident ified", + "US E", + "Ste am", + "Ġstret ched", + "Ġrevel ations", + "art ed", + "ĠD w", + "Ġalign ment", + "est on", + "ĠJ ared", + "S ep", + "Ġblog s", + "up date", + "g om", + "r isk", + "Ġcl ash", + "ĠH our", + "Ġrun time", + "Ġunw anted", + "Ġsc am", + "Ġr ack", + "Ġen light", + "on est", + "ĠF err", + "Ġconv ictions", + "Ġp iano", + "Ġcirc ulation", + "ĠW elcome", + "Ġback lash", + "ĠW ade", + "Ġrece ivers", + "ot ive", + "J eff", + "Ġnetwork ing", + "ĠPre p", + "ĠExpl orer", + "Ġlect ure", + "Ġupload ed", + "ĠMe at", + "B LE", + "ĠNaz is", + "ĠSy nd", + "st ud", + "ro ots", + "ri ans", + "Ġportray ed", + "Ġ ??", + "ĠBudd ha", + "s un", + "Rober t", + "ĠCom plex", + "Ġover see", + "Ġste alth", + "T itle", + "ĠJ obs", + "ĠK um", + "Ġappreci ation", + "ĠM OD", + "Ġbas ics", + "Ġcl ips", + "Ġnurs ing", + "Ġpropos ition", + "Ġreal ised", + "ĠNY C", + "Ġall ocated", + "ri um", + "ar an", + "ĠPro duction", + "ĠV ote", + "Ġsm ugg", + "Ġhun ter", + "az er", + "ĠCh anges", + "Ġfl uct", + "y on", + "Ar ray", + "Ġk its", + "W ater", + "Ġuncom mon", + "Ġrest ing", + "ell s", + "w ould", + "Ġpurs ued", + "Ġassert ion", + "omet own", + "ĠMos ul", + "ĠPl atform", + "io let", + "Ġshare holders", + "Ġtra ils", + 
"P ay", + "ĠEn forcement", + "ty pes", + "ĠAn onymous", + "Ġsatisf ying", + "il ogy", + "Ġ( '", + "w ave", + "c ity", + "Ste ve", + "Ġconfront ation", + "ĠE ld", + "C apt", + "ah an", + "ht m", + "ĠC trl", + "ON S", + "2 30", + "if a", + "hold ing", + "Ġdelic ate", + "Ġj aw", + "ĠGo ing", + "or um", + "S al", + "Ġd ull", + "ĠB eth", + "Ġpr isons", + "Ġe go", + "ĠEl sa", + "avor ite", + "ĠG ang", + "ĠN uclear", + "Ġsp ider", + "ats u", + "Ġsam pling", + "Ġabsor bed", + "ĠPh arm", + "iet h", + "Ġbuck et", + "ĠRec omm", + "O F", + "ĠF actory", + "AN CE", + "Ġb acter", + "H as", + "ĠObs erv", + "12 1", + "Ġprem iere", + "De velop", + "Ġcur rencies", + "C ast", + "Ġaccompany ing", + "ĠNash ville", + "Ġfat ty", + "ĠBre nd", + "Ġloc ks", + "Ġcent ered", + "ĠU T", + "augh s", + "or ie", + "ĠAff ordable", + "v ance", + "D L", + "em et", + "Ġthr one", + "ĠBlu etooth", + "Ġn aming", + "if ts", + "AD E", + "Ġcorrect ed", + "Ġprompt ly", + "ĠST R", + "Ġgen ome", + "Ġcop e", + "Ġval ley", + "Ġround ed", + "ĠK end", + "al ion", + "p ers", + "Ġtour ism", + "Ġst ark", + "v l", + "Ġblow ing", + "ĠSche dule", + "st d", + "Ġunh appy", + "Ġlit igation", + "ced es", + "Ġand roid", + "Ġinteg ral", + "ere rs", + "ud ed", + "t ax", + "Ġre iter", + "ĠMot ors", + "oci ated", + "Ġwond ers", + "ĠAp ost", + "uck ing", + "ĠRoose velt", + "f ram", + "Ġyield s", + "Ġconstit utes", + "aw k", + "Int erest", + "Ġinter im", + "Ġbreak through", + "ĠC her", + "Ġpro sec", + "ĠD j", + "ĠM T", + "Res p", + "ĠP T", + "Ġs perm", + "ed it", + "B T", + "Lin ux", + "count ry", + "le ague", + "Ġd ick", + "Ġo ct", + "Ġinsert ing", + "Ġsc ra", + "ĠBrew ing", + "Ġ19 66", + "Ġrun ners", + "Ġpl un", + "id y", + "ĠD ian", + "Ġdys function", + "Ġex clusion", + "Ġdis gr", + "Ġincorpor ate", + "Ġrecon c", + "Ġnom inated", + "ĠAr cher", + "d raw", + "achel or", + "Ġwrit ings", + "Ġshall ow", + "Ġh ast", + "ĠB MW", + "ĠR S", + "Ġth igh", + "Ġ19 63", + "Ġl amb", + "Ġfav ored", + "ag le", + "Ġcool er", + "ĠH ours", + "ĠG U", + "ĠOrig in", + "Ġglim pse", + "---------------- ----", + "L im", + "Ġche ek", + "Ġj ealous", + "- '", + "Ġhar ness", + "ĠPo ison", + "Ġdis abilities", + "ne apolis", + "Ġout look", + "Ġnot ify", + "ĠIndian apolis", + "Ġab rupt", + "ns ic", + "Ġenc rypted", + "Ġfor fe", + "reat h", + "Ġr abb", + "Ġfound ations", + "Ġcompl iment", + "ĠInter view", + "ĠS we", + "Ġad olesc", + "Ġmon itors", + "ĠSacrament o", + "Ġtime ly", + "Ġcontem pl", + "Ġposition ed", + "Ġpost ers", + "ph ies", + "iov ascular", + "v oid", + "ĠFif th", + "Ġinvestig ative", + "OU N", + "Ġinteg rate", + "ĠIN C", + "ish a", + "ibl ings", + "ĠRe quest", + "ĠRodrig uez", + "Ġsl ides", + "ĠD X", + "Ġfemin ism", + "Ġdat as", + "Ġb end", + "ir us", + "ĠNig eria", + "F ox", + "Ch ange", + "Ġair plane", + "ĠLad en", + "Ġpublic ity", + "ixt y", + "Ġcommit ments", + "Ġaggreg ate", + "Ġdisplay ing", + "ĠAr row", + "Ġ12 2", + "Ġrespect s", + "and roid", + "s ix", + "ĠSh a", + "Ġrest oration", + ") \\", + "W S", + "oy s", + "Ġillust rate", + "with out", + "12 6", + "ĠâĶ Ĥ", + "Ġpick up", + "n els", + "Ġ ....", + "f ood", + "ĠF en", + ") ?", + "Ġphenomen a", + "Ġcompan ions", + "ĠW rite", + "Ġsp ill", + "Ġbr idges", + "ĠUp dated", + "ĠF o", + "Ġinsect s", + "ASH INGTON", + "Ġsc are", + "il tr", + "ĠZh ang", + "Ġsever ity", + "Ġind ul", + "14 9", + "ĠCo ffee", + "Ġnorm s", + "Ġp ulse", + "ĠF T", + "Ġhorr ific", + "ĠDest roy", + "ĠJ SON", + "Ġo live", + "Ġdiscuss es", + "R est", + "E lect", + "ĠW inn", + "ĠSurv iv", + "ĠH ait", + "S ure", + "op ed", + "Ġro oted", + "ĠS ke", + 
"ĠBron ze", + "Ġl ol", + "Def ault", + "Ġcommod ity", + "red ited", + "Ġliber tarian", + "Ġforb idden", + "Ġgr an", + "à ¨", + "Ġl ag", + "en z", + "dri ve", + "Ġmathemat ics", + "Ġw ires", + "Ġcrit ically", + "Ġcarb ohyd", + "ĠChance llor", + "ĠEd die", + "Ġban ning", + "ĠF ri", + "Ġcompl ications", + "et ric", + "ĠBangl adesh", + "Ġband width", + "St op", + "ĠOrig inally", + "Ġhalf way", + "yn asty", + "sh ine", + "Ġt ales", + "rit ies", + "av ier", + "Ġspin ning", + "ĠWH O", + "Ġneighbour hood", + "b ach", + "Ġcommer ce", + "ĠS le", + "B U", + "Ġentreprene ur", + "Ġpecul iar", + "ĠCom ments", + "f re", + "3 20", + "IC S", + "Ġimag ery", + "ĠCan on", + "ĠElect ronic", + "sh ort", + "( (", + "D ig", + "Ġcomm em", + "u ced", + "Ġincl ined", + "ĠSum mon", + "Ġcl iff", + "ĠMed iterranean", + "Ġpo etry", + "Ġprosper ity", + "ĠRe ce", + "Ġp ills", + "m ember", + "Ġfin ale", + "un c", + "ĠG ig", + "ä ½", + "Ġl od", + "Ġback ward", + "- +", + "ĠFor ward", + "Ġth ri", + "s ure", + "Ġso ap", + "ĠF X", + "R ES", + "ĠSe xual", + "oul os", + "Ġfool ish", + "Ġright eous", + "Ġco ff", + "terror ism", + "ust ain", + "ot er", + "Ġab uses", + "ne xt", + "Ġab usive", + "Ġthere after", + "Ġprohib ition", + "ĠS UP", + "Ġd ip", + "Ġr ipped", + "Ġinher ited", + "Ġb ats", + "st ru", + "G T", + "Ġflaw ed", + "ph abet", + "Ġf og", + "do ors", + "Ġim aging", + "Ġdig its", + "ĠHung ary", + "Ġar rog", + "Ġteach ings", + "Ġprotocol s", + "ĠB anks", + "à ¸", + "p ound", + "ĠC urt", + ".\" )", + ". /", + "Ġex emption", + "end ix", + "ĠM ull", + "Ġimpro ves", + "ĠG amer", + "d imensional", + "I con", + "ĠMarg aret", + "St atus", + "d ates", + "Ġint ends", + "Ġdep ict", + "Ġpark ed", + "J oe", + "ĠMar ines", + "chn ology", + "! ).", + "Ġjud ged", + "Ġwe ights", + "R ay", + "Ġapart ments", + "he ster", + "Ġrein force", + "Ġoff ender", + "occ up", + "Ġs ore", + "e pt", + "ĠPH P", + "ĠB row", + "Ġauthor ization", + "ĠR isk", + "ĠDel aware", + "ĠQ U", + "Ġnot ifications", + "Ġsun light", + "Ġex clude", + "d at", + "Ġm esh", + "ĠSud an", + "Ġbelong ed", + "Ġsub way", + "Ġno on", + "ĠInter ior", + "ol ics", + "ĠL akers", + "Ġc oding", + "Dis claimer", + "Cal if", + "O ld", + "Ġdis l", + "???? 
?", + "Ġconfir ms", + "Ġrecruit ment", + "Ġhom icide", + "Cons ider", + "ĠJeff rey", + "ft y", + "} ;", + "Ġobject ion", + "do ing", + "ĠLe o", + "W ant", + "Ġgl ow", + "ĠClar ke", + "ĠNorm an", + "Ġver ification", + "Ġpack et", + "ĠForm ula", + "Ġpl ag", + "es ville", + "Ġshout ing", + "Ġo v", + "ĠR EC", + "ĠB ub", + "Ġn inth", + "Ġener g", + "Ġvalid ity", + "Ġup s", + "j ack", + "Ġneighbor ing", + "ĠN ec", + "ew orks", + "ĠH ab", + "are z", + "Ġsp ine", + "Ġevent ual", + "ĠLe aders", + "ĠC arn", + "Ġprob ation", + "Ġrom ance", + "ms g", + "ĠMechan ical", + "ER Y", + "R ock", + "Ġpart isan", + "N ode", + "ass ets", + "min ent", + "Ġforeign ers", + "Ġtest ify", + "ĠUs ually", + "l ords", + "ĠG ren", + "ĠPow ell", + "BI L", + "Ġs r", + "Ġadd ict", + "Ġshell s", + "Ġs igh", + "ĠY ale", + "tern ity", + "Ġ7 50", + "E U", + "ĠR ifle", + "Ġpat ron", + "em a", + "ĠB annon", + "an ity", + "Ġtrop ical", + "ĠV II", + "c ross", + "Every thing", + "ĠIS O", + "Ġhum ble", + "ass ing", + "ĠF IG", + "Ġupd ating", + "ys on", + "Ġcal cium", + "Ġcompet ent", + "Ġste ering", + "Pro t", + "ĠS Y", + "ĠFin als", + "ĠR ug", + "15 9", + "13 7", + "ĠG olf", + "Ġ12 6", + "Ġaccommod ation", + "ĠHug hes", + "Ġaest hetic", + "art isan", + "ĠTw ilight", + "Ġpr ince", + "ĠAgric ulture", + "ĠDis co", + "Ġpreced ent", + "Ġtyp ing", + "author ized", + "O ption", + "ĠA ub", + "l ishes", + "ach t", + "m ag", + "P eter", + "ĠU FO", + "mont on", + "ĠL ith", + "Ġa rom", + "Ġsec uring", + "Ġconf ined", + "priv ate", + "Ġsw ords", + "Ġmark ers", + "Ġmetab olic", + "se lect", + "ĠCur se", + "ĠO t", + "g ressive", + "Ġinc umb", + "ĠS aga", + "Ġpr iced", + "Ġclear ance", + "Cont ent", + "Ġdr illing", + "Ġnot ices", + "Ġb ourgeois", + "Ġv est", + "Ġcook ie", + "ĠGuard ians", + "ry s", + "in yl", + "Ġ12 4", + "Ġpl ausible", + "on gh", + "ĠOd in", + "Ġconcept ion", + "ĠY uk", + "ĠBaghd ad", + "ĠFl ag", + "Aust ral", + "ĠI BM", + "Ġintern ationally", + "ĠWiki Leaks", + "I ED", + "Ġc yn", + "Ġcho oses", + "ĠP ill", + "Ġcomb ining", + "Ġrad i", + "ĠMoh ammed", + "def ense", + "atch ing", + "Sub ject", + "ic iency", + "Fr ame", + "Ġ{ \"", + "Ġche ss", + "Ġtim er", + "19 0", + "Ġt in", + "Ġord inance", + "emet ery", + "Ġacc using", + "Ġnotice able", + "Ġcent res", + "Ġl id", + "ĠM ills", + "img ur", + "Ġz oom", + "erg ic", + "Ġcomp ression", + "pr im", + "f ind", + "Ġsur g", + "Ġp and", + "ĠK ee", + "ĠCh ad", + "cell ence", + "oy le", + "Ġsocial ism", + "ĠT ravis", + "ĠM Hz", + "Ġgu ild", + "ALL Y", + "ĠSub scribe", + "ĠRel ated", + "Ġoccur rence", + "itch ing", + "Ġfict ional", + "Ġcr ush", + "ĠE A", + "c od", + "m ix", + "ĠTri ple", + "Ġretrie ve", + "Ġstimul us", + "Ġpsych iat", + "ĠDo or", + "Ġhomosexual ity", + "Ġelement ary", + "Ġcell ular", + "id ian", + "ĠL aun", + "Ġintrig uing", + "Ġfo am", + "ĠB ass", + "id i", + "its u", + "Ġass ure", + "Ġcongr at", + "Ġbusiness man", + "ĠBo ost", + "cl ose", + "Ġl ied", + "Ġsc iences", + "ĠO mega", + "ĠG raphics", + "Ġ< =", + "sp oken", + "Ġconnect ivity", + "S aturday", + "ĠAven gers", + "Ġto ggle", + "Ġank le", + "Ġnational ist", + "mod el", + "ĠP ool", + "ophob ia", + "V ar", + "ĠM ons", + "ator ies", + "Ġaggress ively", + "C lear", + "For ge", + "act ers", + "Ġhed ge", + "Ġpip es", + "Ġbl unt", + "Ġs q", + "Ġremote ly", + "W ed", + "as ers", + "Ġref riger", + "Ġt iles", + "Ġresc ued", + "Ġcompr ised", + "ins ky", + "Ġman if", + "avan augh", + "Ġprol ifer", + "Ġal igned", + "x ml", + "Ġtri v", + "Ġcoord ination", + "ĠP ER", + "ĠQu ote", + "13 4", + "b f", + "ĠS aw", + "Ġtermin ation", + "Ġ19 
0", + "Ġadd itions", + "Ġtri o", + "Ġproject ions", + "Ġpositive ly", + "Ġin clusive", + "Ġmem br", + "19 90", + "old er", + "Ġpract iced", + "ink le", + "Ar ch", + "Ġstar ters", + "ari us", + "Ġinter mediate", + "ĠBen ef", + "ĠK iller", + "Ġinter ventions", + "ĠK il", + "ĠF lying", + "In v", + "Ġprem ature", + "Ġpsych iatric", + "Ġind ie", + "Ġcoll ar", + "ĠRain bow", + "af i", + "Ġdis ruption", + "ĠFO X", + "cast ing", + "Ġmis dem", + "c ro", + "Ġw ipe", + "ard on", + "Ġb ast", + "ĠTom my", + "ĠRepresent ative", + "Ġbell y", + "ĠP O", + "ĠBre itbart", + "13 2", + "Ġmess aging", + "Sh ould", + "Ref erences", + "ĠG RE", + "ist ical", + "L P", + "ĠC av", + "ĠC razy", + "Ġintu itive", + "ke eping", + "ĠM oss", + "Ġdiscont in", + "ĠMod ule", + "Ġun related", + "ĠPract ice", + "ĠTrans port", + "Ġstatist ically", + "orn s", + "Ġs ized", + "p u", + "Ġca f", + "ĠWorld s", + "ĠRod gers", + "ĠL un", + "ĠCom ic", + "l iving", + "Ġc ared", + "Ġclim bed", + ") {", + "Ġconsist ed", + "Ġmed ieval", + "fol k", + "Ġh acked", + "Ġd ire", + "ĠHerm ione", + "Ġt ended", + "ce ans", + "D aniel", + "w ent", + "Ġlegisl ators", + "Ġred es", + "g ames", + "Ġg n", + "am iliar", + "Ġ+ +", + "gg y", + "th reat", + "Ġmag net", + "Ġper ceive", + "Ġz ip", + "Ġindict ment", + "Ġcrit ique", + "g ard", + "ĠSaf e", + "ĠC ream", + "Ġad vent", + "ob a", + "Ġv owed", + "ous ands", + "Ġsk i", + "Ġabort ions", + "u art", + "Ġstun ned", + "Ġadv ancing", + "Ġlack ed", + "Ġ\\ \"", + "Ġsch izophren", + "Ġeleg ant", + "Ġconf erences", + "Ġcance led", + "ĠHud son", + "ĠHop efully", + "Ġtr ump", + "Ġfrequ encies", + "Ġmet eor", + "ĠJun ior", + "ĠFle et", + "ĠMal colm", + "ĠT ools", + "Ġ ........", + "Ġh obby", + "ĠEurope ans", + "Ġ15 00", + "ĠInt o", + "Ġs way", + "ĠApp ro", + "ĠCom pl", + "Comm unity", + "Ġt ide", + "ĠSum mit", + "ä »", + "Ġinter vals", + "ĠE ther", + "Ġhabit at", + "ĠSteven s", + "lish ing", + "ĠDom ain", + "Ġtrig gers", + "Ġch asing", + "Ġchar m", + "ĠFl ower", + "it ored", + "Ġbless ing", + "Ġtext ures", + "F ive", + "Ġliqu or", + "R P", + "F IN", + "Ġ19 62", + "C AR", + "Un known", + "Ġres il", + "ĠL ily", + "Ġabund ance", + "Ġpredict able", + "r ar", + "Ġbull shit", + "le en", + "che t", + "M or", + "M uch", + "ä ¹", + "Ġemphas ized", + "Ġcr ust", + "Ġprim itive", + "Ġenjoy able", + "ĠPict ures", + "Ġteam mate", + "pl er", + "ĠT ol", + "ĠK ane", + "Ġsummon ed", + "th y", + "ram a", + "ĠH onda", + "Ġreal izing", + "Ġquick er", + "Ġconcent rate", + "cle ar", + "Ġ2 10", + "ĠErd ogan", + "ar is", + "Ġrespond s", + "ĠB I", + "Ġelig ibility", + "Ġpus hes", + "ĠId aho", + "Ġagg rav", + "Ġru ins", + "ur ations", + "Ġb ans", + "Ġan at", + "sh are", + "Ġgr ind", + "h in", + "um en", + "Ġut ilities", + "ĠYan kees", + "Ġdat abases", + "ĠD D", + "Ġdispl aced", + "Ġdepend encies", + "Ġstim ulation", + "h un", + "h ouses", + "ĠP retty", + "ĠRaven s", + "ĠTOD AY", + "Ġassoci ates", + "Ġthe rape", + "cl ed", + "Ġde er", + "Ġrep airs", + "rent ice", + "Ġrecept ors", + "Ġrem ed", + "ĠC e", + "Ġmar riages", + "Ġball ots", + "ĠSold ier", + "Ġhilar ious", + "op l", + "13 8", + "Ġinherent ly", + "Ġignor ant", + "Ġb ounce", + "ĠE aster", + "REL ATED", + "ĠCur rency", + "E V", + "ãĥ ŀ", + "ĠLe ad", + "Ġdece ased", + "B rien", + "ĠMus k", + "J S", + "Ġmer ge", + "heart ed", + "c reat", + "m itt", + "m und", + "ĠâĢ ĭ", + "ĠB ag", + "Ġproject ion", + "Ġj ava", + "ĠStand ards", + "ĠLeon ard", + "Ġcoc onut", + "ĠPop ulation", + "Ġtra ject", + "Ġimp ly", + "Ġcur iosity", + "ĠD B", + "ĠF resh", + "ĠP or", + "Ġheav ier", + "ne ys", + "gom ery", 
+ "Ġdes erved", + "Ġphr ases", + "ĠG C", + "Ġye ast", + "d esc", + "De ath", + "Ġreb oot", + "Ġmet adata", + "IC AL", + "Ġrep ay", + "ĠInd ependence", + "Ġsubur ban", + "ical s", + "Ġat op", + "Ġall ocation", + "gener ation", + "ĠG ram", + "Ġmoist ure", + "Ġp ine", + "ĠLiber als", + "Ġa ides", + "Ġund erest", + "ĠBer ry", + "Ġcere mon", + "3 70", + "ast rous", + "ĠPir ates", + "Ġt ense", + "ĠIndust ries", + "ĠApp eals", + "ĠN ear", + "Ġè£ı ç", + "Ġlo vers", + "ĠC AP", + "ĠC raw", + "Ġg iants", + "Ġeffic acy", + "E lement", + "ĠBeh avior", + "ĠToy ota", + "Ġint est", + "P riv", + "A I", + "Ġmaneu ver", + "Ġperfect ion", + "Ġb ang", + "p aper", + "r ill", + "Ge orge", + "b order", + "in ters", + "ĠS eth", + "Ġcl ues", + "ĠLe vi", + "ĠRe venue", + "14 7", + "Ġv apor", + "Ġfortun ate", + "Ġthreat ens", + "Ġve t", + "Ġdepend ency", + "ers ed", + "art icle", + "ĠBl izzard", + "Ġch lor", + "Ġmin us", + "ĠB ills", + "Ġcryptoc urrency", + "Ġmetabol ism", + "ter ing", + "Ġp estic", + "step s", + "ĠTre asure", + "ract ed", + "ĠConst ant", + "Ġtem p", + "13 9", + "ĠDet ective", + "ur ally", + "Ġrecover ing", + "Ġcort ex", + "Ġ14 4", + "cl osed", + "Ġprejud ice", + "aun ted", + "Ġstorm s", + "ĠN OW", + "Ġmach inery", + "Add ress", + "Ġcompe lled", + "27 0", + "Ġdesp air", + "b ane", + "Ġveget able", + "Ġbed s", + "Lear n", + "Ġcolor ful", + "Ġsp ike", + "Ġmarg ins", + "Ġsymp athy", + "Ġworks hop", + "ĠC BC", + "S at", + "Ġburn s", + "ĠG ender", + "Ġ12 9", + "ĠC able", + "Ġdeb ts", + "ĠThe resa", + "Ġreflect ing", + "Ġa irst", + "Ġr im", + "ram id", + "Ġweakness es", + "W rit", + "ogg le", + "t i", + "ĠCh arge", + "Ġwe ighed", + "Ġ( .", + "Ġl aughter", + "Ġrou ter", + "ĠDemocr acy", + "D ear", + "Ġhas ht", + "Ġd y", + "Ġhint s", + "run ning", + "Ġfin ishes", + "ar us", + "M ass", + "res ult", + "asc us", + "Ġv intage", + "Ġcon qu", + "Ġwild ly", + "ac ist", + "Ġl ingu", + "Ġprot agonist", + "st rom", + "te enth", + "ĠSol o", + "m ac", + "f illed", + "Ġre nown", + "it ives", + "Ġmot ive", + "ĠAnt ar", + "ĠM ann", + "ĠAd just", + "Ġrock ets", + "Ġtrou bling", + "e i", + "Ġorgan isms", + "ass is", + "Christ ian", + "Ġ14 5", + "ĠH ass", + "Ġsw all", + "Ġw ax", + "ĠSurv ival", + "V S", + "ĠM urd", + "v d", + "stand ard", + "Ġdrag ons", + "Ġacceler ation", + "r ational", + "f inal", + "Ġp aired", + "ĠE thereum", + "Ġinterf aces", + "Ġres ent", + "Ġartif acts", + "Å «", + "are l", + "Ġcompet itor", + "ĠNich olas", + "ĠSur face", + "c pp", + "ĠT ot", + "Ġeconom ically", + "Ġorgan ised", + "Ġen forced", + "in ho", + "Ġvar ieties", + "Ġab dom", + "ĠBa iley", + "id av", + "ĠSal v", + "p aid", + "Ġalt itude", + "ess ert", + "ĠG utenberg", + "are a", + "op oulos", + "Ġprofess ors", + "igg s", + "ĠF ate", + "he y", + "Ġ3 000", + "D ist", + "Ġtw ins", + "c ill", + "ĠM aps", + "Ġtra ps", + "Ġwe ed", + "ĠK iss", + "Ġy oga", + "Ġrecip ients", + "ĠWest minster", + "Ġpool s", + "ĠWal mart", + "18 8", + "ĠSchool s", + "att ack", + "ĠAR M", + "par agraph", + "W arning", + "j l", + "Ġself ish", + "anche z", + "ĠHe ights", + "F re", + "ĠS oph", + "Ġ --------------------------------", + "t ml", + "33 3", + "Ġraid s", + "Ġsatell ites", + "KE Y", + "Ġlast s", + "Ñ Ĥ", + "In s", + "ĠD ame", + "Ġunp redict", + "// /", + "gh ai", + "Ġart illery", + "Ġcru ise", + "Ġg el", + "ĠCabin et", + "Ġbl ows", + "ĠE sp", + "Ġprox imity", + "ot he", + "ĠSk ills", + "ĠU pper", + "ob o", + "ĠN DP", + "Ġenjoy s", + "Ġrepe ating", + "ĠConst ruction", + "ĠQuest ions", + "H illary", + "Ġu int", + "Ġprocess ors", + "ĠGib son", + "ĠMult iple", + "q a", 
+ "ĠB om", + "ĠM iles", + "vent ional", + "Ġhur ts", + "s kin", + "ĠA IDS", + "Ġadvis ers", + "ĠR oot", + "Ġmethod ology", + "ĠD ale", + "Ġdet on", + "ĠKnow ledge", + "sequ ently", + "Ġ12 1", + "Ġconnect s", + "C y", + "ĠD anger", + "Ġcontribut ors", + "ĠB ent", + "Ġbr ass", + "ĠGun s", + "int o", + "ĠFort une", + "Ġbro ker", + "bal ance", + "Ġlength s", + "Ġv ic", + "Ġaver aging", + "Ġappropri ately", + "ĠCamer a", + "Ġsand wich", + "ĠCD C", + "Ġcoord inate", + "Ġnav ig", + "Ġgood ness", + "l aim", + "Ġbra ke", + "Ġextrem ist", + "ĠW ake", + "ĠM end", + "ĠT iny", + "ĠC OL", + "ĠR F", + "ĠD ual", + "ĠW ine", + "C ase", + "Ġref ined", + "Ġl amp", + "L ead", + "Ġb apt", + "ĠCar b", + "ĠS add", + "ĠMin neapolis", + "PD F", + "Ear ly", + "ĠH idden", + "I ts", + "ĠT IME", + "Ġp ap", + "Ġcommission ed", + "ĠF ew", + "ĠCol ts", + "ĠB ren", + "Ġbot hered", + "Ġlike wise", + "Ex per", + "ĠSch w", + "c ry", + "n n", + "ĠM itch", + "im on", + "M G", + "b m", + "UM P", + "r ays", + "Ġregist ry", + "Ġ2 70", + "ach ine", + "re lla", + "ant ing", + "00 000", + "Ġru ined", + "sp ot", + "Ġt a", + "Ġmaxim ize", + "Ġincon ven", + "D ead", + "H uman", + "En abled", + "ĠMar ie", + "Ġch ill", + "ĠParad ise", + "Ġstar ring", + "ĠLat ino", + "ĠProt ocol", + "ĠE VER", + "Ġsuppl iers", + "m essage", + "ĠBro ck", + "Ġser um", + "âĸĪâĸĪ âĸĪâĸĪ", + "Ġen comp", + "Ġamb ition", + "ues e", + "Ġar rows", + "And rew", + "Ġanten na", + "Ġ19 61", + "ĠB ark", + "Ġb ool", + "ãĤ ª", + "ĠSt orage", + "Ġrail way", + "Ġtoug her", + "ĠC ad", + "Ġwas hing", + "P y", + "' ]", + "em bed", + "ĠMem phis", + "ack le", + "Ġfam ously", + "ĠF ortunately", + "ov ies", + "Ġmind set", + "Ġsne ak", + "ĠD h", + "RA W", + "ĠSim pson", + "Ġliv est", + "Ġland mark", + "Ġc ement", + "L ow", + "Ġthr illed", + "ĠCour se", + "in el", + "Ġch uck", + "id ate", + "gl obal", + "Ġwh it", + "Ġ �", + "ad ays", + "s ki", + "ĠS V", + "Ġvir uses", + "30 6", + "ĠResp ons", + "Ġthe aters", + "ĠBr anch", + "ĠGene va", + "ĠM K", + "Ġunbel iev", + "Ġcommun ist", + "Orig inal", + "ĠRe ceived", + "ĠTrans fer", + "ĠAr g", + "In put", + "ĠStr ategy", + "Ġpal ace", + "the ning", + "D ri", + "Ġsent encing", + "umbn ail", + "Ġp ins", + "re cy", + "Ġs iblings", + "Get ting", + "ĠB U", + "ĠNorth west", + "Ġprolong ed", + "ĠSak ura", + "C omb", + "ĠB our", + "Ġinadequ ate", + "ĠK ash", + "Ġus ername", + "ĠImpro ve", + "Ġbatt ling", + "ĠM AC", + "Ġcurric ulum", + "Ġs oda", + "ĠC annon", + "Ġsens ible", + "sp ons", + "De cember", + "Ġw icked", + "ĠP engu", + "Ġdict ators", + "ĠHe arts", + "og yn", + "Ġsimilar ities", + "ĠSt ats", + "Ġh ollow", + "it ations", + "\": [", + "Ġh over", + "ĠList en", + "s ch", + "S und", + "Ġc ad", + "ĠPar ks", + "Ġl ur", + "Ġhy pe", + "ĠL em", + "N AME", + "is ure", + "Fr iday", + "Ġshoot s", + "Ġclos es", + "Ġd b", + "ĠR idge", + "ĠDiff erent", + "Ġrepl ies", + "ĠBroad way", + "op ers", + "Ġint oler", + "ĠZe us", + "akes pe", + "Ġpropri etary", + "Ġrequest ing", + "Ġcontro llers", + "ĠM IN", + "im edia", + "be cca", + "Ġexp ans", + "Ġoil s", + "B ot", + "ĠCh and", + "Ġpr inter", + "Ġto pped", + "ĠP OL", + "ĠEar lier", + "S ocial", + "av in", + "Ġdecre ases", + "ĠSe b", + "Ġspecific ations", + "ĠBl ast", + "ĠK urt", + "Ġfre el", + "B rown", + "Ġdil ig", + "ro e", + "ĠPro blem", + "ĠQu ad", + "Ġdecent ral", + "ĠV ector", + "an ut", + "Ġplug ins", + "ĠGreg ory", + "Ġfuck ed", + "el ines", + "ĠAmb assador", + "t ake", + "Ġcle ans", + "ong yang", + "An onymous", + "st ro", + "\" }", + "al ine", + "ĠO dd", + "ĠE ug", + "2 16", + "Ġbo il", + "ĠP owers", + 
"Ġnurs es", + "Ob viously", + "ĠTechn ical", + "Ġexceed ed", + "OR S", + "Ġextrem ists", + "Ġtr aces", + "ex pl", + "Ġcom r", + "ĠS ach", + ") /", + "Ġm asks", + "Ġsc i", + "B on", + "Ġreg ression", + "we gian", + "Ġadvis or", + "it ures", + "ĠV o", + "ex ample", + "ĠInst ruct", + "Ġs iege", + "Ġredu ctions", + "pt r", + "Ġstat utory", + "Ġrem oves", + "Ġp uck", + "red its", + "Ġbe e", + "Ġsal ad", + "Ġpromot ions", + "ĠJosh ua", + "with standing", + "ET H", + "ĠCh a", + "im us", + "Ġexpend iture", + "aun ting", + "Ġdelight ed", + "Ġ15 5", + "be h", + "Ġcar pet", + "ĠSp art", + "Ġj ungle", + "l ists", + "Ġbull ying", + "ĠNob el", + "ĠGl en", + "Ġreferen ced", + "Ġintrodu ces", + "se in", + "Ġcho pped", + "gl ass", + "ĠW rest", + "Ġneutral ity", + "Ġâ Ļ", + "Ġinvestig ator", + "Ġshel ves", + "Ġun constitutional", + "Ġreprodu ction", + "Ġmer chant", + "m ia", + "Ġmet rics", + "Ġexplos ives", + "ĠSon ia", + "Ġbod ily", + "Ġthick ness", + "Ġpredomin antly", + "ĠAb ility", + "Ġmon itored", + "IC H", + "Ġ] .", + "ĠMart inez", + "Ġvis ibility", + "Ġqu eries", + "Ġgen ocide", + "ĠWar fare", + "Qu ery", + "Ġstud ios", + "Ġemb ry", + "Ġcorrid or", + "Ġclean ed", + "com plete", + "ĠM H", + "Ġenroll ment", + "ING S", + "Ġimpact ed", + "Ġdis astrous", + "ĠY un", + "ĠCl aire", + "ĠBas ically", + "y t", + "uster ity", + "Ġindirect ly", + "w ik", + "Ġd od", + "ĠCar r", + "Ġam p", + "Ġprohib it", + "ĠIn itial", + "ĠR d", + "ij i", + "Ġeduc ate", + "c orn", + "i ott", + "ĠBeaut y", + "Ġdetect ive", + "ĠCon n", + "s ince", + "Ġst agger", + "Ġob ese", + "Ġb ree", + "olog ic", + "is se", + "walk er", + "Ġbl ades", + "Ġlaw ful", + "fun c", + "ĠBeh ind", + "Ġappet ite", + "Ġ( *", + "Ġt ennis", + "Ġoff spring", + "Ġj ets", + "Ġstruct ured", + "Ġafore mentioned", + "N ov", + "Ġsc aling", + "f ill", + "Ġst ew", + "Ġcur b", + "ĠStep han", + "ed In", + "S F", + "ob ic", + "é ŃĶ", + "ou g", + "ĠM M", + "Ġgen etically", + "ope z", + "13 6", + "Ġu mb", + "anc ers", + "Ġcoh ort", + "Ġmerch andise", + "Ġimp osing", + "ĠLegisl ature", + "ĠArch ive", + "iv ia", + "ĠN aval", + "Ġoff ences", + "Ġmir acle", + "Ġsn apped", + "Ġf oes", + "Ġextensive ly", + "ĠR af", + "Ġc ater", + "ed ience", + "K it", + "ĠB in", + "Ġrecomm ends", + "ĠC ities", + "Ġrig id", + "ĠRE AD", + "ĠNob le", + "ĠT ian", + "Ġcertific ates", + "ant is", + "o iler", + "ĠBudd hist", + "d id", + "Ġsurvey ed", + "Ġdown ward", + "Ġprint s", + "ĠMot ion", + "ron ics", + "ĠS ans", + "oss ibly", + "u ctions", + "Ġcolon ies", + "ĠDan ish", + "un it", + "Ġsp oil", + "Ġadvis ory", + "ber ries", + "Pl an", + "Ġspecific ation", + "op hers", + "ĠRes ource", + "Ġsh irts", + "prising ly", + "commun ications", + "Ġtriv ial", + "Ġmention ing", + "ise xual", + "Ġsupp lements", + "Ġsuper vision", + "B P", + "v or", + "Ġw it", + "Ġco oldown", + "Ġplaint iff", + "ĠReview s", + "ĠS ri", + "ĠM int", + "ĠSug ar", + "Ġafter ward", + "ĠPri est", + "ĠInvest ment", + "og ene", + "ĠT aking", + "Ġstretch ing", + "Ġinflamm ation", + "ĠTe hran", + "Ġl ining", + "Ġfree zing", + "ĠEnt ity", + "Ġins piring", + "spe cial", + "pr ice", + "Ġsu e", + "ĠP orter", + "oun ge", + "ET A", + "ĠD erek", + "ĠLu is", + "u o", + "ym ph", + "Ġex terior", + "ih il", + "ĠAsh ley", + "in ator", + "Ġnut rients", + "ĠTh rones", + "Ġfin ances", + "ĠIn spect", + "Ġspe cially", + "ĠRequ ired", + "ĠP TS", + "ĠViol ence", + "oint ed", + "sh ots", + "Ġex cerpt", + "co on", + "IN S", + "ĠG ri", + "Ġrecogn ised", + "We ek", + "You ng", + "Ġv om", + "is le", + "ĠCur ry", + "ĠBudd h", + "Ġnot ebook", + "Ġd urable", + "/ 
?", + "ĠG ad", + "ĠP upp", + "Ġforg ive", + "p ark", + "Ġpersonal ities", + "an alysis", + "cl amation", + "Ġelev ator", + "Ġware house", + "ĠR ole", + "un n", + "Ġillust ration", + "ĠSc an", + "Ġatmosp heric", + "Im port", + "AN C", + "rict ed", + "f u", + "01 0", + "Ġar che", + "Ġreward ed", + "akespe are", + "Ġintern ally", + "ĠR BI", + "alk er", + "Ġeleph ant", + "ow itz", + "ĠP izza", + "Ġbip artisan", + "é s", + "Ġslow ed", + "ĠSt ark", + "Ġover ride", + "OU S", + "Ġ3 20", + "undred s", + "ĠDe ck", + "ĠC ensus", + "be e", + "14 6", + "ot or", + "Ġ ip", + "Ġu b", + "oc ations", + "ĠBut ton", + "r ice", + "Ġc ripp", + "ff f", + "Ġorig inated", + "Ġoverwhel med", + "app a", + "Ġfore most", + "âĢ ij", + "ĠL EG", + "re lease", + "eat ured", + "at ches", + "Ġre ps", + "Ġl ending", + "ĠRe ference", + "ĠCl ient", + "16 5", + "vent h", + "Com plete", + "ĠPat rol", + "Ġsw orn", + "c am", + "Ġshut tle", + "ĠR alph", + "Ġh ometown", + "- ,", + "on al", + "ĠB P", + "å ı", + "Ġpersu ade", + "ĠAlex and", + "Ġcomb ines", + "Ġv ivid", + "ĠL ag", + "Ġenc oding", + "Ġsal vation", + "w en", + "ĠRec overy", + "i ya", + "Un iversity", + "ĠB iden", + "Ġbud gets", + "ĠTex ans", + "f its", + "Ġhon ored", + "Ġp ython", + "T D", + "## #", + "cl one", + "Ġbl ink", + "ĠL iquid", + "Ġunemploy ed", + "Ġcl ashes", + "ĠCoun sel", + "Ġdirect ing", + "Ġpun ct", + "ĠFal cons", + "Ġsh ark", + "ĠDam ascus", + "Ġje ans", + "Ġemb ark", + "Ġse ize", + "Ġup wards", + "2 80", + "ĠE z", + "ĠAny thing", + "Ġex otic", + "l ower", + "ĠCreat or", + "ĠU m", + "Ġsubur bs", + "ber ger", + "ĠW end", + "Ġm int", + "ĠX X", + "ĠD ro", + "Ġsuff ers", + "Ġher b", + "t ree", + "Ġfrag ile", + "Ġflood ed", + "ĠAl cohol", + "ole an", + "ny der", + "ĠK O", + "F ram", + "Ġ13 6", + "Ġow ed", + "ĠMe lee", + "ĠH ash", + "Ġwh isk", + "Ġsu do", + "r r", + "Qu ick", + "app ro", + "Ġi i", + "ĠEx amples", + "he e", + "Ġpromot es", + "per ature", + "k ar", + "ĠHon or", + "Ġs odium", + "ĠL if", + "ros so", + "intend ent", + "Ġcorrespond ent", + "F ound", + "sec ret", + "Ġident ifies", + "ag ne", + "Ġl ou", + "ĠP P", + "Ġcoinc idence", + "m ove", + "Ġmilit ia", + "Ġinf iltr", + "ĠPrim ary", + "Ġpitch ing", + "ĠI b", + "ĠGO OD", + "ãĤ ¸", + "ĠW izards", + "ir al", + "ĠVen us", + "R R", + "ĠâĢ ķ", + "ĠCase y", + "Ġsad ly", + "Ġadm ire", + "Ġembarrass ed", + "c b", + "M el", + "Ġtub es", + "Ġbeaut ifully", + "ĠQueens land", + "Bel ow", + "re z", + "qu et", + "ple asant", + "Ġ «", + "C amp", + "Ġdec isive", + "19 98", + "ĠL amb", + "ut ton", + "h n", + "ĠJ agu", + "au nder", + "ĠC ord", + "Ġcl erk", + "Ġca ffe", + "Ġwip ed", + "Ġre im", + "ĠMount ains", + "Ġimprison ed", + "Ġdevelop s", + "ĠP ra", + "Ġmodel ing", + "Any one", + "ance l", + "ĠS it", + "Ġshield s", + "Ġl awn", + "Ġcard iovascular", + "Ġdemonstr ating", + "Ġpar se", + "ĠIsrael is", + "Ġeuro s", + "14 3", + "Ġgl orious", + "ins ki", + "ec d", + "Ġcondition ing", + "Ġhel pless", + "Ġmicro sc", + "ĠHar bor", + "Ġst akes", + "Ġ2 60", + "Ġun equ", + "ĠFl oyd", + "Ġd amp", + "Ġappar atus", + "ĠLaw s", + "Ġcoun ters", + "Ġindu ce", + "at able", + "ĠAh med", + "Ġsl am", + "N ovember", + "Ġpers ist", + "Ġim minent", + "á n", + "Ġsh red", + "Ġph ases", + "ĠEd monton", + "ĠArm strong", + "ĠMe et", + "ĠK itty", + "Ñ Ģ", + "c irc", + "ĠAd ult", + "Ġa rose", + "ĠX en", + "D an", + "g ow", + "Ġsuper f", + "ĠAd mir", + "Ġend ure", + "Ġkey word", + "yr us", + "Ġy arn", + "Ġpath way", + "ĠHop kins", + "mid t", + "Ġcens orship", + "d ependent", + "Ġinstruct or", + "S ources", + "Ġto e", + "Ġball oon", + "N ob", + 
"Ġsw ear", + "ĠCast ro", + "Ġgl oss", + "ĠK avanaugh", + "Ġremark ably", + "Ph otos", + "ĠN om", + "ĠS outheast", + "y ers", + "Ġvalid ation", + "Ġcann on", + "ĠVict ory", + "ĠPier re", + "Ġcaut ious", + "Aud io", + "Ġf etch", + "ĠG ift", + "ĠH yp", + "Ġrem edy", + "Z E", + "Ġsc ent", + "Ġbe ard", + "ĠR ut", + "- \"", + "Ġpat ents", + "H y", + "Ġun just", + "Ġpot ato", + "Ġforth coming", + "Ġche f", + "ĠR ift", + "aff e", + "ĠR OM", + "ĠL aunch", + "Ġp ads", + "ĠNe o", + "Ġon set", + "Ġsquee ze", + "s afe", + "Ġpref ix", + "ĠT M", + "ĠN early", + "ĠClin ical", + "ĠM ental", + "ot iation", + "ĠUn ic", + "ant ry", + "ĠC ir", + "Ġep it", + "à ¦", + "Ġextract ed", + "verse ly", + "ri ad", + "Ġstr ains", + "Ġto ps", + "Ġpo em", + "ĠRand y", + "ĠMap le", + "TH ER", + "up iter", + "ĠSS D", + "ļ é", + "Ġun con", + "per ing", + "Ġsle pt", + "in ers", + "Ġunder water", + "ĠEv idence", + "g one", + "20 5", + "Ġhistor ians", + "Ġsynt hesis", + "Ġf rog", + "b asketball", + "Ġvibr ant", + "Ġsub ord", + "Ġ3 65", + "ĠD ial", + "Ġcooper ate", + "HA HA", + "Ġgreet ed", + "15 8", + "Ġj azz", + "Ġinto x", + "ĠWalk ing", + "Ġsuper visor", + "ĠF usion", + "ĠMer cedes", + "s end", + "H am", + "s d", + "n l", + "Ġtour s", + "ĠF IFA", + "Ġcul p", + "g d", + "30 4", + "Ġple as", + "Ġillust rates", + "ĠColomb ia", + "Ġhighlight ing", + "ĠSum mary", + "Ġexp osing", + "ĠD ru", + "Ġir ony", + "r itional", + "ĠCar roll", + "ĠEll is", + "P ict", + "ĠR apt", + "Ġad apter", + "Ġun m", + "Ġcor pse", + "Ġceleb rities", + "D en", + "at um", + "ĠAp ocalypse", + "ĠW ag", + "lin ing", + "Ġhorm ones", + "R ub", + "ĠX i", + "ĠV aults", + "20 8", + "alky rie", + "inos aur", + "Ġfeed s", + "v ity", + "Ġdefe ating", + "W ait", + "Ġemphas ize", + "ĠSteel ers", + "yr inth", + "le ys", + "ĠWhe never", + "Current ly", + "ĠCl ock", + "Ġcollect ively", + "any on", + "ĠJ P", + "Ġment ality", + "Ġdownload s", + "Ġsurround ings", + "ĠBarn es", + "Ġflags hip", + "Ġindic ators", + "Ġgra pp", + "Jan uary", + "ĠElement al", + "ĠAthen a", + "ib al", + "Ġs ights", + "Ġcap ita", + "ĠTreat y", + "Ġvo iced", + "ĠG az", + "let te", + "Ġy a", + "Ġexp ired", + "Leg end", + "H ot", + "n ature", + "Ġunst able", + "Ġ2 80", + "à º", + "Com ment", + "AL E", + "Ġquest s", + "Ġhand ler", + "n is", + "Ġvers atile", + "Ġconce al", + "enge ance", + "ĠInter active", + "Ġobs essed", + "ĠDog s", + "Ġcr acked", + "S ound", + "s v", + "ĠD ylan", + "ro ads", + "f x", + "ĠCath olics", + "ĠH ag", + "Ġsl ammed", + "Ġgl owing", + "s ale", + "Ġtiss ues", + "ĠCh i", + "ne e", + "Ġc her", + "s ic", + "ur rection", + "Ġb acon", + "ul atory", + ") .\"", + "Ġir regular", + "FOR M", + "ass ed", + "Ġintention al", + "Ġcompens ate", + "ĠSpe aking", + "ĠS ets", + "15 3", + "Ġconvent ions", + "b ands", + "em ade", + "Ġe cc", + "ĠWin ston", + "ĠAssass in", + "ĠBelg ian", + "Ġdepend ence", + "Ġnic he", + "Ġb ark", + "ĠJ azz", + "Ġdisadvant age", + "Ġgas oline", + "Ġ16 5", + "çļ Ħ", + "ess a", + "mod ule", + "ang ular", + "O Y", + "ĠTreat ment", + "it as", + "ol ation", + "ĠArn old", + "Ġfe ud", + "ĠN est", + "Ġthe atre", + "ew ater", + "Ġmin ors", + "olic y", + "ĠH aven", + "div ision", + "Ġtr unk", + "F ar", + "ĠP ull", + "Ġcapt uring", + "Ġ18 00", + "ĠTe en", + "Ġex empl", + "Ġclin ics", + "ĠB urg", + "Ġsubst it", + "Ġpay load", + "ĠL av", + "ĠT roy", + "ĠW itness", + "Ġfrag ments", + "Ġpass words", + "Ġg ospel", + "ĠG in", + "Ġten ants", + "ol ith", + "S ix", + "Pre vious", + "ĠAg es", + "ĠDar win", + "Ġbl at", + "Ġem pathy", + "sm ith", + "b ag", + "ĠE cho", + "ĠC amb", + "ĠM add", 
+ "ĠB oo", + "Ġred e", + "ĠBurn ing", + "Ġsmooth ly", + "ĠAd rian", + "ĠV ampire", + "ĠMon sters", + "ste am", + "Sty le", + "M a", + "re a", + "ĠD war", + "aly st", + "urs or", + "Ġelim ination", + "Ġcrypt o", + "ch t", + "ĠE ternal", + "âĢ¦ ]", + "ĠS orce", + "I ll", + "N ER", + "Ġu h", + "Con clusion", + "w age", + "Ġresp ir", + "Ġrem inis", + "het ical", + "Ġg y", + "Ġutil ized", + "ic idal", + "Ġ19 00", + "Ġhun ters", + "ĠSw an", + "ĠRe act", + "Ġvis itor", + "ĠThanks giving", + "30 8", + "Post s", + "Ġh ips", + "19 97", + "om ers", + "Ġkn ocking", + "ĠVeh icle", + "Ġt il", + "Ġ13 8", + "Ġm i", + "ĠInvest igation", + "ĠKen ya", + "Ġcas ino", + "Ġmot ives", + "Ġreg ain", + "re x", + "Ġweek ends", + "Ġstab bed", + "bor o", + "Ġexplo ited", + "ĠHA VE", + "ĠTe levision", + "c ock", + "Ġprepar ations", + "Ġende av", + "ĠRem ote", + "ĠM aker", + "ĠPro du", + "ĠEv an", + "Ġinform ational", + "ĠLouis ville", + "15 4", + "ĠDream s", + "Ġpl ots", + "ĠRun ner", + "Ġhur ting", + "Ġacad emy", + "ĠMont gomery", + "n m", + "ĠL anc", + "ĠAl z", + "2 10", + "el ong", + "Ġretail er", + "Ġar ising", + "Ġrebell ion", + "Ġbl onde", + "play ed", + "Ġinstrument al", + "C ross", + "Ġret ention", + "Ġtherape utic", + "Ġse as", + "Ġinfant ry", + "ĠCl int", + "Ġprompt ing", + "Ġbit ch", + "Ġst ems", + "ĠK ra", + "Ġthe sis", + "ĠB og", + "ru ed", + "Ġk ings", + "Ġcl ay", + "ific ent", + "ĠY ES", + "ĠTh ing", + "ĠCub s", + "vey ard", + "els h", + "in arily", + "ĠE y", + "ĠRoll ing", + "Ġev olving", + "Ind ia", + "Ġrecogn izes", + "Ġgrad uation", + "is ers", + "Ġfert ility", + "ĠMil an", + "Comm and", + "Ġbox ing", + "Ġ19 43", + "Ġgl uten", + "ĠEm ir", + "Ġid ol", + "Ġcon ceived", + "ĠCre ation", + "Mer it", + "udd y", + "uss ions", + "ĠLie utenant", + "iet al", + "Ġunch anged", + "ĠSc ale", + "ĠCrime a", + "ball s", + "ator ial", + "Ġdepth s", + "Ġempir ical", + "Ġtrans m", + "Ġuns afe", + "miss ible", + "com fort", + "15 6", + "Ġmechan ic", + "00 2", + "l ins", + "Ġsm oked", + "P os", + "Ġslow ing", + "Ġl av", + "Tex as", + "Ġche ating", + "ĠMet ropolitan", + "eth yl", + "Ġdiscover ing", + "as se", + "Ġpen cil", + "ĠPy ongyang", + "Ġclos et", + "ĠShe et", + "ĠEnt ry", + "ou stic", + "Ġmy st", + "er ate", + "ari at", + "Ġminer als", + "Ġmusic ian", + "ĠP ul", + "ĠM az", + "24 9", + "Ġper missions", + "Ġ iv", + "en ary", + "ick ers", + "ĠB ing", + "he a", + "en able", + "Ġgri ev", + "Ġassert ed", + "ĠColon el", + "Ġaff idav", + "w o", + "Ġse ated", + "ĠR ide", + "Ġpaint ings", + "ĠP ix", + "Ġ13 7", + "ish i", + "umb ai", + "g otten", + "ĠEar l", + "Ġin ning", + "Ġc ensus", + "Ġtrave lled", + "ĠCons ult", + "18 5", + "b ind", + "Ġsimpl icity", + "Ġoverlook ed", + "ĠHelp ful", + "Ġmon key", + "Ġoverwhelming ly", + "Bl ood", + "ĠFl int", + "ĠJ ama", + "ĠPres ent", + "ĠR age", + "ĠT A", + "pt ive", + "Ġturn out", + "w ald", + "ĠD olphins", + "ĠV PN", + "Ġon ion", + "Ġcraft ing", + "m ma", + "ĠMerc ury", + "Ġarr ange", + "Ġalert s", + "ĠO T", + "zb ollah", + "Ġg ases", + "ĠRichards on", + "s al", + "l ar", + "Ġfro st", + "Ġlower ing", + "Ġacc laim", + "Ġstart ups", + "ĠG ain", + "ess ment", + "Ġguard ian", + "äº º", + "ĠP ie", + "ĠL inks", + "Ġmer its", + "Ġaw ake", + "Ġparent al", + "Ġexceed s", + "Ġid le", + "ĠPil ot", + "Ġe Bay", + "ĠAc cept", + "ipe g", + "C am", + "ĠK ot", + "Ġtrad ers", + "olit ics", + "unk er", + "ĠP ale", + "os i", + "an mar", + "Ġ19 47", + "ĠF ell", + "est ial", + "it ating", + "G F", + "ĠS r", + "if ted", + "Ġconnect or", + "ĠB one", + "ill es", + "2 60", + "h ma", + "Ġoverl ap", + "ĠGit 
Hub", + "Ġclean er", + "ĠBapt ist", + "ĠW AS", + "Ġlung s", + "Ñ ģ", + "ĠB UT", + "Ġc ite", + "Ġpit ched", + "reat ment", + "Ġtro phies", + "ĠN u", + "38 6", + "ĠPr ide", + "Ġattend ees", + "[ ]", + "17 9", + "Ġspat ial", + "Ġpri zes", + "ĠRel igion", + "Ġshow case", + "ĠC ategory", + "vid ia", + "T arget", + "Pro perty", + "? ,", + "Ġf usion", + "p ie", + "ĠU CLA", + "Ġsound track", + "Ġprin cess", + "ĠC aval", + "sh ould", + "Ġlim bs", + "Back ground", + "Ġlone ly", + "Ġc ores", + "ĠT ail", + "she et", + "Ġ13 2", + "R a", + "ãĤ «", + "ĠB olt", + "Ġbook ed", + "Ġadmin ister", + "Ġequ als", + "w y", + "Ġobserv ing", + "ĠBar on", + "ĠAd obe", + "Ġv irgin", + "ĠSocial ist", + "M ove", + "gh azi", + "ĠLind a", + "2 12", + "Ġbre wing", + "Ġmerch ants", + "bur se", + "Ġdiv or", + "Ġmet als", + "ĠN er", + "Ġsum s", + "ĠEn emy", + "Ġen vision", + "Ġgrant ing", + "ĠH oney", + "ĠSk yrim", + "Ġsoc io", + "gr aded", + "Ġselect ive", + "W ASHINGTON", + "Ġ19 48", + "ĠSir ius", + "ĠG ross", + "act ivity", + "ĠI van", + "Ġfur ious", + "BS D", + "ĠPre vious", + "Ġrespons ive", + "Ġchar itable", + "Ġle aning", + "ĠP ew", + "Ġviol ates", + "\\\\\\\\ \\\\\\\\", + "ĠCom ing", + "w ire", + "Ġpo et", + "Ġres olutions", + "comm and", + "ĠPortug uese", + "Ġnick name", + "Ġde af", + "Feb ruary", + "Ġrecogn ise", + "Ġentire ty", + "Ġseason al", + "pl aced", + "ĠTe legraph", + "Ġmicro phone", + "our ing", + "Ġgr ains", + "Ġgovern ed", + "Ġpost p", + "ĠW aters", + "in ement", + "Ġund ocumented", + "ĠCom cast", + "Ġf ox", + "Ġassault s", + "re on", + "man y", + "ĠJen kins", + "ĠAny way", + "Ġassess ments", + "Ġdown s", + "ĠM ouse", + "Ġsuper b", + "k t", + "ĠD ow", + "Ġtax ation", + "4 01", + "Ġsm iles", + "Ġundert aken", + "Ġex h", + "Ġenthusi astic", + "Ġtw ent", + "Ġgovernment al", + "Ġautonom y", + "ĠTechn ologies", + "ĠCh ain", + "Ġpreval ent", + "f b", + "Ġnic otine", + "og ram", + "j ob", + "Ġawa iting", + "ĠMen u", + "Ġdep uties", + "k ov", + "ish ops", + "But ton", + "ĠShan ghai", + "Ġdies el", + "ĠD uck", + "R yan", + "ĠPC s", + "N F", + "j ury", + "ent e", + "Ġinacc urate", + "edd y", + "Wh atever", + "Ġshow c", + "ĠN ad", + "od us", + "et r", + "Ġplaint iffs", + "ĠW OR", + "ĠAss ange", + "Ġpriv at", + "Ġpremium s", + "Ġt am", + "UR L", + "Ġel ites", + "ĠR anger", + "otten ham", + "ĠH off", + "ĠAt hens", + "Ġdefin ite", + "Ġs ighed", + "Ġeven ly", + "2 11", + "ĠAm ber", + "ak ia", + "Ġmail ing", + "Ġcr ashing", + "ĠConfeder ate", + "ru gged", + "W al", + "ĠDep ths", + "Ġjuven ile", + "Ġreact or", + "Introdu ction", + "ĠDel uxe", + "19 95", + "ĠS anchez", + "ĠM ead", + "iv able", + ": -", + "ĠPlan ning", + "ĠT rap", + "qu in", + "ĠProt ect", + "ve red", + "In formation", + "Ġkid ney", + "inn amon", + "l as", + "Ġpolic ing", + "Ġtoler ate", + "ĠQ i", + "Ġbi ased", + "F ort", + "ĠK i", + "s ave", + "Ġprivile ged", + "Ġbe asts", + "ĠGl as", + "ĠC inem", + "Ġcome back", + "Sund ay", + "Ġext inction", + "h ops", + "Ġtrans mit", + "Ġdoub les", + "ĠFl at", + "16 7", + "Ġdis puted", + "Ġinjust ice", + "f oo", + "V ict", + "role um", + "ĠJul ie", + "Con text", + "ĠR arity", + "iss ue", + "Comp onent", + "Ġcounsel ing", + "an ne", + "d ark", + "Ġobject ions", + "u ilt", + "Ġg ast", + "Ġpl ac", + "Ġun used", + "ãĥ ĩ", + "ĠT rial", + "ĠJ as", + "hed ral", + "ob b", + "Ġtempor al", + "ĠPR O", + "ĠN W", + "ĠAnn iversary", + "L arge", + "Ġther m", + "Ġd avid", + "Ġsystem ic", + "ĠSh ir", + "m ut", + "ĠNe pt", + "add ress", + "Ġscan ning", + "Ġunderstand able", + "Ġcan vas", + "C at", + "ĠZ oo", + "Ġang els", + "L O", + 
"ĠStat ement", + "ĠS ig", + "ov able", + "ĠA way", + "sh aring", + "ocr ats", + "st ated", + "Ġweigh ing", + "N or", + "w ild", + "B ey", + "Ġaston ishing", + "ĠReyn olds", + "Ġop ener", + "Ġtrain er", + "Ġsurg ical", + "p n", + "Ġadjust ing", + "whe el", + "Ġf rown", + "erv ative", + "Ġsusp end", + "With in", + "te in", + "Ġobst acle", + "Ġliber ties", + "ym es", + "Ġur anium", + "ans om", + "an ol", + "ub a", + "ĠL oss", + "Ġa rous", + "ĠHend erson", + "W ow", + "s pl", + "c ur", + "Ġ Ń", + "Ġtheir s", + "Dam age", + "Ġdownload ing", + "Ġdisc ern", + "ĠSt o", + "ĠFl a", + "Ġh ath", + "ĠA j", + "Ġun pleasant", + "Europe an", + "exp ensive", + "Ġscreens hot", + "ĠU V", + "Ġall ied", + "ĠPers ian", + "Ġmonop oly", + "Ġat om", + "ĠReds kins", + "\"> <", + "Ġcan cell", + "Ġcinem a", + "13 1", + "f air", + "ĠAlf red", + "Ġd uck", + "arg s", + "22 3", + "ĠIS I", + "Ġsign aling", + "in ar", + "Ġlaugh s", + "Ġfor wards", + "Ġreck less", + "Ġlisten ers", + "at ivity", + "Ġvast ly", + "n ant", + "L ess", + "ĠHun ting", + "ĠScient ific", + "IT ED", + "Ġkn ight", + "ĠH TC", + "us a", + "t mp", + "Ġr ude", + "ĠLegend ary", + "Ġar ises", + "B ad", + "ĠCl aim", + "pe g", + "Ġreal ities", + "Th ink", + "Ġ °", + "Ġro de", + "Ġstri ve", + "Ġan ecd", + "Ġshort s", + "Ġhypot hes", + "Ġcoord inated", + "ĠGand hi", + "ĠF PS", + "R ED", + "Ġsuscept ible", + "Ġshr ink", + "ĠCh art", + "Hel p", + "Ġ ion", + "de ep", + "rib es", + "ĠK ai", + "ĠCustom er", + "Sum mary", + "Ġc ough", + "w ife", + "Ġl end", + "Ġposition ing", + "Ġlot tery", + "ĠC anyon", + "Ġf ade", + "Ġbron ze", + "ĠKenn y", + "Ġbo asts", + "ĠEnh anced", + "rec ord", + "Ġemer gence", + "Ġa kin", + "ĠB ert", + "it ous", + "âĸ ij", + "Ġst ip", + "Ġexch anged", + "om ore", + "als h", + "Ġreserv oir", + "Ġstand point", + "W M", + "Ġiniti ate", + "Ġdec ay", + "Ġbrew ery", + "Ġter ribly", + "Ġmort al", + "lev ard", + "Ġrev is", + "N I", + "el o", + "Ġconf ess", + "ĠMS NBC", + "Ġsub missions", + "Cont roller", + "Ġ20 2", + "ĠR uth", + "} );", + "ĠAz ure", + "Ġ .\"", + "20 6", + "ĠMarket ing", + "Ġl aund", + "ien cies", + "Ġrenown ed", + "ĠT rou", + "ĠN GO", + "ble ms", + "Ġterr ified", + "Ġwar ns", + "Ġper t", + "Ġuns ure", + "4 80", + "ale z", + "ult z", + "ĠOut side", + "Ġst yl", + "ĠUnder ground", + "Ġp anc", + "Ġd ictionary", + "Ġf oe", + "rim inal", + "ĠNor wegian", + "Ġj ailed", + "Ġm aternal", + "é e", + "ĠLu cy", + "c op", + "Ch o", + "Ġuns igned", + "ĠZe lda", + "ĠIns ider", + "ĠContin ued", + "Ġ13 3", + "ĠNar uto", + "ĠMajor ity", + "16 9", + "ĠW o", + "ãĤ ĵ", + "Ġpast or", + "Ġinform al", + "Ð ½", + "an throp", + "jo in", + "ãģ Ĺ", + "it ational", + "N P", + "ĠWrit ing", + "f n", + "ĠB ever", + "19 5", + "Ġy elling", + "Ġdr astically", + "Ġe ject", + "Ġne ut", + "Ġth rive", + "ĠFre qu", + "ou x", + "Ġpossess es", + "ĠSen ators", + "ĠD ES", + "ĠSh akespeare", + "ĠFran co", + "ĠL B", + "uch i", + "Ġinc arn", + "Ġfound ers", + "F unction", + "Ġbright ness", + "ĠB T", + "Ġwh ale", + "ĠThe ater", + "m ass", + "ĠD oll", + "S omething", + "Ġecho ed", + "ĠHe x", + "c rit", + "af ia", + "Ġgodd ess", + "Ġele ven", + "ĠPre view", + "ĠAur ora", + "Ġ4 01", + "uls ive", + "ĠLog an", + "in burgh", + "ĠCent ers", + "ĠON LY", + "ĠA id", + "Ġparad ox", + "Ġh urd", + "ĠL C", + "D ue", + "c ourt", + "Ġoff ended", + "Ġeval uating", + "ĠMatthew s", + "Ġto mb", + "Ġpay roll", + "Ġextra ction", + "ĠH ands", + "if i", + "Ġsuper natural", + "ĠCOM M", + "] =", + "dog s", + "Ġ5 12", + "ĠMe eting", + "Rich ard", + "ĠMax imum", + "Ġide als", + "Th ings", + "m and", + "ĠReg 
ardless", + "Ġhum ili", + "b uffer", + "L ittle", + "ĠD ani", + "ĠN ak", + "Ġliber ation", + "ĠA be", + "ĠO L", + "Ġstuff ed", + "ac a", + "ind a", + "raph ic", + "Ġmos qu", + "Ġcampaign ing", + "Ġoccup y", + "S qu", + "r ina", + "ĠW el", + "ĠV S", + "Ġphys ic", + "Ġp uls", + "r int", + "oad ed", + "ET F", + "ĠArch ives", + "Ġven ues", + "h ner", + "ĠTur bo", + "Ġl ust", + "Ġappeal ed", + "que z", + "il ib", + "ĠTim othy", + "Ġo mn", + "d ro", + "Ġobs ession", + "ĠSav age", + "19 96", + "Gl obal", + "J es", + "2 14", + "Ġsl iding", + "Ġdisapp ro", + "ĠMag ical", + "Ġvolunt arily", + "g b", + "ane y", + "Ġprop het", + "ĠRe in", + "ĠJul ia", + "ĠW orth", + "aur us", + "Ġb ounds", + "ie u", + ")) )", + "Ġcro re", + "ĠCitiz en", + "S ky", + "Ġcolumn ist", + "Ġseek ers", + "ond o", + "IS A", + "ĠL ength", + "Ġnost alg", + "Ġnew com", + "Ġdet rim", + "ent ric", + "3 75", + "ĠG E", + "Ġaut op", + "Ġacadem ics", + "App Data", + "ĠS hen", + "Ġid iot", + "ĠTrans it", + "Ġteasp oon", + "W il", + "K O", + "ĠCom edy", + "> ,", + "Ġpop ulated", + "W D", + "Ġp igs", + "ĠO culus", + "Ġsymp athetic", + "Ġmar athon", + "19 8", + "Ġseiz ure", + "s ided", + "Ġd op", + "irt ual", + "L and", + "ĠFl oor", + "osa urs", + "... ]", + "Ġl os", + "Ġsubsid iary", + "E Y", + "ĠPart s", + "ĠSt ef", + "ĠJud iciary", + "Ġ13 4", + "Ġmir rors", + "Ġk et", + "t imes", + "Ġneuro log", + "Ġc av", + "ĠGu est", + "Ġtum or", + "sc ill", + "ĠLl oyd", + "E st", + "Ġcle arer", + "Ġstere otypes", + "Ġd ur", + "not hing", + "Red dit", + "Ġnegoti ated", + "---------------- --------", + "23 5", + "Ġfl own", + "ĠSe oul", + "ĠRes ident", + "ĠS CH", + "Ġdisappear ance", + "ĠV ince", + "g rown", + "Ġgrab s", + "r il", + "ĠInf inite", + "ĠTw enty", + "Ġpedest rian", + "Ġjer sey", + "ĠF ur", + "ĠInf inity", + "ĠEll iott", + "Ġment or", + "Ġmor ally", + "Ġob ey", + "sec ure", + "iff e", + "Ġantib iotics", + "ang led", + "ĠFre eman", + "ĠIntrodu ction", + "J un", + "Ġm arsh", + "ic ans", + "ĠEV ENTS", + "och ond", + "W all", + "icult y", + "Ġmisdem eanor", + "Ġl y", + "Th omas", + "ĠRes olution", + "Ġanim ations", + "ĠD ry", + "Ġinter course", + "ĠNew castle", + "ĠH og", + "ĠEqu ipment", + "17 7", + "Ġterrit orial", + "Ġarch ives", + "20 3", + "Fil ter", + "ĠMun ich", + "Ġcommand ed", + "ĠW and", + "Ġpit ches", + "ĠCro at", + "Ġrat ios", + "ĠM its", + "Ġaccum ulated", + "ĠSpecific ally", + "Ġgentle man", + "acer b", + "Ġp enn", + "Ġa ka", + "ĠF uk", + "Ġinterven e", + "ĠRef uge", + "ĠAlz heimer", + "Ġsuccess ion", + "oh an", + "d oes", + "L ord", + "Ġsepar at", + "Ġcorrespond ence", + "Ġsh iny", + "P rior", + "Ġs ulf", + "Ġmiser able", + "Ġded ication", + "( ).", + "Ġspecial ists", + "Ġdefect s", + "ĠC ult", + "ĠX ia", + "Ġje opard", + "ĠO re", + "Ab ility", + "Ġle ar", + "Ġamb itions", + "ĠB MI", + "ĠArab s", + "Ġ19 42", + "Ġpres ervation", + "ific ate", + "Ġash amed", + "l oss", + "ĠRest aur", + "Ġrese mble", + "Ġen rich", + "ĠK N", + "ĠCl an", + "fl oat", + "Ġplay able", + "IT T", + "Ġharm ony", + "arr ison", + "ĠWe instein", + "w ere", + "Ġpoison ing", + "ĠCom put", + "ĠWord Press", + "m ajor", + "ĠVal ve", + "F an", + "ĠTh row", + "ĠRom ans", + "ĠDep ression", + "ad os", + "Ġtort ured", + "Ġbal ancing", + "bott om", + "Ġacqu iring", + "ĠMon te", + "ard i", + "Ġa ura", + "Ġ# #", + "ĠStand ing", + "ĠAtl as", + "C F", + "Ġintr ins", + "ĠBen ghazi", + "Ġcamp ing", + "Ġt apped", + "bl ade", + "st rous", + "ĠR abb", + "ĠW ritten", + "t ip", + "ĠNe igh", + "ster dam", + "ĠAll ow", + "ĠHe aling", + "ĠR hod", + "n um", + "Ġcaffe ine", + "ĠPer 
cent", + "Ġbo o", + "Ġapp les", + "30 5", + "Ġwel coming", + "Ġappl aud", + "Ġa usterity", + " ±", + "ĠRe ality", + "ef e", + "å ®", + "Ġsu cks", + "Ġtab s", + "ĠPay Pal", + "Ġback pack", + "Ġgif ted", + "abul ary", + "ĠSc out", + "ir teen", + "Ġch in", + "Ġo mitted", + "Ġnegative ly", + "Ġaccess ing", + "ĠE arn", + "Ġambul ance", + "Ġhead phones", + "Ġ20 5", + "ĠRef resh", + "p resident", + "ĠKit chen", + "ĠEnt ered", + "ĠS nyder", + "00 5", + "om ical", + "Ġborrow ed", + "ĠN em", + "Ġav iation", + "Ġst all", + "rim ination", + "Ġuniform s", + "it ime", + "ĠSim mons", + "ener gy", + "ab lished", + "y y", + "qual ified", + "Ġrall ies", + "ĠSt uart", + "fl ight", + "Ġgang s", + "r ag", + "Ġv ault", + "lu x", + "ĠCom par", + "Ġdesign ation", + "20 9", + "ĠJ os", + "d ollar", + "z ero", + "Ġwell s", + "30 3", + "Ġconstitu ents", + "Ġhe ck", + "Ġc ows", + "Ġcommand ers", + "Ġdifferent ial", + "ĠC atherine", + "29 9", + "Ġval ve", + "Ġbr ace", + "Ġperspect ives", + "c ert", + "f act", + "icular ly", + "ĠMc N", + "pl anes", + "Ġint ric", + "Ġpe as", + "ov an", + "Ġtoss ed", + "ret ch", + "ĠL opez", + "Ġunf amiliar", + "de ath", + "ĠA part", + "ĠCh ang", + "Ġrelie ved", + "rop he", + "Ġair ports", + "Ġfre ak", + "ut il", + "M ill", + "ĠCh in", + "ĠOw en", + "m ale", + "ĠBro ken", + "ĠWind s", + "ro b", + "r ising", + "Ġfire fighters", + "Ġauthor itarian", + "Ġ14 8", + "Bit coin", + "ex ternal", + "Ġbrow sers", + "iche ver", + "or ian", + "Ġun b", + "Ġpo ke", + "ĠZ ot", + "M id", + "ĠPop ular", + "Ġco vert", + "Ġcont ributes", + "Ġ6 50", + "Ġcont ention", + "G ate", + "Ġcons oles", + "Ġchrom os", + "ĠI X", + "Ġvis ually", + "ĠE isen", + "Ġjewel ry", + "Ġdeleg ation", + "Ġacceler ate", + "ĠR iley", + "Ġsl ope", + "Ġind oor", + "it ially", + "Ġhuge ly", + "Ġtun nels", + "Ġfin ed", + "Ġdirect ive", + "Ġfore head", + "ustom ed", + "Ġsk ate", + "Mus ic", + "g as", + "Ġrecogn izing", + "am bo", + "Ġover weight", + "ĠGr ade", + "Ù Ĭ", + "Ġsound ing", + "Ġlock ing", + "ĠR EM", + "St ore", + "Ġexc av", + "ĠLike wise", + "ĠL ights", + "Ġel bow", + "ĠSupp ly", + "w ic", + "Ġhands ome", + "19 94", + "C oll", + "Ġadequ ately", + "ĠAssoci ate", + "Ġstri ps", + "Ġcrack down", + "Ġmar vel", + "ĠK un", + "Ġpass ages", + "@@ @@", + "ĠT all", + "Ġthought ful", + "names e", + "Ġprost itution", + "bus iness", + "Ġball istic", + "person al", + "c ig", + "iz ational", + "R ound", + "ĠÂłĠÂł ĠÂłĠÂł", + "ĠCole man", + "Ġadm itting", + "ĠPl ug", + "Ġbit coins", + "ĠSu z", + "Ġfair ness", + "Ġsupp lier", + "Ġcatast rophic", + "ĠHel en", + "o qu", + "M arc", + "ĠArt icles", + "g ie", + "Ġend angered", + "Ġdest iny", + "ĠVol t", + "ol ia", + "ax is", + "Ġche at", + "Ġun ified", + "IC O", + "qu ote", + "30 2", + "ĠS ed", + "Ġsupp ression", + "Ġanaly zing", + "Ġsqu at", + "Ġfig uring", + "Ġcoordin ates", + "Ġch unks", + "Ġ19 46", + "Ġsub p", + "Ġw iki", + "ĠFor bes", + "ĠJ upiter", + "ĠE rik", + "im er", + "ĠCom mercial", + "\\ )", + "Ġlegitim acy", + "Ġd ental", + "ĠMe an", + "Ġdefic its", + "5 50", + "Orig inally", + "ĠHor ror", + "Ġcontam ination", + "ll ah", + "Ġconf isc", + "ĠCl are", + "T B", + "ĠF ailed", + "an ed", + "Ġrul er", + "ĠCont roller", + "Ġfemin ists", + "F ix", + "g ay", + "20 7", + "Ġr abbit", + "Th ird", + "ownt own", + "Ġgl ue", + "Ġvol atile", + "Ġsh ining", + "Ġf oll", + "Ġimp aired", + "Ġsup ers", + "æ Ī", + "Ġcl utch", + "ļé ĨĴ", + "Ġpro let", + "Ġ( !", + "Ġy elled", + "ĠK iev", + "ĠEr n", + "ĠSh ock", + "K B", + "Ġsit uated", + "qu ery", + "ĠN as", + "Ġan nex", + "char acter", + "ĠHol iday", + "Ġautom 
ation", + "ĠJ ill", + "ĠRem astered", + "Ġl inem", + "Ġwild erness", + "ĠHor izon", + "ĠGu inea", + "A Z", + "Ġmain land", + "Ġsec recy", + "LE ASE", + "Ġp unk", + "ĠProv ince", + "( ),", + "Spe ed", + "Ġhand ing", + "ĠSeb ast", + "S ir", + "r ase", + "Ġj ournals", + "Ġcon gest", + "ĠT ut", + "ir rel", + "Ġschizophren ia", + "Ġmis ogyn", + "health y", + "I ron", + "Ġreact ed", + "- $", + "25 2", + "Ġpl ural", + "Ġpl um", + "Ġbarg ain", + "Ġground ed", + "f inder", + "Ġdis se", + "ĠL az", + "O OD", + "Ġat roc", + "F actory", + "Ġmin ions", + "Ġo ri", + "ĠB rave", + "ĠP RE", + "ĠMy anmar", + "ĠH od", + "Ġexped ition", + "Ġexpl ode", + "ĠCo ord", + "Ġext r", + "ĠB rief", + "ĠAD HD", + "Ġhard core", + "feed ing", + "Ġd ile", + "ĠF ruit", + "Ġvacc ination", + "ĠM ao", + "osp here", + "Ġcont ests", + "- |", + "Ġf ren", + "isp here", + "R om", + "ĠSh arp", + "ĠTre nd", + "Ġdis connect", + "âĢ¢ âĢ¢", + "Ġper secution", + "Ear th", + "Ġhealth ier", + "38 4", + "Ġc ob", + "ĠTr inity", + "OW S", + "AN N", + "Ġspecial ty", + "Ġg ru", + "Ġcooper ative", + "wh y", + "Start ing", + "ĠIss ues", + "st re", + "ens or", + "Ġ18 5", + "Ad v", + "! ?", + "ĠRe vel", + "em ia", + "ĠH ulk", + "Ġcelebr ations", + "ĠS ou", + "ra ud", + "ĠKle in", + "Ġun real", + "con text", + "Ġpartners hips", + "Ġadop ting", + "t ical", + "Ġspl ash", + "ĠHe zbollah", + "c ategory", + "cycl op", + "xt on", + "ĠD ot", + "urd y", + "t z", + "Ġenvelop e", + "ĠN L", + "â ķ", + "Ġwhere in", + "Spe c", + "18 4", + "Ġte lev", + "al iation", + "Ġmyth s", + "å °", + "Ġrig orous", + "Ġcommun icating", + "Ġobser ver", + "Ġre he", + "ĠW ash", + "Ġapolog ized", + "ĠT in", + "Ġexpend itures", + "work ers", + "d ocument", + "Ġhes itate", + "ĠLen in", + "Ġunpredict able", + "Ġrenew al", + "cl er", + "ok ia", + "ĠCON T", + "Ġpost season", + "Tok ens", + "Ġex acerb", + "Ġbet ting", + "Ġ14 7", + "Ġelev ation", + "W ood", + "ĠSol omon", + "19 4", + "00 4", + "out put", + "Ġredu nd", + "ĠM umbai", + "Ġp H", + "Ġreprodu ce", + "ĠD uration", + "MA X", + "Ġb og", + "C BS", + "ĠBal ance", + "ĠS gt", + "ĠRec ent", + "Ġc d", + "Ġpo pped", + "Ġincomp et", + "pro p", + "ay an", + "g uy", + "Pac ific", + "Ġty r", + "Ġ{ {", + "ĠMy stic", + "ĠD ana", + "Ġmast urb", + "Ġge ometry", + "à ¢", + "ĠCor rect", + "Ġtraject ory", + "Ġdistract ed", + "Ġf oo", + "ĠW elsh", + "L uc", + "m ith", + "Ġrug by", + "Ġrespir atory", + "Ġtri angle", + "Ġ2 15", + "Ġunder graduate", + "ĠSuper ior", + "ch anging", + "_ -", + "Ġright ly", + "Ġrefere e", + "Ġluc rative", + "Ġun authorized", + "Ġresemb les", + "ĠGN U", + "ĠDer by", + "Ġpath ways", + "ĠL ed", + "Ġend urance", + "Ġst int", + "Ġcollect or", + "F ast", + "Ġd ots", + "Ġnational s", + "ĠSec urities", + "Ġwh ip", + "Par am", + "Ġlearn s", + "M agic", + "Ġdetail ing", + "m oon", + "Ġbroadcast ing", + "Ġb aked", + "26 5", + "hol m", + "ĠS ah", + "ĠHus sein", + "ĠCourt esy", + "17 4", + "Ġ14 6", + "Ġge ographic", + "pe ace", + "Ġjud ging", + "ĠS tern", + "B ur", + "Ġstory line", + "G un", + "ĠSt ick", + "24 5", + "30 7", + "ãĤ´ ãĥ³", + "ĠAdminist rator", + "Ġbur nt", + "Ġp ave", + "ch oes", + "Ex ec", + "Ġcamp uses", + "Res ult", + "Ġmut ations", + "ĠCh arter", + "Ġcapt ures", + "Ġcomp ares", + "Ġbad ge", + "S cient", + "Ġer ad", + "ier y", + "o i", + "ett es", + "ĠE state", + "Ġst rap", + "Ġproud ly", + "Ġf ried", + "Ġwithd rawn", + "ĠV oy", + "ph ony", + "It ems", + "ĠP ierce", + "b ard", + "Ġann otation", + "ant on", + "ill on", + "Im pro", + "... 
)", + "Ġhapp ier", + "---- --", + "ad just", + "Ġstaff ers", + "Ġactiv ism", + "Ġper f", + "Ġal right", + "N eed", + "Ġcomm ence", + "Ġopio id", + "ĠAm anda", + "E s", + "ĠP ars", + "ĠK aw", + "W orks", + "24 8", + "Ġind o", + "t c", + "end ant", + "ĠM oto", + "Ġlegal ization", + "OT E", + "Ġtask ed", + "Ġt sp", + "ĠACT IONS", + "16 6", + "Ġrefres hing", + "ĠN R", + "ĠPere z", + "Ġinfring ement", + "S Y", + "List en", + "in ning", + "k u", + "Ġrot ate", + "pro gram", + "ar ah", + "Des ign", + "Ġ( £", + "Ġst oring", + "Ġwar rants", + "Ġjud gement", + "ĠB rist", + "us ually", + "ph oto", + "ĠR an", + "ĠP ine", + "Ġoutrage ous", + "ĠValent ine", + "lu ence", + "ĠEvery body", + "Al tern", + "Ġrele vance", + "Ġtermin ated", + "Ġd essert", + "Ġfulf illed", + "Ġprosecut ed", + "ĠW ords", + "Ġm igrant", + "Ġcultiv ation", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "idel ity", + "ĠV ern", + "ĠLog in", + "Ġmetaph or", + "ĠT ip", + "Ġrecru its", + "ĠP ig", + "rib ing", + "Ġenthusi asts", + "ex per", + "Ġfright ening", + "ĠH air", + "ans on", + "str ate", + "Ġh i", + "He ight", + "Ġown ing", + "n one", + "Ġdis like", + "Ġkn ives", + "pher d", + "Ġloud ly", + "ĠAP Is", + "Dis play", + "ĠL ac", + "ĠUS S", + "ab l", + "ver ages", + "J ew", + "Ġ17 2", + "ĠHist orical", + "at oon", + "ĠPhys ics", + "in tern", + "Ġwarm th", + "Ġto pp", + "D M", + "Ġgun man", + "Ġem peror", + "od i", + "ãĥ £", + "in atory", + "ĠR ib", + "Ġ13 1", + "ĠSat urn", + "ĠSh ining", + "Ġw aking", + "Qu otes", + "Ġcomed ian", + "en berg", + " ½", + "Ġbelie vers", + "Ġpaper work", + "c ustom", + "Ġle v", + "Ġl ament", + "Ġpour ing", + "22 2", + "p olitical", + "ĠSupp lement", + "m aid", + "Ġcruel ty", + "Ġt read", + "ys ics", + "A w", + "rit es", + "Ġmod ifier", + "ĠP osition", + "Ad am", + "l b", + "ub s", + "Ġimper fect", + "Ġcl usters", + "ĠEngine er", + "ĠC herry", + "Ġinaug uration", + "ĠS au", + "Ġembod iment", + "ĠUn cle", + "Ġover r", + "Ġexplos ions", + "c ule", + "ĠPrinc eton", + "ĠAndre a", + "Ġincorrect ly", + "Ġearn est", + "Ġpil gr", + "ĠS print", + "Ġslee ve", + "Ġhe ars", + "ĠAm azing", + "Ġbrow sing", + "ag in", + "Ġhom eland", + "Ġha w", + "Ġd iving", + "ist ered", + "17 8", + "Ġbarg aining", + "ĠArc ade", + "Ġdeleg ate", + "ters on", + "................................ 
................................", + "ĠJackson ville", + "27 5", + "Ġst agn", + "Ġad am", + "ĠSher man", + "C B", + "Ġsub urb", + "ĠFood s", + "Ġconver ting", + "ĠAr ist", + "Ġch ambers", + "l ove", + "Ġam ino", + "ĠG an", + "Ġmad ness", + "m c", + "ĠUS E", + "def ined", + "Ġul tr", + "ind ust", + "Ġw olves", + "l ance", + "Add itionally", + "Ġcr acks", + "as ia", + "ĠRe ason", + "ĠP ump", + "Ġaccident al", + "ĠL aser", + "ĠR id", + "Ġinitial ized", + "ell i", + "Ġun named", + "Ġn oun", + "ĠPass ed", + "Ġhost age", + "ĠEth iop", + "sh irts", + "Ġun rel", + "ĠEmb assy", + "Ġ19 41", + "Ġat oms", + "Ġpur ported", + "16 4", + "ĠF i", + "Ġgall ons", + "ĠMon ica", + "Ġp g", + "en ment", + "Ġsort ed", + "ĠG ospel", + "Ġhe ights", + "Ġtr aced", + "Ġunder going", + "She ll", + "Ġs acks", + "Ġproport ions", + "Ġhall uc", + "F ont", + "ac et", + "Ġwar mer", + "ĠIN TER", + "Ġgrab bing", + "Pl ug", + "Ġreal ization", + "ĠBur ke", + "Ġen chant", + "AT ER", + "ĠSe ed", + "Ġabund ant", + "F M", + "Ġc ivic", + "V s", + "is i", + "Ġv ow", + "Ġre per", + "ĠPartners hip", + "Ġpenet ration", + "Ġax e", + "Ġsh attered", + "ĠZ ombies", + "Ġv inyl", + "ĠAl ert", + "e on", + "Ġoblig ed", + "ĠIll ust", + "ĠPl aza", + "ĠFront ier", + "Ġdavid jl", + "ĠSer ial", + "ĠH av", + "ĠNut rition", + "B i", + "Ġâĸ Ī", + "ĠJ ays", + "lin ux", + "Ġhur ry", + "Ġv oy", + "Ġhop eless", + "ĠSte alth", + "Ġ ãģ", + "ess ors", + "tt le", + "b org", + "ĠSaf ari", + "f ell", + "Ġw ary", + "d ue", + "ĠAb ove", + "H a", + "E LL", + "Ġnot or", + "ĠW on", + "T oo", + "Ġoccup ations", + "Ġposs essions", + "Ġinv iting", + "Ġpred ators", + "Ġacceler ated", + "Ġ15 7", + "uter te", + "ĠC ube", + "e ast", + "acc ount", + "G ive", + "Ġtrans plant", + "red ients", + "id able", + "Ġscreens hots", + "ĠG und", + "ĠF S", + "Ġtravel ers", + "Ġsens ory", + "ĠF iat", + "ĠRock ets", + "İ ĭ", + "_ {", + "F riend", + "Ġchar ming", + "AL S", + "Ġenjoy ment", + "m ph", + "Ġ5 000", + "ĠRE G", + "Ù Ĩ", + "b ia", + "Ġcomp ilation", + "ro st", + "ĠV P", + "ĠSch ne", + "201 9", + "Ġcop ying", + "M ORE", + "ĠFl ore", + "f alls", + "2 15", + "t otal", + "Ġdis ciples", + "d ouble", + "Ġexceed ing", + "Ġsm ashed", + "Ġconcept ual", + "ĠRom ania", + "ĠB rent", + "ĠI CE", + "ĠT ou", + "Ġg rap", + "Ġn ails", + "18 9", + "ãĥ ĺ", + "Ġproc ure", + "e ur", + "Ġconfir ming", + "ĠC ec", + "aw i", + "ĠEd en", + "Ġn g", + "Ġengine ered", + "at ics", + "Ġhook ed", + "Ġdisgust ing", + "ĠMur der", + "ãĤ ¿", + "L ibrary", + "Ġ16 8", + "Al most", + "hem atic", + "Men u", + "ĠNot re", + "ĠJ ur", + "Ġkidn apped", + "Ġhack er", + "ĠJ ade", + "Ġcreep y", + "Ġdraw ings", + "ĠSpons or", + "Ġcycl ists", + "ĠGob lin", + "Ġoptim ized", + "Ġst aged", + "ĠMc D", + "bet ween", + "A ge", + "en o", + "S ex", + "ĠW ide", + "n ings", + "av is", + "Ġincap able", + "ĠK ob", + "Ġreward ing", + "ĠL one", + "oles cent", + "Ġcontract ed", + "Ġstick y", + "J ose", + "B all", + "f est", + "ĠIn put", + "ĠRec ently", + "Ġto mat", + "squ are", + "App lication", + "Ġnit rogen", + "Ġdupl icate", + "ĠRec on", + "ĠD ear", + "L ondon", + "Ġint ra", + "Ġd ock", + "Ġout reach", + "ĠM illion", + "Ġmamm als", + "am pton", + "V AL", + "Ġsn aps", + "Ġd os", + "ĠWh ole", + "ĠRead y", + "T ry", + "ĠWinn ipeg", + "ear ance", + "Ġinc urred", + "ren ched", + "ĠNS W", + "il ot", + "rain e", + "Ġc ube", + "g ot", + "Ġrun way", + "etermin ed", + "ĠHaw ks", + "Ġsurviv or", + "ĠW ish", + "ĠD in", + "ĠDE F", + "ĠV ault", + "18 7", + "Ġmush rooms", + "Ġcris p", + "be y", + "ĠDisco very", + "Ġdevelopment al", + "Ġparad igm", + "Ġcha 
otic", + "ĠT su", + "Ġ3 33", + "b ons", + "Ġbacter ial", + "Ġcomm its", + "Ġcos mic", + "Ġme ga", + "oc ative", + "ĠP aint", + "ophob ic", + "Ġv ain", + "Ġcar ved", + "ĠTh ief", + "ĠG ul", + "ows hip", + "Ġc ites", + "ĠEd inburgh", + "Ġdimin ished", + "Ġacknowled ges", + "ĠK ills", + "Ġmic row", + "ĠHer a", + "Ġsen iors", + "Ġwhere by", + "H op", + "at ron", + "Ġun available", + "ĠN ate", + "Ġ4 80", + "Ġsl ated", + "ĠRe becca", + "ĠB attery", + "Ġgram mar", + "Ġhead set", + "Ġcurs or", + "Ġex cluding", + "any e", + "aunder ing", + "eb in", + "Ġfeas ible", + "ĠPub lishing", + "ĠLab s", + "ĠCl iff", + "ĠFerr ari", + "Ġp ac", + "vis ible", + "mark ed", + "pe ll", + "Ġpol ite", + "Ġstagger ing", + "ĠGal actic", + "Ġsuper st", + "Ġpar an", + "ĠOffic ers", + "ãĢ ģ", + "Ġspecific s", + "ul us", + "23 9", + "ĠP aste", + "AM P", + "ĠPan ama", + "ĠDe lete", + "angu ard", + "rest rial", + "Ġhero ic", + "ĠD y", + "ا ÙĦ", + "Ġincumb ent", + "Ġcr unch", + "t ro", + "Ġsc oop", + "Ġblog ger", + "Ġsell ers", + "ure n", + "Ġmedic ines", + "ĠC aps", + "ĠAnim ation", + "ox y", + "Ġout ward", + "Ġinqu iries", + "22 9", + "Ġpsych ologist", + "ĠS ask", + "ev il", + "Ġcontam inated", + "ãĤ ¨", + "he rence", + "Ġbrand ed", + "ĠAbd ul", + "z h", + "Ġparagraph s", + "Ġmin s", + "Ġcor related", + "er b", + "Ġimp art", + "Ġmil estone", + "ĠSol utions", + "ot le", + "Ġunder cover", + "Ġmar ched", + "ĠCharg ers", + "f ax", + "ĠSec rets", + "Ġr uth", + "we ather", + "Ġfemin ine", + "Ġsh am", + "Ġprest igious", + "igg ins", + "Ġs ung", + "hist ory", + "ett le", + "gg ie", + "Ġout dated", + "ol and", + "Ġper ceptions", + "ĠS ession", + "ĠDod gers", + "u j", + "ĠE ND", + "D oc", + "Ġdefic iency", + "Gr and", + "ĠJ oker", + "Ġretro spect", + "Ġdiagn ostic", + "Ġharm less", + "Ġro gue", + "ĠA val", + "E qu", + "Ġtrans c", + "ĠRoberts on", + "ĠDep ending", + "ĠBurn s", + "iv o", + "Ġhost ility", + "F eatures", + "ĵ ĺ", + "Ġdis comfort", + "ĠL CD", + "spec ified", + "ĠEx pect", + "3 40", + "Ġimper ative", + "ĠReg ular", + "Ch inese", + "Ġstate wide", + "Ġsy mm", + "Ġlo ops", + "Ġaut umn", + "N ick", + "Ġsh aping", + "Ġqu ot", + "Ġc herry", + "ĠCross ref", + "è¦ ļéĨĴ", + "Stand ard", + "he ed", + "ĠD ell", + "ĠViet namese", + "Ġo st", + "ĠV alkyrie", + "O A", + "Ass ad", + "Ġreb ound", + "ĠTra ffic", + "pl aces", + "æ ĺ", + "ĠB uc", + "17 2", + "Ġshel ters", + "Ġins isting", + "ĠCertain ly", + "ĠKenn eth", + "ĠT CP", + "Ġpen al", + "ĠRe play", + "he ard", + "Ġdial ect", + "iz a", + "ĠF Y", + "it cher", + "ĠD L", + "Ġspir al", + "Ġquarterback s", + "Ġh ull", + "Ġgo ogle", + "Ġto dd", + "ĠSter ling", + "ĠPl ate", + "Ġsp ying", + "mb ol", + "ĠReal m", + "ĠPro ced", + "ĠCr ash", + "Ġtermin ate", + "Ġprotest ing", + "C enter", + "gu ided", + "Ġun cover", + "Ġboy cott", + "Ġreal izes", + "s ound", + "Ġpret ending", + "ĠV as", + "19 80", + "Ġfram ed", + "Ġ13 9", + "Ġdesc ended", + "Ġrehab ilitation", + "Ġborrow ing", + "ĠB uch", + "Ġbl ur", + "R on", + "ĠFro zen", + "en za", + "Ch ief", + "ĠP oor", + "Ġtransl ates", + "M IN", + "Ġ2 12", + "J ECT", + "Ġerupt ed", + "Ġsuccess es", + "S EC", + "Ġpl ague", + "Ġg ems", + "d oms", + "Ġstret ches", + "ĠSp y", + "Ġstory telling", + "C redit", + "ĠP ush", + "Ġtra ction", + "Ġin effective", + "ĠL una", + "Ġt apes", + "Ġanaly tics", + "erc ise", + "Ġprogram mes", + "ĠCar bon", + "Ġbeh old", + "he avy", + "ĠConserv ation", + "ĠF IR", + "Ġs ack", + "ter min", + "ric ks", + "Ġhous ed", + "Ġunus ually", + "I ce", + "Ġexecut ing", + "ĠMor oc", + "ed ay", + "Ġed itions", + "Ġsm arter", + "ĠB A", + 
"Ġout law", + "Ġvan ished", + "ib a", + "AL SE", + "ĠSil va", + "23 8", + "C ould", + "Ġphilos opher", + "Ġevac uated", + "Sec ret", + "14 2", + "Ġvis as", + "ãĤ ¬", + "ĠM alt", + "ĠClear ly", + "ĠN iger", + "ĠC airo", + "ĠF ist", + "3 80", + "ĠX ML", + "aut o", + "it ant", + "Ġrein forced", + "Rec ord", + "ĠSurviv or", + "G Hz", + "Ġscrew s", + "parent s", + "Ġo ceans", + "ma res", + "Ġbra kes", + "vas ive", + "Ġhell o", + "ĠS IM", + "rim p", + "Ġo re", + "ĠArm our", + "24 7", + "Ġterr ific", + "Ġt ones", + "14 1", + "ĠMin utes", + "Ep isode", + "Ġcur ves", + "Ġinflamm atory", + "Ġbat ting", + "ĠBeaut iful", + "L ay", + "Ġunp op", + "v able", + "Ġr iots", + "ĠTact ics", + "b augh", + "ĠC ock", + "Ġorg asm", + "ĠS as", + "Ġconstruct or", + "et z", + "G ov", + "Ġant agon", + "Ġthe at", + "Ġde eds", + "ha o", + "c uts", + "ĠMc Cl", + "Ġu m", + "ĠScient ists", + "Ġgrass roots", + "ys sey", + "\"] =>", + "Ġsurf aced", + "Ġsh ades", + "Ġneighb ours", + "Ġad vertis", + "oy a", + "Ġmer ged", + "Up on", + "Ġg ad", + "Ġanticip ate", + "Any way", + "Ġsl ogan", + "Ġdis respect", + "I ran", + "ĠT B", + "act ed", + "Ġsubp oen", + "medi ately", + "OO OO", + "Ġwa iver", + "Ġvulner abilities", + "ott esville", + "ĠHuff ington", + "J osh", + "ĠD H", + "M onday", + "ĠEll en", + "K now", + "x on", + "it ems", + "22 8", + "Ġf ills", + "ĠN ike", + "Ġcum ulative", + "and als", + "I r", + "Ġ ì", + "Ġfr iction", + "ig ator", + "Ġsc ans", + "ĠVi enna", + "ld om", + "Ġperform ers", + "P rim", + "Ġb idding", + "M ur", + "Ġlean ed", + "ĠPri x", + "al ks", + "Ġ[ âĢ¦]", + "ĠTw itch", + "ĠDevelop er", + "ĠG ir", + "Ġcall back", + "Ab stract", + "Ġacc ustomed", + "Ġfreed oms", + "ĠP G", + "ur acy", + "Ġl ump", + "is man", + ",, ,,", + "19 92", + "ĠR ED", + "Ġwor m", + "M atch", + "ĠPl atinum", + "I J", + "ĠOwn er", + "Tri via", + "com pl", + "Ġnew born", + "Ġfant as", + "O wn", + "Ġ19 59", + "Ġsymp ath", + "Ġub iqu", + "Ġoutput s", + "Ġal lev", + "Ġpr ag", + "K evin", + "Ġfav ors", + "Ġbur ial", + "Ġn urt", + "so lete", + "c ache", + "Ġ15 6", + "Ġunl ocks", + "te chn", + "M aking", + "Ġcon quer", + "ad ic", + "æ ĸ", + "Ġel f", + "Ġelect orate", + "ĠKurd s", + "ĠSt ack", + "ĠSam urai", + "Ġâ ĺħ", + "Ġ{ }", + "ĠS aid", + "ĠFall out", + "Ġkind ness", + "ĠCustom s", + "ĠBou levard", + "Ġhelicop ters", + "ot ics", + "ĠVe get", + "com ment", + "Ġcritic ised", + "Ġpol ished", + "ĠRem ix", + "ĠC ultural", + "Ġrec ons", + "Ġdo i", + "at em", + "Sc reen", + "Ġbar red", + "Com ments", + "ĠGener ally", + "Ġsl ap", + "7 20", + "V ari", + "p ine", + "Ġem pt", + "Ġh ats", + "ĠPlay ing", + "l ab", + "a verage", + "form s", + "ĠC otton", + "Ġcan s", + "ĠD ON", + "ĠSom alia", + "C rypt", + "ĠIncre ases", + "E ver", + "mod ern", + "Ġsur geon", + "3 000", + "Ġrandom ized", + "================================ ================================", + "B ern", + "im pl", + "ĠC OR", + "Ġpro claim", + "th ouse", + "Ġto es", + "Ġam ple", + "Ġpres erving", + "Ġdis bel", + "gr and", + "B esides", + "Ġsil k", + "ĠPat tern", + "h m", + "Ġenter prises", + "Ġaffidav it", + "ĠAdvis ory", + "Ġadvert ised", + "ĠRel igious", + "se ctions", + "psy ch", + "ĠField s", + "aw ays", + "Ġhasht ag", + "ĠNight mare", + "Ġv ampire", + "Ġfore nsic", + "rosso ver", + "n ar", + "Ġn avy", + "Ġvac ant", + "ĠD uel", + "Ġhall way", + "Ġface book", + "ident ally", + "ĠN RA", + "Ġm att", + "Ġhur ricane", + "ĠKir by", + "ĠP uzzle", + "Ġsk irt", + "ou st", + "du llah", + "Ġanal ogy", + "in ion", + "Ġtomat oes", + "ĠN V", + "ĠPe ak", + "ĠMe yer", + "Ġappoint ments", + "Ġm asc", + 
"Ġal ley", + "re hend", + "Ġchar ities", + "Ġund o", + "Ġdest inations", + "ĠTest ing", + "\"> \"", + "c ats", + "* .", + "Ġgest ures", + "gener al", + "Le ague", + "Ġpack ets", + "ĠInspect or", + "ĠBer g", + "Ġfraud ulent", + "Ġcritic ize", + "F un", + "Ġbl aming", + "nd ra", + "Ġsl ash", + "ĠE ston", + "Ġpropos ing", + "Ġwh ales", + "Ġtherap ist", + "Ġsub set", + "Ġle isure", + "EL D", + "ĠC VE", + "ĠAct ivity", + "Ġcul min", + "sh op", + "ĠD AY", + "is cher", + "ĠAdmir al", + "ĠAtt acks", + "Ġ19 58", + "Ġmem oir", + "Ġfold ed", + "Ġsex ist", + "Ġ15 3", + "ĠL I", + "Ġread ings", + "Ġembarrass ment", + "ĠEmploy ment", + "w art", + "ch in", + "Ġcontin uation", + "l ia", + "Rec ently", + "Ġd uel", + "Ġevac uation", + "ĠKash mir", + "Ġdis position", + "ĠR ig", + "Ġbol ts", + "Ġins urers", + "4 67", + "M ex", + "Ġret aliation", + "Ġmis ery", + "Ġunre asonable", + "r aining", + "I mm", + "ĠP U", + "em er", + "Ġgen ital", + "ãĤ ³", + "ĠC andy", + "Ġon ions", + "ĠP att", + "lin er", + "Ġconced ed", + "Ġf a", + "Ġfor c", + "ĠH ernandez", + "ĠGe off", + "deb ian", + "ĠTe ams", + "Ġc ries", + "Ġhome owners", + "23 7", + "A BC", + "Ġst itch", + "Ġstat istic", + "Ġhead ers", + "ĠBi ology", + "Ġmot ors", + "ĠG EN", + "ĠL ip", + "Ġh ates", + "Ġhe el", + "S elf", + "i pl", + "ED IT", + "ort ing", + "Ġann ot", + "ĠSpe ech", + "old emort", + "ĠJ avascript", + "ĠLe Bron", + "Ġfoot print", + "Ġf n", + "Ġseiz ures", + "n as", + "h ide", + "Ġ19 54", + "ĠBe e", + "ĠDecl aration", + "ĠKat ie", + "Ġreserv ations", + "N R", + "f emale", + "Ġsatur ated", + "Ġb iblical", + "Ġtroll s", + "Dev ice", + "ph otos", + "Ġdr ums", + "ãĥīãĥ© ãĤ´ãĥ³", + "N ight", + "f ighter", + "ĠH ak", + "ri ber", + "Ġc ush", + "Ġdiscipl inary", + "ba um", + "ĠG H", + "ĠSch midt", + "ilib rium", + "Ġs ixty", + "ĠKush ner", + "ro ts", + "Ġp und", + "ĠR ac", + "Ġspr ings", + "Ġcon ve", + "Bus iness", + "F all", + "Ġqual ifications", + "Ġvers es", + "Ġnarc iss", + "ĠK oh", + "ĠW ow", + "ĠCharl ottesville", + "ed o", + "Ġinterrog ation", + "ĠW ool", + "36 5", + "B rian", + "Ġâľ ĵ", + "Ġalleg es", + "ond s", + "id ation", + "ĠJack ie", + "y u", + "Ġl akes", + "Ġworth while", + "Ġcryst als", + "ĠJud a", + "Ġcomp rehend", + "Ġfl ush", + "Ġabsor ption", + "ĠO C", + "Ġfright ened", + "ĠCh ocolate", + "Mart in", + "Ġbu ys", + "Ġbu cks", + "Ġapp ell", + "ĠChampions hips", + "Ġlist ener", + "ĠDef ensive", + "Ġc z", + "ud s", + "ĠM ate", + "Ġre play", + "Ġdecor ated", + "Ġs unk", + "ĠV IP", + "ĠAn k", + "Ġ19 5", + "aa aa", + "Nob ody", + "ĠMil k", + "ĠG ur", + "ĠM k", + "ĠS ara", + "Ġse ating", + "ĠW id", + "Tr ack", + "Ġemploy s", + "Ġgig antic", + "AP P", + "ãĤ §", + "in ventory", + "Ġtow el", + "at che", + "l asting", + "ĠT L", + "Ġlat ency", + "Ġkn e", + "B er", + "me aning", + "Ġup held", + "Ġplay ground", + "Ġm ant", + "S ide", + "Ġstere o", + "Ġnorth west", + "Ġexception ally", + "Ġr ays", + "Ġrec urring", + "D rive", + "Ġup right", + "Ġab duct", + "ĠMar athon", + "Ġgood bye", + "Ġal phabet", + "h p", + "Ġcourt room", + "ring ton", + "ot hing", + "T ag", + "Ġdiplom ats", + "Ġbar bar", + "ĠAqu a", + "18 3", + "33 33", + "Ġmat urity", + "Ġinst ability", + "ĠAp ache", + "Ġ= ==", + "Ġfast ing", + "ĠGr id", + "Mod Loader", + "Ġ15 2", + "A bs", + "ĠOper ating", + "ett i", + "Ġacqu aint", + "Don nell", + "ĠK em", + "ĠFor ge", + "Ġarm ored", + "M il", + "Ġphilos ophers", + "in vest", + "Pl ayers", + "â Ī", + "Ġmy riad", + "Ġcomr ades", + "R ot", + "Ġremember ing", + "Ġcorrespond s", + "Ġprogram mers", + "ĠLyn n", + "Ġo lig", + "Ġco herent", + "yn 
chron", + "ĠChem ical", + "Ġj ugg", + "p air", + "post s", + "E ye", + "ĠIn ner", + "Ġsem ester", + "ott est", + "ĠEmir ates", + "ric anes", + "or ously", + "m its", + "ĠW is", + "Ġd odge", + "l ocation", + "Ġf aded", + "Am azon", + "ĠPro ceed", + "ĠIN FO", + "j ournal", + "ĠTru ck", + "T en", + "Ġ2 17", + "Ġstat utes", + "m obile", + "ĠT ypes", + "Rec omm", + "b uster", + "pe x", + "Ġleg ends", + "Ġhead ache", + "f aced", + "ĠWi Fi", + "if ty", + "ĠH ER", + "Ġcirc uits", + "ER ROR", + "22 6", + "ol in", + "Ġcyl inder", + "osp ace", + "ik ers", + "P rem", + "Qu ant", + "Ġconflic ting", + "Ġslight est", + "Ġfor ged", + "ion age", + "Step hen", + "ĠK ub", + "ĠOpp ortun", + "ĠHe al", + "Ġbl o", + "Ġrul ers", + "Ġh uh", + "Ġsubmar ine", + "f y", + "ass er", + "Ġallow ance", + "ĠKas ich", + "ĠT as", + "ĠAustral ians", + "Forge ModLoader", + "ĠâĨ ij", + "ĠMat rix", + "am ins", + "Ġ12 00", + "ĠAc qu", + "23 6", + "D ocument", + "ĠBre aking", + "19 3", + "ĠSub st", + "ĠRoll er", + "ĠPro perties", + "ĠN I", + "t ier", + "Ġcr ushing", + "Ġadvoc ating", + "Further more", + "keep ers", + "Ġsex ism", + "x d", + "Ġcall er", + "ĠS ense", + "chie ve", + "ĠT F", + "Ġfuel ed", + "Ġreminis cent", + "Ġobs ess", + "ur st", + "Ġup hold", + "ĠF ans", + "het ics", + "Ġâ Ĺ", + "ĠB ath", + "Ġbe verage", + "Ġo scill", + "25 4", + "Ġpol es", + "Ġgrad ual", + "Ġex ting", + "ĠS uff", + "ĠS uddenly", + "Ġlik ing", + "Ġ19 49", + "un ciation", + "am ination", + "ĠO mar", + "ĠL V", + "ĠCon sequently", + "Ġsynt hes", + "ĠG IF", + "Ġp ains", + "Ġinteract ing", + "u ously", + "inc re", + "Ġrum or", + "ĠScient ology", + "19 7", + "ĠZ ig", + "Ġspe lling", + "ĠA SS", + "Ġexting u", + "ms on", + "Ġg h", + "Ġremark ed", + "ĠStrateg ic", + "ĠM ON", + "å ¥", + "g ae", + "ĠWH AT", + "E ric", + "ĠCamp us", + "Ġmeth ane", + "Ġimag in", + "J UST", + "ĠAl m", + "X T", + "i q", + "ĠR SS", + "Ġwrong doing", + "att a", + "Ġbig ot", + "Ġdemonstr ators", + "ĠCal vin", + "ĠV illa", + "Ġmembr ane", + "ĠAw esome", + "Ġbenef ic", + "26 8", + "Ġmagn ificent", + "ĠL ots", + "G reg", + "ĠBor is", + "Ġdetain ees", + "ĠH erman", + "Ġwhis pered", + "Ġa we", + "Prof essor", + "fund ing", + "Ġphys iological", + "ĠDest ruction", + "Ġlim b", + "Ġmanip ulated", + "Ġbub bles", + "Ġpse ud", + "Ġhyd ra", + "ĠBrist ol", + "Ġst ellar", + "ĠExp ansion", + "ĠK ell", + "ĠInterest ingly", + "Ġm ans", + "Ġdrag ging", + "Ġec ological", + "ĠF it", + "Ġg ent", + "Ġbenef ited", + "ĠHait i", + "Ġpoly g", + "ãĥ İ", + "Ġ20 30", + "Ġpro w", + "Ġrecon struction", + "Ġwas t", + "Ġpsych ic", + "ĠGree ks", + "Hand ler", + "16 2", + "ĠP ulse", + "Ġsol icit", + "Ġsy s", + "Ġinflu x", + "ĠG entle", + "per cent", + "Ġprolifer ation", + "Ġtax able", + "Ġdisreg ard", + "Ġesc aping", + "Ġg inger", + "Ġwith stand", + "Ġdevast ated", + "ĠD ew", + "ser ies", + "Ġinject ed", + "ela ide", + "Ġturn over", + "he at", + "Ļ Ĥ", + "H appy", + "ĠSil ent", + "ãĤ Ń", + "iv ism", + "Ġir rational", + "AM A", + "Ġre ef", + "r ub", + "Ġ16 2", + "Ġbank ers", + "ĠEth ics", + "v v", + "Ġcritic isms", + "K n", + "18 6", + "M ovie", + "ĠT ories", + "Ġno od", + "Ġdist ortion", + "F alse", + "od ore", + "Ġt asty", + "Res earch", + "ĠU ID", + "- )", + "Ġdivor ced", + "ĠM U", + "ĠHay es", + "ĠIs n", + "ian i", + "ĠH Q", + "Ġ\" #", + "ign ant", + "Ġtra umatic", + "ĠL ing", + "H un", + "Ġsab ot", + "on line", + "r andom", + "Ġren amed", + "ra red", + "K A", + "d ead", + "é t", + "ĠAss istance", + "Ġse af", + "++++ ++++", + "Ġse ldom", + "ĠWeb b", + "Ġbo olean", + "u let", + "Ġref rain", + "ĠDI Y", + "ru le", + 
"Ġshut ting", + "Ġutil izing", + "load ing", + "ĠPar am", + "co al", + "oot er", + "Ġattract ing", + "ĠD ol", + "Ġher s", + "ag netic", + "ĠRe ach", + "im o", + "Ġdisc arded", + "ĠP ip", + "01 5", + "ü r", + "Ġm ug", + "Im agine", + "C OL", + "Ġcurs ed", + "ĠSh ows", + "ĠCurt is", + "ĠSach s", + "spe aking", + "ĠV ista", + "ĠFram ework", + "ong o", + "Ġsub reddit", + "Ġcr us", + "ĠO val", + "R ow", + "g rowing", + "Ġinstall ment", + "Ġgl ac", + "ĠAdv ance", + "EC K", + "ĠLGBT Q", + "LE Y", + "Ġac et", + "Ġsuccess ive", + "ĠNic ole", + "Ġ19 57", + "Qu ote", + "Ġcircumst ance", + "ack ets", + "Ġ14 2", + "ort ium", + "Ġguess ed", + "ĠFr ame", + "Ġperpet rators", + "ĠAv iation", + "ĠBen ch", + "Ġhand c", + "A p", + "Ġ19 56", + "25 9", + "r and", + "Net Message", + "d in", + "urt les", + "h ig", + "ĠV III", + "ff iti", + "ĠSw ords", + "b ial", + "Ġkidn apping", + "dev ice", + "Ġb arn", + "ĠEl i", + "auc as", + "S end", + "Con structed", + "Ġ ½", + "Ġneed les", + "Ġad vertisements", + "Ġv ou", + "Ġexhib ited", + "ĠFort ress", + "As k", + "B erry", + "TY PE", + "Ġcan cers", + "ump ing", + "ĠTerrit ory", + "Ġpr ud", + "Ġn as", + "Ġathe ist", + "Ġbal ances", + "ãģ Ł", + "ĠSh awn", + "& &", + "Ġland sc", + "ĠR GB", + "Ġpet ty", + "Ġex cellence", + "Ġtransl ations", + "Ġpar cel", + "ĠChe v", + "E ast", + "ĠOut put", + "im i", + "Ġamb ient", + "ĠTh reat", + "Ġvill ains", + "Ġ5 50", + "IC A", + "Ġtall er", + "Ġle aking", + "c up", + "Ġpol ish", + "Ġinfect ious", + "ĠK C", + "Ġ@ @", + "back ground", + "Ġbureaucr acy", + "ĠS ai", + "un less", + "it ious", + "ĠSky pe", + "At l", + "ID ENT", + "00 8", + "Ġhyp ocr", + "Ġpit chers", + "Ġguess ing", + "ĠF INAL", + "Bet ween", + "Ġvill agers", + "Ġ25 2", + "f ashion", + "ĠTun is", + "Be h", + "ĠEx c", + "ĠM ID", + "28 8", + "ĠHas kell", + "19 6", + "ĠN OR", + "Ġspec s", + "Ġinv ari", + "Ġgl ut", + "ĠC ars", + "Ġimp ulse", + "Ġhon ors", + "g el", + "Ġjurisd ictions", + "ĠBund le", + "ul as", + "Calif ornia", + "ĠIncre ase", + "Ġp ear", + "Ġsing les", + "Ġc ues", + "Ġunder went", + "ĠW S", + "Ġexagger ated", + "Ġdub ious", + "Ġfl ashing", + "L OG", + ") ].", + "J ournal", + "t g", + "V an", + "ĠI stanbul", + "ĠIn sp", + "ĠFrank en", + "D raw", + "Ġsad ness", + "Ġiron ic", + "ĠF ry", + "x c", + "Ġ16 4", + "is ch", + "W ay", + "ĠProtest ant", + "h orn", + "Ġun aff", + "ĠV iv", + "ill as", + "ĠProduct ions", + "ĠH ogan", + "Ġper imeter", + "ĠS isters", + "Ġspont aneous", + "Ġdown side", + "Ġdescend ants", + "Ġor n", + "w orm", + "Japan ese", + "Ġ19 55", + "Ġ15 1", + "ĠDo ing", + "els en", + "umb les", + "Ġrad ically", + "ĠDr um", + "ĠB ach", + "Ġli abilities", + "ĠO B", + "ĠElement ary", + "Ġmem e", + "yn es", + "Ġfinger print", + "ĠGr ab", + "Ġundert ake", + "Mem bers", + "ĠRead er", + "ĠSim s", + "g od", + "Ġhypot hetical", + "s cient", + "ĠA J", + "Ġchar ism", + "Ġad missions", + "ĠMiss ile", + "tr ade", + "Ġexerc ising", + "ĠBack ground", + "W ritten", + "Ġvoc als", + "whe ther", + "Ġv i", + "ĠW inner", + "Ġl itter", + "ĠSh ooting", + "ST EM", + "ãĤ ¡", + "ĠA FL", + "Ġvari ability", + "Ġe ats", + "ĠD PS", + "b row", + "Ġeleph ants", + "Ġstr at", + "Ġ Å", + "Ġsett lers", + "Matt hew", + "Ġin advert", + "H I", + "ĠIM F", + "ĠGo al", + "Ġnerv es", + "John son", + "ey e", + "ablish ment", + "Th ursday", + "BIL ITY", + "H ad", + "am oto", + "het amine", + "ep s", + "Ġmit ochond", + "Ġcomp ressed", + "ĠTre vor", + "ĠAnim als", + "T ool", + "L ock", + "Ġtwe ak", + "Ġpin ch", + "Ġcancell ation", + "P ot", + "Ġfoc al", + "ĠAst ron", + "17 3", + "ĠA SC", + "ĠO THER", + 
"umn i", + "Ġdem ise", + "d l", + "Ù ħ", + "Sem itism", + "Ġcr acking", + "Ġcollabor ative", + "Ġexpl ores", + "s ql", + "Ġher bs", + "Ġconfig urations", + "m is", + "ĠRes ult", + "ace y", + "ĠSm oke", + "Ġsan ct", + "el ia", + "Ġdeg ener", + "Ġdeep est", + "Ġscream ed", + "Ġn ap", + "Soft ware", + "ĠST AR", + "E F", + "ĠX in", + "spons ored", + "mans hip", + "23 3", + "Ġprim aries", + "Ġfilter ing", + "Ġas semble", + "m il", + "ĠMy ers", + "b ows", + "Ġpun ched", + "M ic", + "Ġinnov ations", + "Ġfun c", + "and o", + "Ġfr acking", + "ĠV ul", + "о Ð", + "osh op", + "ĠIm mun", + "Ġsett ling", + "Ġadolesc ents", + "Ġreb uilding", + "Ġtransform ing", + "Ġpar ole", + "Ġhar bor", + "Ġbook ing", + "ot ional", + "onge vity", + "ĠY o", + "b ug", + "Ġemer ges", + "ĠMethod s", + "ĠCh u", + "P res", + "ĠDun geons", + "Ġtra iling", + "ĠR um", + "ĠH ugh", + "å¤ ©", + "ĠE ra", + "ĠBatt les", + "Res ults", + "ĠTr ading", + "Ġvers a", + "c ss", + "ax ies", + "he et", + "Ġgre ed", + "19 89", + "Ġgard ens", + "Ġconting ent", + "P ark", + "ĠLeaf s", + "h ook", + "ro be", + "Ġdiplom acy", + "ĠF uel", + "ĠInv asion", + "Ġupgr ading", + "M ale", + "Ġe lic", + "Ġrelent less", + "ĠCo venant", + "ap esh", + "ĠT rop", + "T y", + "pro duction", + "art y", + "Ġpun ches", + "ak o", + "cyclop edia", + "ĠR abbit", + "ĠHD MI", + "Ġ14 1", + "Ġf oil", + "Item Image", + "ĠF G", + "Ġimplement ations", + "ĠP om", + "ixt ures", + "Ġaw ait", + "Ġ3 30", + "am us", + "Ġumb rella", + "Ġfore see", + "se par", + "Ġcircum cision", + "Ġperipher al", + "S ay", + "ĠExper t", + "In c", + "Ġwithd rew", + "ĠAnd ers", + "f ried", + "Ġradio active", + "ĠOp ening", + "Ġboard ing", + "ĠN D", + "Ġover throw", + "Act iv", + "W P", + "ĠAct s", + "× Ļ", + "Ġmot ions", + "v ic", + "ĠM ighty", + "ĠDef ender", + "a er", + "Ġthank ful", + "ĠK illing", + "ĠBr is", + "mo il", + "Ġpredict ing", + "26 6", + "ch oice", + "Ġkill ers", + "Ġinc ub", + "ĠChe st", + "ather ing", + "Ġpro claimed", + "fl ower", + "oss om", + "umbled ore", + "ĠCy cling", + "ĠOccup y", + "AG ES", + "P en", + "ĠY ug", + "Ġpack aged", + "Ġheight ened", + "c ot", + "st ack", + "C ond", + "Ġst amps", + "m age", + "Ġpersu aded", + "Ġens l", + "ĠCard inal", + "Ġsol itary", + "Ġpossess ing", + "ĠC ork", + "Ġev id", + "ĠT ay", + "Ġbl ues", + "Ġextrem ism", + "Ġlun ar", + "Ġcl own", + "Te chn", + "Ġfest ivals", + "ĠPv P", + "ĠL ar", + "Ġconsequ ently", + "p resent", + "Ġsom eday", + "ç İĭ", + "ĠMet eor", + "Ġtour ing", + "c ulture", + "Ġbe aches", + "S hip", + "c ause", + "ĠFl ood", + "ãĥ ¯", + "Ġpur ity", + "th ose", + "Ġem ission", + "b olt", + "Ġch ord", + "ĠScript ure", + "L u", + "Ġ$ {", + "cre ated", + "Other s", + "25 8", + "Ġelement al", + "Ġannoy ed", + "ĠA E", + "d an", + "ĠS ag", + "Res earchers", + "Ġfair y", + "âĢĵ âĢĵ", + "======== ====", + "Sm art", + "GG GG", + "Ġskelet ons", + "Ġpup ils", + "link ed", + "Ġur gency", + "en abled", + "ĠF uck", + "Ġcoun cill", + "r ab", + "U AL", + "T I", + "Ġlif es", + "Ġconf essed", + "B ug", + "Ġharm on", + "ĠCON FIG", + "ĠNe utral", + "D ouble", + "Ġst aple", + "ĠSH A", + "Brit ish", + "ĠSN P", + "AT OR", + "oc o", + "Ġswing ing", + "ge x", + "ole on", + "pl ain", + "ĠMiss ing", + "ĠTro phy", + "v ari", + "ran ch", + "Ġ3 01", + "4 40", + "00000000 00000000", + "Ġrest oring", + "Ġha ul", + "uc ing", + "ner g", + "Ġfut ures", + "Ġstrateg ist", + "quest ion", + "Ġlater al", + "ĠB ard", + "Ġs or", + "ĠRhod es", + "ĠD owntown", + "????? 
-", + "ĠL it", + "ĠB ened", + "Ġco il", + "st reet", + "ĠPort al", + "FI LE", + "ĠG ru", + "* ,", + "23 1", + "ne um", + "Ġsuck ed", + "Ġr apper", + "Ġtend encies", + "ĠLaure n", + "cell aneous", + "26 7", + "Ġbrow se", + "Ġover c", + "head er", + "o ise", + "Ġbe et", + "ĠG le", + "St ay", + "Ġm um", + "Ġtyp ed", + "Ġdiscount s", + "T alk", + "ĠO g", + "ex isting", + "ĠS ell", + "u ph", + "C I", + "ĠAust rian", + "ĠW arm", + "Ġdismiss al", + "Ġaver ages", + "c amera", + "Ġalleg iance", + "L AN", + "=\" #", + "Ġcomment ators", + "ĠSet ting", + "ĠMid west", + "Ġpharm ac", + "ĠEX P", + "Ġstain less", + "Ch icago", + "Ġt an", + "24 4", + "Ġcountry side", + "ĠV ac", + "29 5", + "Ġpin ned", + "Ġcr ises", + "Ġstandard ized", + "T ask", + "ĠJ ail", + "ĠD ocker", + "col ored", + "f orth", + "\" },", + "Ġpat rons", + "Ġsp ice", + "Ġm ourn", + "ĠM ood", + "Ġlaund ry", + "Ġequ ip", + "ĠM ole", + "y ll", + "ĠTH C", + "n ation", + "ĠSher lock", + "Ġiss u", + "ĠK re", + "ĠAmeric as", + "ĠA AA", + "Ġsystem atically", + "Ġcont ra", + "ĠS ally", + "Ġrational e", + "Ġcar riage", + "Ġpe aks", + "Ġcontrad iction", + "ens ation", + "ĠFail ure", + "Ġpro ps", + "Ġnames pace", + "Ġc ove", + "field s", + "ãĤ ĭ", + "Ġw ool", + "ĠC atch", + "Ġpresum ed", + "ĠD iana", + "r agon", + "ig i", + "Ġh amm", + "Ġst unt", + "ĠG UI", + "ĠObserv atory", + "ĠSh ore", + "Ġsmell s", + "ann ah", + "Ġcock pit", + "ĠD uterte", + "8 50", + "Ġopp ressed", + "bre aker", + "ĠCont ribut", + "ĠPer u", + "ĠMons anto", + "ĠAtt empt", + "Ġcommand ing", + "Ġfr idge", + "ĠR in", + "ĠChe ss", + "ual ity", + "Ġo l", + "Republic an", + "ĠGl ory", + "ĠW IN", + ".... ...", + "ag ent", + "read ing", + "Ġin h", + "J ones", + "Ġcl icks", + "al an", + "Ġ[ ];", + "ĠMaj esty", + "ĠC ed", + "op us", + "ate l", + "à ª", + "AR C", + "ĠEc uador", + "ãĥ ł", + "ĠK uro", + "Ġritual s", + "Ġcapt ive", + "Ġoun ce", + "Ġdisag reement", + "Ġsl og", + "f uel", + "P et", + "M ail", + "Ġexerc ised", + "Ġsol ic", + "Ġrain fall", + "Ġdev otion", + "ĠAss essment", + "Ġrob otic", + "opt ions", + "ĠR P", + "ĠFam ilies", + "ĠFl ames", + "Ġassign ments", + "00 7", + "aked own", + "Ġvoc abulary", + "Re illy", + "Ġc aval", + "g ars", + "Ġsupp ressed", + "ĠS ET", + "ĠJohn s", + "Ġwar p", + "bro ken", + "Ġstat ues", + "Ġadvoc ated", + "Ġ2 75", + "Ġper il", + "om orph", + "ĠF emin", + "per fect", + "Ġh atch", + "L ib", + "5 12", + "Ġlif elong", + "3 13", + "Ġche eks", + "Ġnum bered", + "ĠM ug", + "B ody", + "ra vel", + "We ight", + "ĠJ ak", + "ĠHe ath", + "Ġkiss ing", + "ĠJ UST", + "Ġw aving", + "u pload", + "Ġins ider", + "ĠPro gressive", + "ĠFil ter", + "tt a", + "ĠBe am", + "Ġviol ently", + "ip ation", + "Ġskept icism", + "Ġ19 18", + "ĠAnn ie", + "ĠS I", + "Ġgen etics", + "Ġon board", + "at l", + "ĠFried man", + "ĠB ri", + "cept ive", + "Ġpir ate", + "ĠRep orter", + "27 8", + "Ġmyth ology", + "Ġe clipse", + "Ġsk ins", + "Ġgly ph", + "ing ham", + "F iles", + "C our", + "w omen", + "Ġreg imes", + "Ġphotograp hed", + "K at", + "ĠMA X", + "Offic ials", + "Ġunexpected ly", + "Ġimpress ions", + "F ront", + ";;;; ;;;;", + "Ġsuprem acy", + "Ġs ang", + "Ġaggrav ated", + "Ġabrupt ly", + "ĠS ector", + "Ġexc uses", + "Ġcost ing", + "ide press", + "St ack", + "ĠR NA", + "ob il", + "Ġghost s", + "ld on", + "at ibility", + "Top ics", + "Ġreim burse", + "ĠH M", + "ĠDe g", + "Ġth ief", + "y et", + "ogen esis", + "le aning", + "ĠK ol", + "ĠB asketball", + "Ġf i", + "ĠSee ing", + "Ġrecy cling", + "Ġ[ -", + "Cong ress", + "Ġlect ures", + "P sy", + "Ġne p", + "Ġm aid", + "Ġori ented", + "A X", + 
"Ġrespect ful", + "re ne", + "fl ush", + "ĠUn loaded", + "re quest", + "gr id", + "ĠAltern atively", + "ĠHug o", + "Ġdec ree", + "ĠBuddh ism", + "and um", + "And roid", + "ĠCong o", + "ĠJoy ce", + "Ġacknowled ging", + "hes ive", + "ĠTom orrow", + "ĠH iro", + "th ren", + "ĠM aced", + "Ġho ax", + "ĠIncre ased", + "ĠPr adesh", + "W ild", + "____ __", + "16 1", + "Ġa unt", + "Ġdistribut ing", + "ĠT ucker", + "ĠSS L", + "ĠW olves", + "B uilding", + "ou lt", + "ĠLu o", + "ĠY as", + "ĠSp ir", + "ĠSh ape", + "ĠCamb od", + "ĠIP v", + "Ġm l", + "Ġext rad", + "39 0", + "ĠPenn y", + "d ream", + "Ġstation ed", + "opt ional", + "ew orthy", + ". ", + "ĠWorks hop", + "ĠRet ail", + "ĠAv atar", + "6 25", + "N a", + "ĠV C", + "ĠSec ure", + "M Y", + "19 88", + "oss ip", + "Ġpro state", + "Ġund en", + "Ġg amer", + "ĠCont ents", + "ĠWar hammer", + "ĠSent inel", + "3 10", + "Ġse gregation", + "ĠF lex", + "ĠM AY", + "Ġdr ills", + "ĠDrug s", + "Islam ic", + "Ġsp ur", + "Ġca fe", + "Ġimag inary", + "Ġgu iding", + "Ġsw ings", + "ĠThe me", + "ob y", + "Ġn ud", + "Ġbe gging", + "Ġstr ongh", + "Ġreject ing", + "Ġpedest rians", + "ĠPro spect", + "R are", + "s le", + "Ġconcess ions", + "ĠConst itutional", + "Ġbe ams", + "Ġfib ers", + "p oon", + "Ġinstinct s", + "pro perty", + "ĠB IG", + "Sand ers", + "im ates", + "Ġco ating", + "Ġcorps es", + "ĠTR UE", + "check ed", + "Ġ16 6", + "A sh", + "ĠJ S", + "ĠF iction", + "Ġcommun al", + "Ġener getic", + "oooo oooo", + "Ġnow adays", + "IL D", + "ib o", + "ĠSU V", + "R en", + "Ġdwell ing", + "Sil ver", + "Ġt ally", + "ĠM oving", + "Ġcow ard", + "Ġgener als", + "Ġhorn s", + "Ġcirc ulated", + "Ġrob bed", + "ĠUn limited", + "Ġharass ed", + "Ġinhib it", + "Ġcomp oser", + "ĠSpot ify", + "Ġspread s", + "3 64", + "Ġsu icidal", + "Ġno ises", + "ĠSt ur", + "Ġs aga", + "ĠK ag", + "is o", + "Ġtheoret ically", + "M oney", + "Ġsimilar ity", + "Ġslic ed", + "ut ils", + "ing es", + "\" -", + "Ġan th", + "Ġimp ed", + "Mod ule", + "Through out", + "Ġmen us", + "comm ittee", + "and i", + "ob j", + "in av", + "f ired", + "ĠAb dullah", + "Ġund ead", + "Ġfont s", + "H old", + "EN G", + "Ġsustain ability", + "Ġfl ick", + "Ġr azor", + "ĠF est", + "ĠChar acters", + "Ġword ing", + "Ġpopul ist", + "Ġcritic izing", + "Ġm use", + "v ine", + "Ġcard board", + "Ġkind ly", + "Ġfr inge", + "ĠThe ft", + "icult ural", + "Ġgovern ors", + "Ġ ����", + "Ġ16 3", + "Ġtime out", + "ĠA uth", + "Child ren", + "A U", + "Ġred emption", + "ĠAl ger", + "Ġ19 14", + "Ġw aved", + "Ġastron auts", + "og rams", + "Ġsw amp", + "ĠFinn ish", + "Ġcand le", + "Ġton nes", + "ut m", + "Ġr ay", + "Ġsp un", + "Ġfear ful", + "art icles", + "Ġca us", + "or ically", + "ĠRequ ires", + "ĠG ol", + "Ġpop e", + "Ġinaug ural", + "Ġg le", + "AD A", + "ĠIS IL", + "ĠOff ensive", + "Ġwatch dog", + "Ġbal con", + "ent ity", + "ĠH oo", + "Ġgall on", + "AC C", + "Ġdoub ling", + "Ġimpl ication", + "ĠS ight", + "Ġdoct r", + "---- ---", + "Ġ\\ \\", + "Ġm alt", + "R oll", + "Ġâī ¥", + "Ġrec ap", + "add ing", + "u ces", + "ĠB end", + "fig ure", + "Ġtur key", + "Ġsoc ietal", + "ĠT ickets", + "Ġcommer cially", + "Ġsp icy", + "Ġ2 16", + "ĠR amp", + "Ġsuperior ity", + "à ¯", + "ĠTr acker", + "C arl", + "ĠC oy", + "ĠPatri ot", + "Ġconsult ed", + "Ġlist ings", + "Ġsle w", + "reens hot", + "ĠG one", + "Ġ[ ...]", + "30 9", + "Ġh ottest", + "Ø ±", + "Ġrock y", + "ĠD iaz", + "Ġmass age", + "Ġpar aly", + "Ġp ony", + "A z", + "Ġcart ridge", + "ĠN Z", + "Ġsn ack", + "ĠLam ar", + "ple ment", + "ĠLes lie", + "Ġm ater", + "Ġsn ipp", + "24 6", + "Ġjoint ly", + "ĠBris bane", + "ĠiP 
od", + "Ġpump ing", + "Ġgo at", + "ĠSh aron", + "eal ing", + "Ġcor on", + "Ġan omal", + "rah im", + "ĠConnect ion", + "Ġsculpt ure", + "Ġsched uling", + "ĠD addy", + "at hing", + "Ġeyeb rows", + "Ġcur ved", + "Ġsent iments", + "Ġdraft ing", + "D rop", + "( [", + "Ġnom inal", + "ĠLeaders hip", + "ĠG row", + "Ġ17 6", + "Ġconstruct ive", + "iv ation", + "Ġcorrupt ed", + "ger ald", + "ĠC ros", + "ĠChe ster", + "ĠL ap", + "ãģ ª", + "OT H", + "D ATA", + "Ġal mond", + "pro bably", + "I mp", + "Ġfe ast", + "ĠWar craft", + "F lor", + "Ġcheck point", + "Ġtrans cription", + "Ġ20 4", + "Ġtwe aks", + "Ġrel ieve", + "S cience", + "Ġperform er", + "Z one", + "Ġtur moil", + "ig ated", + "hib it", + "ĠC afe", + "the med", + "Ġflu or", + "ben ch", + "Ġde com", + "ĠU nt", + "ĠBar rett", + "ĠF acts", + "Ġt asting", + "ĠPTS D", + "ĠSe al", + "ĠJuda ism", + "ĠDynam ic", + "ĠC ors", + "V e", + "ĠM ing", + "ĠTrans form", + "v on", + "ĠDef enders", + "ĠTact ical", + "ĠV on", + "ĠUn ivers", + "Ġdist orted", + "ĠB reath", + "?' \"", + "Ġag on", + "ĠDead ly", + "Ġl an", + "ĠCy cle", + "orn ed", + "Ġrel iably", + "Ġgl or", + "ĠMon key", + "ãĥ ¡", + "Ġad ren", + "Ġmicrow ave", + "ĠAl ban", + "irc raft", + "dig it", + "sm art", + "ĠD read", + "¯¯¯¯¯¯¯¯ ¯¯¯¯¯¯¯¯", + "{ {", + "ĠRoc hester", + "Ġsimpl ified", + "Ġinf licted", + "Ġtake over", + "Ġyour selves", + "ad itional", + "Ġmus cular", + "K S", + "Ġing en", + "T ax", + "ĠFe ature", + "27 7", + "Ġcru c", + "Ġcr ate", + "Ġun identified", + "Ġacclaim ed", + "ĠM anga", + "ĠFr ances", + "ĠNep al", + "ĠG erald", + "ĠKu wait", + "Ġsl ain", + "ĠHe b", + "ĠG oku", + "ãģ® æ", + "28 6", + "M rs", + "ĠC ody", + "ĠSan ctuary", + "01 6", + "Ġdism ant", + "Ġdatas et", + "ĠH ond", + "b uck", + "ĠPat terson", + "Ġpal ette", + "ĠG D", + "ic ol", + "ĠL odge", + "Ġplanet ary", + "ak in", + "ĠRegist ered", + "ab we", + "ĠPeters burg", + "Ġha iled", + "ĠP iece", + "S che", + "ĠDO J", + "Ġen umer", + "18 1", + "ĠObs erver", + "ĠB old", + "f ounded", + "com merce", + "Ġexplo its", + "ĠF inding", + "UR N", + "ĠS ne", + "ĠAc id", + "ay ette", + "ĠVal ues", + "Ġdr astic", + "Ġarchitect ural", + "Ġ\" .", + "× ķ", + "ump ed", + "Ġwra pping", + "Ġwid ow", + "ĠSl ayer", + "l ace", + "on ce", + "German y", + "av oid", + "Ġtem ples", + "P AR", + "à ´", + "ĠLuc ifer", + "ĠFl ickr", + "l ov", + "for ces", + "Ġsc outing", + "Ġlou der", + "tes y", + "Ġbefore hand", + "Ä ĵ", + "ĠNe on", + "ĠW ol", + "ĠTyp ically", + "ĠPolit ico", + "-+ -+", + "Ġbuild er", + "Ġder ive", + "K ill", + "Ġp oker", + "Ġambig uous", + "Ġlif ts", + "Ġcy t", + "Ġrib s", + "ood le", + "ĠS ounds", + "h air", + "ĠSynd rome", + "t f", + "Ġproport ional", + "u id", + "Ġper taining", + "ĠKind le", + "ĠNeg ro", + "Ġreiter ated", + "ĠTon ight", + "oth s", + "ĠCorn ell", + "Ġo wing", + "Ġ20 8", + "elf are", + "oc ating", + "ĠB irds", + "Sub scribe", + "Ġess ays", + "Ġburd ens", + "Ġillust rations", + "ar ious", + "ER AL", + "ĠCal cul", + "Ġx en", + "ĠLink edIn", + "ĠJ ung", + "Ġredes ign", + "Con nor", + "29 6", + "Ġrevers al", + "ĠAd elaide", + "ĠL L", + "Ġs inking", + "Ġg um", + "US H", + "c apt", + "ĠGr imm", + "Ġfoot steps", + "ĠCB D", + "isp ers", + "Ġpro se", + "Wed nesday", + "ĠM ovies", + "ed in", + "Ġoverturn ed", + "Ġcontent ious", + "US B", + "~~~~~~~~ ~~~~~~~~", + "ĠCo pper", + "Ġpoint less", + "N V", + "val ues", + "olph in", + "d ain", + "Ġdepos ited", + "ĠG W", + "Ġpreced ed", + "ĠCl a", + "ĠGo lem", + "ĠN im", + "ĠÎ ²", + "ĠEngine ers", + "m iddle", + "Ġfl att", + "oper ative", + "Ġcouncil s", + "imb abwe", + "el in", + 
"Ġstress ful", + "ĠL D", + "Ġres h", + "l ake", + "Ġwheel chair", + "ĠAltern ative", + "Ġoptim ize", + "oper ation", + "Ġpe ek", + "Ġones elf", + "ig il", + "Ġtrans itions", + "op athy", + "bl ank", + "Ġ16 9", + "17 1", + "________________________________ ________________________________", + "Ġl aundering", + "En c", + "ĠD EC", + "Ġwork outs", + "Ġsp ikes", + "Ġdin osaurs", + "Ġdiscrim inatory", + "P ool", + "R ather", + "38 5", + "R NA", + "tes ters", + "et o", + "ĠIdent ity", + "Ġve in", + "ĠBur ton", + "Ġarc ade", + "4 20", + "Ult imately", + "ĠSad ly", + "à °", + "p ill", + "Ġcub ic", + "ĠSpect rum", + "the se", + "st ates", + "Ġun official", + "h awks", + "ĠEVER Y", + "Ġrain bow", + "Ġincarcer ation", + "and ing", + "Ġsy ll", + "ĠEver ton", + "Ġ17 9", + "ĠSer bia", + "Ġ18 9", + "m eter", + "ĠMic key", + "Ġant iqu", + "Ġfact ual", + "ne ck", + "ĠN are", + "n orm", + "m ust", + "Ġhigh ways", + "Ġgl am", + "Ġdivid ing", + "ĠSquad ron", + "ĠMar tha", + "Ġbirth s", + "C over", + "//////// ////////", + "ĠW ong", + "Ph ot", + "ĠA LS", + "ri o", + "ĠNon etheless", + "ĠL emon", + "Ġ20 6", + "ĠE E", + "Ġderiv ative", + "ĠWW II", + "v ote", + "Ġthere in", + "Ġsepar ating", + "44 6", + "sy nc", + "ĠStre ets", + "Ġr att", + "Ġmunicip ality", + "ĠShort ly", + "Ġmon k", + ") ,\"", + "Ġscr ub", + "Ġoper atives", + "Ne ither", + "Pl ace", + "ĠLim it", + "F emale", + "ĠAct or", + "Char acter", + "Ġconstit uted", + "35 7", + "Ġprotest ed", + "ĠSt raw", + "ĠHe ight", + "ild a", + "ĠTy ph", + "Ġflood s", + "Ġcos metic", + "W AY", + "pert ure", + "up on", + "t ons", + "ess ing", + "ĠP ocket", + "Ġro oft", + "ĠC aucas", + "Ġant idepress", + "Ġincomp atible", + "EC D", + "Ġoper a", + "ĠCont est", + "Ġgener ators", + "l ime", + "Def ense", + "19 87", + "for um", + "Ġsav age", + "ĠHung arian", + "n z", + "Ġmet allic", + "Ġex pelled", + "Ġres idency", + "Ġdress es", + "66 6", + "ĠC lement", + "f ires", + "C ategory", + "Ġge ek", + "al is", + "Ġc emetery", + "educ ated", + "Ġc rawl", + "ĠUn able", + "ĠT yson", + "ak is", + "Ġp ardon", + "ĠW ra", + "Ġstrengthen ed", + "ĠF ors", + "33 5", + "ĠH C", + "ĠM ond", + "Ġvisual s", + "ĠBeat les", + "ett lement", + "Ġ ï", + "g ro", + "Ġb ash", + "Ġpo orest", + "Ġex cel", + "Ġaspir ations", + "ĠM unicip", + "ens ible", + "Ġceremon ies", + "Ġintimid ation", + "ĠCON TR", + "be ck", + "ĠK ap", + "as u", + "Ġtradem arks", + "ĠS ew", + "ĠComp etition", + "net work", + "ĠAr ri", + "ĠT et", + "Ro aming", + "W C", + "D at", + "Ġso b", + "Ġpair ing", + "Ġoverd ose", + "SA Y", + "ab er", + "Ġrev olt", + "ĠF ah", + "act ing", + "e q", + "est ation", + "F ight", + "ĠMar ks", + "27 3", + "Ġ17 8", + "R aw", + "ãģ ĭ", + "34 9", + "bl ocks", + "Ġver ge", + "est ine", + "ĠPod esta", + "Ġinv asive", + "Ġprofound ly", + "ĠA o", + "e ach", + "Ġl est", + "inter pret", + "Ġshr inking", + "Ġerr one", + "Ġche es", + "ly s", + "ĠI vy", + "ĠDirect ory", + "Ġhint ed", + "V ICE", + "Ġcontact ing", + "ĠG ent", + "he i", + "Ġlabel ing", + "Ġmerc ury", + "ĠL ite", + "Ġexp ires", + "Ġdest abil", + "rit is", + "c u", + "Ġfeather s", + "Ġste er", + "Ġprogram med", + "ĠV ader", + "Go ing", + "ĠE lim", + "Ġy o", + "ĠMic he", + "Ġ20 3", + "Ġslee ves", + "Ġb ully", + "ĠHum ans", + "36 8", + "Ġcomp ress", + "ĠBan ner", + "AR S", + "Ġa while", + "Ġcal ib", + "Ġspons orship", + "ĠDiff iculty", + "ĠP apers", + "Ġident ifier", + "} .", + "Ġy og", + "ĠSh ia", + "Ġclean up", + "Ġvib e", + "int rodu", + "im ming", + "Austral ia", + "Ġout lines", + "ĠY outube", + "tr ain", + "ĠM akes", + "Ġde ported", + "Ġcent r", + 
"ĠD ug", + "ĠB oulder", + "ĠBuff y", + "Ġinj unction", + "ĠHar ley", + "ĠG roups", + "ĠD umbledore", + "ĠCl ara", + "Ġ\" -", + "Ġsacrific ed", + "ep h", + "Sh adow", + "ib ling", + "Ġfreel ance", + "Ġevident ly", + "ph al", + "Ġret ains", + "M ir", + "Ġfin ite", + "d ar", + "ĠC ous", + "Ġrep aired", + "Ġperiod ic", + "Ġchampions hips", + "Ġaster oid", + "bl ind", + "Ġexpress ly", + "ĠAst ros", + "Ġsc aled", + "Ġge ographical", + "ĠRap ids", + "En joy", + "Ġel astic", + "ĠMoh amed", + "Mark et", + "be gin", + "Ġdisco vers", + "Ġtele communications", + "Ġscan ner", + "Ġen large", + "Ġsh arks", + "Ġpsy chedel", + "ĠRou ge", + "Ġsnap shot", + "is ine", + "X P", + "Ġpestic ides", + "ĠL SD", + "ĠDist ribution", + "re ally", + "Ġde gradation", + "Ġdisgu ise", + "Ġbi om", + "ĠEX T", + "Ġequ ations", + "Ġhaz ards", + "ĠComp ared", + ") *", + "Ġvirt ues", + "Ġeld ers", + "Ġenh ancing", + "ĠAc ross", + "er os", + "ang ling", + "Ġcomb ust", + "ucc i", + "Ġconc ussion", + "Ġcontrace ption", + "ĠK ang", + "Ġexpress es", + "Ġa ux", + "ĠP ione", + "Ġexhib its", + "Deb ug", + "OT AL", + "ĠAl ready", + "ĠWheel er", + "Ġexp ands", + "? :", + "Ġreconc iliation", + "Ġpir ates", + "Ġpur se", + "Ġdiscour age", + "Ġspect acle", + "R ank", + "Ġwra ps", + "ĠTh ought", + "Ġimp ending", + "O pp", + "ĠAng lo", + "ĠE UR", + "Ġscrew ed", + "ret ched", + "Ġencour agement", + "mod els", + "Ġconf use", + "mm m", + "ĠVit amin", + "âĸij âĸij", + "C ru", + "Ġkn ights", + "Ġdisc ard", + "Ġb ishops", + "ĠW ear", + "ĠGar rett", + "k an", + "ãĥ Ł", + "Ġmascul ine", + "cap ital", + "ĠA us", + "Ġfat ally", + "th anks", + "ĠA U", + "ĠG ut", + "12 00", + "Ġ 00000000", + "Ġsur rog", + "ĠBI OS", + "ra its", + "ĠWat ts", + "Ġresur rection", + "ĠElect oral", + "ĠT ips", + "4 000", + "Ġnut rient", + "Ġdepict ing", + "Ġspr ink", + "Ġm uff", + "ĠL IM", + "ĠS ample", + "ps c", + "ib i", + "gener ated", + "Ġspec imens", + "Ġdiss atisf", + "Ġtail ored", + "Ġhold ings", + "ĠMonth ly", + "ĠE at", + "po ons", + "Ġne c", + "ĠC age", + "ĠLot us", + "ĠLan tern", + "Ġfront ier", + "Ġp ensions", + "Ġj oked", + "ĠHard y", + "=-=- =-=-", + "r ade", + "U ID", + "Ġr ails", + "Ġem it", + "Ġsl ate", + "Ġsm ug", + "Ġsp it", + "ĠCall s", + "ĠJac obs", + "f eat", + "ĠU E", + "Ġrest ruct", + "Ġregener ation", + "Ġenerg ies", + "ĠCon nor", + "OH N", + "ĠChe ese", + "Ġg er", + "Ġresur rect", + "man agement", + "N W", + "Ġpres ently", + "ĠBru ins", + "M ember", + "ĠM ang", + "id an", + "Ġboost ing", + "w yn", + "+ .", + "requ isite", + "ĠNY PD", + "ĠMe gan", + "ĠCond itions", + "Ġp ics", + "nes ium", + "ĠR ash", + "Ġ17 4", + "ĠD ucks", + "Ġemb ro", + "z u", + "on ian", + "rel igious", + "Ġc raz", + "ĠAC A", + "ĠZ ucker", + "EM A", + "ĠPro s", + "We apon", + "ĠKn ox", + "ĠAr duino", + "Ġst ove", + "Ġheaven s", + "ĠP urchase", + "Ġher d", + "Ġfundra iser", + "Dig ital", + "5 000", + "Ġprop onents", + "/ âĢĭ", + "Ġj elly", + "ĠVis a", + "Ġmon ks", + "Ġadvance ment", + "ĠW er", + "Ġ18 7", + "e us", + "ert ility", + "Ġfet al", + "Ġ19 36", + "L o", + "Ġout fits", + "Ġstair case", + "b omb", + "Ġcustom ized", + "cl air", + "T ree", + "Ġm apped", + "ĠConsider ing", + "ĠTor res", + "Ġmeth yl", + "Ġapprox imate", + "Ġdo om", + "ĠHans en", + "Ġc rossover", + "Ġstand alone", + "ä ¼", + "Ġinv ites", + "Ġgra veyard", + "Ġh p", + "Donald Trump", + "Ġesc ort", + "G ar", + "Ġpredec essors", + "Ġh ay", + "Ġen zyme", + "ĠStra ight", + "vis ors", + "I ng", + "ane ously", + "ĠApp lied", + "Ġf ec", + "ĠDur ant", + "Ġout spoken", + "or b", + "Ġz eal", + "Ġdisgr ace", + "' ).", + "ĠChe 
ng", + "28 9", + "ĠRen a", + "ĠSu icide", + "29 4", + "Ġout raged", + "ĠNew man", + "ĠN vidia", + "ĠA ber", + "ĠB ers", + "Ġrecre ation", + "Wind ow", + "ĠD P", + "x e", + "Ġped oph", + "Ġfall out", + "ambo o", + "Ġpresent ations", + "ĠApp s", + "Ġh tml", + "3 45", + "ĠX XX", + "Ġrub bing", + "ĠLe ather", + "Ġhum idity", + "se ys", + "est ablished", + "ĠUn its", + "64 6", + "Ġrespect able", + "A uto", + "Ġthri ving", + "ĠInn ovation", + "ang s", + "Ext ra", + "reg ulation", + "29 8", + "p ick", + "Ex amples", + "ĠC J", + "Att ack", + "Ġdr acon", + "L T", + "Ġstick er", + "re rs", + "Ġsun ny", + "I ss", + "reg ulated", + "d im", + "ĠAb stract", + "Ġhus bands", + "Off ice", + "om ination", + "it ars", + "AN GE", + "asc al", + "ĠK ris", + "ĠInf antry", + "Ġm alf", + "ĠA the", + "ĠR ally", + "bal anced", + "................ ........", + "OU P", + "Ġmole cule", + "met ics", + "ĠSpl it", + "ĠInstruct ions", + "ĠN ights", + "c ards", + "Ġt ug", + "Ġcon e", + "å Ń", + "Ġt x", + "ĠDisc ussion", + "Ġcatast rophe", + "pp e", + "g io", + "Ġcommun ism", + "Ġhal ted", + "ĠGu ant", + "cle an", + "ĠSc hed", + "ĠK anye", + "Ġw ander", + "ĠSer iously", + "Ġ18 8", + "enn ial", + "f ollow", + "product ive", + "ĠFl ow", + "ĠS ail", + "Ġc raw", + "Ġsim ulations", + "or u", + "ang les", + "ĠN olan", + "Ġmen stru", + "4 70", + "Ġ20 7", + "aj a", + "Ġcas ually", + "board ing", + "Ġ2 22", + "ov y", + "ĠN umbers", + "um at", + "O E", + "28 7", + "ĠCle mson", + "Ġcert s", + "Ġsl id", + "ĠT ribe", + "Ġto ast", + "Ġfort unes", + "Ġf als", + "ĠComm ittees", + "Ġg p", + "Ġf iery", + "ĠN ets", + "ĠAn ime", + "Pack age", + "ĠComp are", + "l aughter", + "in fect", + "Ġatroc ities", + "Ġjust ices", + "Ġins ults", + "ĠVern on", + "Ġsh aken", + "Ġperson a", + "est amp", + "36 7", + "br ain", + "Ġexperiment ing", + "K en", + "ĠElect ronics", + "Ġ16 1", + "dom ain", + "Ġgraph ical", + "b ishop", + "Ġwho pping", + "ĠEv angel", + "Ġadvertis ers", + "ĠSpe ar", + "Ġb ids", + "Ġdestro ys", + "ut z", + "Ġunders c", + "ĠAD D", + "Ġan ts", + "ĠC um", + "ipp les", + "ĠF ill", + "Ġgl anced", + "Ġind icted", + "ĠE ff", + "Ġmis con", + "ĠDes ktop", + "Ġab ide", + "ãĥ Ģ", + "ĠI o", + "ĠC oul", + "Ġcaps ule", + "ĠCh rys", + "M ON", + "Ġund es", + "ĠI RA", + "Ġc itation", + "Ġdict ate", + "ĠNet works", + "ĠConf lict", + "ĠSt uff", + "x a", + "is ec", + "ĠChem istry", + "Ġquarter ly", + "William s", + "an an", + "O pt", + "ĠAlexand ria", + "out heastern", + "ĠSpring field", + "ĠBlack s", + "Ġge ography", + "24 2", + "Ġut most", + "ĠEx xon", + "ab outs", + "E VA", + "ĠEn able", + "ĠBar r", + "Ġdisag reed", + "ĠCy prus", + "Ġdement ia", + "Ġlab s", + "Ġubiqu itous", + "ĠLO VE", + "Ġconsolid ated", + "s r", + "Ġcream y", + "ĠTim ber", + "Reg ardless", + "ĠCert ificate", + "Ġ\" ...", + "ogen ous", + "Capt ain", + "Ġinsult ing", + "ĠSor os", + "ĠInst r", + "ĠBulgar ia", + "bet ter", + "Ġsuck ing", + "ĠDavid son", + "at z", + "Ġcoll ateral", + "g if", + "Ġplag ued", + "ĠC ancel", + "ĠGard ner", + "R B", + "Ġsix teen", + "Rem ove", + "ur istic", + "c ook", + "R od", + "Ġcompr ising", + "f le", + ") âĢĶ", + "ĠVik ing", + "g rowth", + "agon al", + "Ġsr f", + "af ety", + "m ot", + "N early", + "st own", + "ĠF actor", + "Ġautom obile", + "Ġproced ural", + "m ask", + "amp ires", + "Ġdisapp ears", + "j ab", + "3 15", + "Ġ19 51", + "ne eded", + "Ġd aring", + "le ader", + "Ġp odium", + "Ġun healthy", + "Ġm und", + "Ġpy ramid", + "oc re", + "Ġkiss ed", + "Ġdream ed", + "ĠFant astic", + "ĠG ly", + "å Ĭ", + "Ġgreat ness", + "Ġsp ices", + "Ġmet ropolitan", + 
"Ġcomp uls", + "i ets", + "101 6", + "ĠSh am", + "ĠP yr", + "fl ies", + "ĠMid night", + "Ġswall owed", + "Ġgen res", + "ĠL ucky", + "ĠRew ards", + "Ġdisp atch", + "ĠI PA", + "ĠApp ly", + "Ġa ven", + "al ities", + "3 12", + "th ings", + "Ġ( ).", + "Ġm ates", + "ĠS z", + "ĠC OP", + "ol ate", + "O FF", + "Ġre charge", + "c aps", + "ĠYork er", + "ic one", + "Ġgal axies", + "ile aks", + "D ave", + "ĠP uzz", + "ĠCelt ic", + "ĠA FC", + "27 6", + "ĠS ons", + "Ġaffirm ative", + "H or", + "Ġtutorial s", + "ĠC ITY", + "ĠR osa", + "ĠExt ension", + "Ser ies", + "Ġf ats", + "Ġr ab", + "l is", + "Ġun ic", + "Ġe ve", + "ĠSp in", + "Ġadul thood", + "ty p", + "Ġsect arian", + "Ġcheck out", + "ĠCy cl", + "S ingle", + "Ġmart yr", + "Ġch illing", + "88 8", + "ou fl", + "Ġ] ;", + "Ġcongest ion", + "m k", + "ĠWhere as", + "Ġ19 38", + "ur rencies", + "er ion", + "Ġbo ast", + "ĠPat ients", + "Ġch ap", + "ĠB D", + "real DonaldTrump", + "Ġexam ines", + "h ov", + "Ġstart ling", + "ĠBab ylon", + "w id", + "om ew", + "br ance", + "ĠOd yssey", + "w ig", + "Ġtor ch", + "ĠV ox", + "ĠMo z", + "ĠT roll", + "ĠAn s", + "Similar ly", + "ĠF ul", + "00 6", + "Un less", + "ĠAl one", + "st ead", + "ĠPub lisher", + "r ights", + "t u", + "ĠDoes n", + "Ġprofession ally", + "Ġcl o", + "ic z", + "Ġste als", + "Ġ á", + "19 86", + "Ġst urdy", + "ĠJoh ann", + "Ġmed als", + "Ġfil ings", + "ĠFr aser", + "d one", + "Ġmult inational", + "Ġf eder", + "Ġworth less", + "Ġp est", + "Yes terday", + "ank ind", + "Ġg ays", + "Ġb orne", + "ĠP OS", + "Pict ure", + "Ġpercent ages", + "25 1", + "r ame", + "Ġpot ions", + "AM D", + "ĠLeban ese", + "Ġr ang", + "ĠL SU", + "ong s", + "Ġpen insula", + "ĠCl ause", + "AL K", + "oh a", + "ĠMac Book", + "Ġunanim ous", + "Ġl enders", + "Ġhang s", + "Ġfranch ises", + "ore rs", + "ĠUp dates", + "Ġisol ate", + "and ro", + "S oon", + "Ġdisrupt ive", + "ĠSur ve", + "Ġst itches", + "ĠSc orp", + "ĠDomin ion", + "Ġsupp lying", + "Ar g", + "Ġtur ret", + "ĠL uk", + "Ġbr ackets", + "* )", + "ĠRevolution ary", + "ĠHon est", + "Ġnot icing", + "ĠSh annon", + "Ġafford ed", + "Ġth a", + "ĠJan et", + "! 
--", + "ĠNare ndra", + "ĠPl ot", + "H ol", + "se ver", + "e enth", + "Ġobst ruction", + "Ġ10 24", + "st aff", + "j as", + "or get", + "sc enes", + "l aughs", + "ĠF argo", + "cr ime", + "Ġorche str", + "Ġde let", + "ili ary", + "rie ved", + "Ġmilit ar", + "ĠGreen e", + "âĹ ı", + "ãģ ¦", + "ĠGu ards", + "Ġunle ashed", + "ĠWe ber", + "Ġadjust able", + "Ġcal iber", + "Ġmotiv ations", + "Ġà ł", + "m Ah", + "ĠL anka", + "hand le", + "Ġp ent", + "ĠR av", + "ĠAng ular", + "ĠK au", + "umb ing", + "Ġphil anthrop", + "Ġde hyd", + "Ġtox icity", + "e er", + "ĠY ORK", + "w itz", + "å ¼", + "ĠI E", + "commun ity", + "ĠA H", + "Ġret ali", + "Ġmass ively", + "ĠDani els", + "ĠD EL", + "Ġcar cin", + "Ur l", + "Ġrout ing", + "ĠNPC s", + "ĠR AF", + "ry ce", + "Ġwa ived", + "ĠGu atem", + "Every body", + "Ġco venant", + "Ġ17 3", + "Ġrelax ing", + "Ġqu art", + "al most", + "Ġguard ed", + "ĠSold iers", + "ĠPL AY", + "Ġout going", + "L AND", + "Ġre write", + "ĠM OV", + "ĠIm per", + "ĠS olution", + "Ġphenomen al", + "Ġl ongevity", + "Ġimp at", + "ĠN issan", + "ir ie", + "Ġod or", + "ĠZ ar", + "ok s", + "Ġmilit ias", + "ĠSP EC", + "Ġtoler ated", + "ars er", + "ĠBrad ford", + "+ ,", + "Ġsur real", + "s f", + "Can adian", + "Ġresemb lance", + "Ġcarbohyd rate", + "VI EW", + "Ġaccess ory", + "me al", + "larg est", + "ieg el", + "Some one", + "Ġtoug hest", + "os o", + "Ġfun nel", + "Ġcondemn ation", + "lu ent", + "Ġw ired", + "ĠSun set", + "Jes us", + "ĠP ST", + "ĠP ages", + "ĠTy coon", + "ĠP F", + "Ġselect ions", + "Ġ à¤", + "part isan", + "Ġhigh s", + "ĠR une", + "Ġcraft s", + "le ad", + "ĠParent s", + "Ġre claim", + "ek er", + "ĠAll ied", + "ae per", + "Ġlo oming", + "Ġbenefic iaries", + "ĠH ull", + "Stud ents", + "Jew ish", + "d j", + "Ġp act", + "tem plate", + "ĠOffic ials", + "ĠBay lor", + "Ġhe mp", + "Ġyouth s", + "ĠLevel s", + "ĠX iao", + "ĠC hes", + "Ġende avor", + "ĠRem oved", + "Ġhipp ocamp", + "H ell", + "ãĤ Ĭ", + "80 5", + "Ġd inosaur", + "ĠWr ath", + "ĠIndones ian", + "Ġcalcul ator", + "ĠD ictionary", + "Ġ4 20", + "ĠM AG", + "( _", + "! ,", + "t arians", + "Ġrestrict ing", + "rac use", + "Ġweek day", + "OU NT", + "Ġsh rugged", + "leg round", + "Ġb ald", + "ĠDo ctors", + "Ġt outed", + "ĠMax well", + "Ġ2 14", + "Ġdiplom at", + "Ġrep ression", + "Ġconstitu ency", + "v ice", + "r anked", + "ĠNap oleon", + "g ang", + "ĠFore ver", + "t un", + "Ġbul b", + "ĠPD T", + "ĠC isco", + "V EN", + "Ġres umed", + "Ste ven", + "ĠManit oba", + "Ġfab ulous", + "ĠAg ents", + "19 84", + "Ġam using", + "ĠMyster ies", + "Ġor thodox", + "fl oor", + "Ġquestion naire", + "Ġpenet rate", + "Ġfilm makers", + "ĠUn c", + "Ġst amped", + "Ġth irteen", + "Ġout field", + "Ġforward ed", + "Ġapp ra", + "Ġa ided", + "t ry", + "Ġunf ocused", + "ĠL iz", + "ĠWend y", + "ĠSc ene", + "Ch arg", + "Ġreject s", + "Ġleft ist", + "ĠProv idence", + "ĠBr id", + "reg n", + "Ġprophe cy", + "ĠL IVE", + "4 99", + "Ġfor ge", + "ĠF ML", + "Ġintrins ic", + "ĠF rog", + "Ġw ont", + "ĠH olt", + "Ġfam ed", + "CL US", + "aeper nick", + "ĠH ate", + "ĠC ay", + "Ġregister ing", + "ort ality", + "rop y", + "ocaly ptic", + "a an", + "n av", + "Ġfasc ist", + "IF IED", + "Ġimpl icated", + "ĠRes ort", + "ĠChand ler", + "ĠBr ick", + "P in", + "ys c", + "Us age", + "ĠHel m", + "us ra", + "âĺħ âĺħ", + "ĠAb bas", + "Ġunanim ously", + "Ġke eper", + "Ġadd icted", + "?? 
?", + "Ġhelm ets", + "Ġant ioxid", + "aps ed", + "80 8", + "gi ene", + "Ġwa its", + "Ġmin ion", + "ra ved", + "ĠP orsche", + "Ġdream ing", + "Ġ17 1", + "ĠC ain", + "Ġun for", + "ass o", + "ĠConfig uration", + "k un", + "hard t", + "Ġn ested", + "ĠL DS", + "L ES", + "Ġt ying", + "en os", + "Ġc ue", + "ĠMar qu", + "sk irts", + "Ġclick ed", + "Ġexp iration", + "ĠAccording ly", + "ĠW C", + "Ġbless ings", + "Ġaddict ive", + "ĠN arr", + "y x", + "ĠJagu ars", + "Ġrent s", + "ĠS iber", + "Ġt ipped", + "ous se", + "ĠFitz gerald", + "Ġhier arch", + "out ine", + "Ġwa velength", + "> .", + "ch id", + "ĠProcess ing", + "/ +", + "r anking", + "E asy", + "ĠConst ruct", + "Ġt et", + "ins ured", + "H UD", + "Ġqu oting", + "Ġcommun icated", + "in x", + "Ġin mate", + "Ġerect ed", + "ĠAbs olutely", + "ĠSure ly", + "Ġun im", + "ĠThr one", + "he id", + "Ġcl aws", + "Ġsuper star", + "ĠL enn", + "ĠWh is", + "U k", + "ab ol", + "Ġsk et", + "ĠN iet", + "Ġper ks", + "Ġaff inity", + "Ġopen ings", + "phas is", + "Ġdiscrim inate", + "T ip", + "v c", + "Ġgr inding", + "ĠJenn y", + "Ġast hma", + "hol es", + "ĠHom er", + "Ġreg isters", + "ĠGl ad", + "Ġcre ations", + "Ġlith ium", + "Ġappl ause", + "unt il", + "Just ice", + "ĠTur ks", + "Ġsc andals", + "Ġb ake", + "t ank", + "M ech", + "ĠMe ans", + "ĠM aid", + "Republic ans", + "is al", + "wind ows", + "ĠSant os", + "Ġveget ation", + "33 8", + "t ri", + "Ġfl ux", + "ins ert", + "Ġclar ified", + "Ġmort g", + "ĠCh im", + "ĠT ort", + "Ġdiscl aim", + "met al", + "ĠAs ide", + "Ġindu ction", + "Ġinf l", + "Ġathe ists", + "amp h", + "Ġe ther", + "ĠV ital", + "ĠBu ilt", + "M ind", + "Ġweapon ry", + "S ET", + "Ġ18 6", + "ad min", + "g am", + "cont ract", + "af a", + "Ġderiv atives", + "Ġsn acks", + "Ġch urn", + "E conom", + "Ġca pped", + "ĠUnder standing", + "ĠH ers", + "ĠI z", + "Ġd uct", + "I ENT", + "augh ty", + "Ġâľ Ķ", + "ĠN P", + "Ġsa iling", + "In itialized", + "Ġt ed", + "Ġreact ors", + "ĠL omb", + "Ġcho ke", + "ĠW orm", + "Ġadm iration", + "Ġsw ung", + "ens ibly", + "Ġr ash", + "ĠGo als", + "ĠImport ant", + "Sh ot", + "ĠR as", + "Ġtrain ers", + "ĠB un", + "Work ing", + "Ġhar med", + "ĠPand ora", + "ĠL TE", + "Ġmush room", + "ĠCH AR", + "ĠF ee", + "ĠM oy", + "B orn", + "ol iberal", + "ĠMart ial", + "Ġgentle men", + "Ġling ering", + "Offic ial", + "Ġgra ffiti", + "ĠN ames", + "D er", + "Ġqu int", + "ist rate", + "aze era", + "ĠNOT ICE", + "ĠFlore nce", + "Ġpay able", + "Ġdep icts", + "ĠSpe cies", + "He art", + "âĶĢâĶĢâĶĢâĶĢ âĶĢâĶĢâĶĢâĶĢ", + "Ġencl osed", + "Incre ases", + "D aily", + "ĠL is", + "Ġenact ment", + "ĠB acon", + "ĠSt eele", + "dem and", + "Ġ18 3", + "Ġmouth s", + "Ġstr anded", + "Ġenhance ment", + "01 1", + "ĠWh ats", + "Ġhe aled", + "en y", + "ĠR ab", + "Ġ3 40", + "ĠLab yrinth", + "ro ach", + "ĠY osh", + "ĠCl ippers", + "Ġconcert s", + "Intern et", + "35 5", + "Ġstick ers", + "Ġter med", + "ĠAx e", + "Ġgrand parents", + "Fr ance", + "ĠCl im", + "ĠU h", + "ul ic", + "Ġthr ill", + "cent ric", + "ĠOver view", + "ĠCond uct", + "Ġsubstant ive", + "Ġ18 2", + "m ur", + "Ġstr ay", + "ĠCo ff", + "Ġrep etitive", + "ĠFor gotten", + "Ġqual ification", + "ew itness", + "ĠZ imbabwe", + "Ġsim ulated", + "ĠJ D", + "25 3", + "ĠW are", + "Ġun sc", + "T imes", + "Ġsum mons", + "Ġdis connected", + "Ġ18 4", + "ci us", + "ĠGu jar", + "od ka", + "Ġer ase", + "ĠTob acco", + "elect ed", + "Ġun cont", + "ĠShe pard", + "ĠL amp", + "Ġalert ed", + "Ġoper ative", + "arn a", + "u int", + "Ġneglig ence", + "ac ements", + "Ġsup ra", + "Ġprev ail", + "ĠSh ark", + "Ġbel ts", + "ãģ «", + "Ġt 
ighter", + "Engine ers", + "Ġin active", + "Ġexp onent", + "ĠWill ie", + "a ples", + "Ġhe ir", + "ĠH its", + "ian n", + "ĠS ays", + "Ġcurrent s", + "ĠBeng al", + "Ġar ist", + "B uffer", + "Ġbree ze", + "ĠWes ley", + "Col a", + "Ġpron oun", + "Ġde ed", + "ĠK ling", + "Ġof t", + "Ġinf lict", + "Ġpun ishing", + "Ġn m", + "ik u", + "OD UCT", + "01 4", + "Ġsubsid y", + "ĠDE A", + "ĠHer bert", + "ĠJ al", + "B ank", + "Ġdef erred", + "Ġship ment", + "B ott", + "Ġal le", + "b earing", + "HT ML", + "Off line", + "Ġ2 13", + "Ġscroll ing", + "Ġsc anned", + "ĠLib yan", + "ĠT OP", + "ch rom", + "d t", + "col umn", + "Psy NetMessage", + "Z ero", + "Ġtor so", + "0 50", + "âķ IJ", + "Ġimp erson", + "ĠSchw artz", + "ud ic", + "Ġpiss ed", + "ĠS app", + "25 7", + "ĠIS Ps", + "og l", + "Ġsuper vised", + "Ġad olescent", + "Ġatt ained", + "ĠDel ivery", + "ĠB unny", + "Ġ19 37", + "Ġmini ature", + "Ġo s", + "Ġ3 70", + "60 8", + "ĠMour inho", + "Ġinn ate", + "Ġtem po", + "ĠN M", + "ĠFall en", + "00 9", + "Ġprov ocative", + "Stream er", + "ĠBened ict", + "ĠBol she", + "Ġt urtle", + "ĠPC B", + "ĠEqu al", + "Direct or", + "ĠR end", + "Ġflu ids", + "Author ities", + "Ġcous ins", + "requ ency", + "ĠNeigh bor", + "s ets", + "sh ared", + "Char les", + "pass word", + "Ġg ears", + "Ġ2 11", + "ĠHard ware", + "ri ka", + "Ġup stream", + "H om", + "Ġdisproportion ately", + "iv ities", + "Ġund efined", + "Ġelect rons", + "Ġcommem or", + "Event ually", + "Ġ> <", + "Ġir responsible", + "2 18", + "ĠRe leased", + "ĠO VER", + "ĠI GN", + "ĠB read", + "st ellar", + "ĠS age", + "tt ed", + "dam age", + "ed ition", + "ĠPre c", + "Ġl ime", + "Ġconf inement", + "Ġcal orie", + "we apon", + "Ġdiff ering", + "ĠS ina", + "m ys", + "am d", + "Ġintric ate", + "k k", + "ĠP AT", + "ã o", + "st ones", + "lin ks", + "Ġr anch", + "Sem itic", + "Ġdifferent iate", + "ĠS inger", + "occup ied", + "Ġfort ress", + "c md", + "Ġinter ception", + "ĠAnk ara", + "Ġre pt", + "ĠSol itaire", + "Ġrem ake", + "p red", + "Ġd ared", + "aut ions", + "ĠB ACK", + "Run ning", + "Ġdebug ging", + "Ġgraph s", + "3 99", + "ĠNig el", + "Ġb un", + "Ġpill ow", + "Ġprog ressed", + "fashion ed", + "Ġob edience", + "ER N", + "Ġrehe ars", + "C ell", + "t l", + "S her", + "Ġher ald", + "ĠPay ment", + "ĠC ory", + "ĠDe pt", + "Ġrep ent", + "ĠWe ak", + "uck land", + "Ġple asing", + "Ġshort ages", + "Ġjur ors", + "ĠK ab", + "q qa", + "Ant i", + "Ġw ow", + "ĠRC MP", + "Ġt sun", + "ĠS ic", + "Ġcomp rises", + "Ġsp ies", + "Ġprec inct", + "n u", + "Ġur ges", + "Ġtim ed", + "Ġstrip es", + "ĠB oots", + "Ġy en", + "Adv anced", + "Ġdisc rete", + "ĠArch angel", + "employ ment", + "D iff", + "Ġmon uments", + "Ġ20 9", + "work er", + "Ġ19 6", + "ĠI g", + "utter stock", + "T PS", + "J ac", + "Ġhomeless ness", + "Ġcomment ator", + "Ġrac ially", + "f ing", + "se ed", + "E le", + "ell ation", + "Ġeth anol", + "Ġpar ish", + "ĠD ong", + "ĠAw akening", + "Ġdev iation", + "ĠB earing", + "ĠTsu k", + "Ġrec ess", + "Ġl ymph", + "ĠCann abis", + "å ľ", + "ĠNEW S", + "Ġd ra", + "ĠStef an", + "ĠWr ong", + "ĠS AM", + "Ġloose ly", + "Ġinterpre ter", + "ĠPl ain", + "Go vernment", + "Ġbigot ry", + "Ġgren ades", + "ave z", + "pict ured", + "Ġmand ated", + "ĠMon k", + "ĠPed ro", + "Ġl ava", + "27 4", + "Ġcyn ical", + "ĠScroll s", + "l ocks", + "M p", + "Ġcon gregation", + "orn ings", + "ph il", + "ĠI bid", + "Ġf erv", + "Ġdisapp earing", + "Ġarrog ant", + "sy n", + "ĠMa ver", + "ĠSu it", + "24 1", + "Ġab bre", + "ack ers", + "P a", + "ĠY el", + "Whe never", + "Ġ23 5", + "ĠV ine", + "ĠAn at", + "Ġext inct", + "LE T", + 
"Ġexecut able", + "V ERS", + "ox ide", + "D NA", + "ĠP rel", + "Ġresent ment", + "Ġcompr ise", + "ĠAv iv", + "Ġinter ceptions", + "Ġprol ific", + "IN A", + "ĠEr in", + "though t", + "2 19", + "ĠPsychiat ry", + "un ky", + "chem ist", + "H o", + "ĠMcC oy", + "Ġbr icks", + "L os", + "ri ly", + "ĠUS SR", + "Ġr ud", + "Ġl aud", + "ĠW ise", + "ĠEmer ald", + "Ġrev ived", + "Ġdam ned", + "ĠRep air", + "id em", + "ct ica", + "Ġpatri arch", + "ĠN urs", + "me g", + "Ġcheap est", + "re ements", + "empt y", + "ĠCele br", + "Ġdepri vation", + "ch anted", + "ĠTh umbnails", + "E nergy", + "ĠEth an", + "ĠQ ing", + "Ġopp oses", + "W IND", + "v ik", + "ĠM au", + "ĠS UB", + "66 7", + "G RE", + "ĠVol unte", + "nt on", + "C ook", + "å IJ", + "es que", + "Ġplum met", + "Ġsu ing", + "Ġpron ounce", + "Ġresist ing", + "ĠF ishing", + "ĠTri als", + "Ġy ell", + "Ġ3 10", + "Ġin duct", + "Ġpersonal ized", + "oft en", + "R eb", + "EM BER", + "Ġview point", + "Ġexist ential", + "() )", + "rem ove", + "MENT S", + "l asses", + "Ġev apor", + "Ġa isle", + "met a", + "Ġreflect ive", + "Ġentit lement", + "Ġdev ised", + "mus ic", + "asc ade", + "Ġwind ing", + "off set", + "Ġaccess ibility", + "ke red", + "Bet ter", + "ĠJohn ston", + "th inking", + "S now", + "ĠCroat ia", + "ĠAt omic", + "27 1", + "34 8", + "Ġtext book", + "ĠSix th", + "Ġ اÙĦ", + "Ġsl ider", + "ĠBur ger", + "b ol", + "S ync", + "Ġgrand children", + "Ġc erv", + "+ )", + "Ġe ternity", + "Ġtweet ing", + "Ġspec ulative", + "Ġpiv otal", + "ĠW P", + "ĠT ER", + "ynam ic", + "Ġu pl", + "ĠC ats", + "per haps", + "Ġclass mates", + "Ġblat ant", + "' -", + "Ġl akh", + "ant ine", + "ĠB org", + "i om", + "/ (", + "ĠAthlet ic", + "Ġs ar", + "OT A", + "ĠHoff man", + "Never theless", + "Ġad orable", + "Ġspawn ed", + "Ass ociated", + "ĠDom estic", + "Ġimpl ant", + "ĠLux em", + "ĠK ens", + "Ġp umps", + "ĠS AT", + "Att ributes", + "50 9", + "av our", + "Ġcentral ized", + "ĠT N", + "Ġfresh ly", + "ĠA chieve", + "Ġouts iders", + "her ty", + "ĠRe e", + "ĠT owers", + "ĠD art", + "ak able", + "Ġm p", + "ĠHeaven ly", + "Ġr ipe", + "ĠCarol ine", + "ry an", + "Ġclass ics", + "Ġret iring", + "Ġ2 28", + "Ġa h", + "Ġdeal ings", + "Ġpunch ing", + "ĠChap man", + "O ptions", + "max well", + "vol ume", + "Ġst al", + "Ġex ported", + "ĠQu ite", + "Ġnumer ical", + "B urn", + "F act", + "ĠKey stone", + "Ġtrend ing", + "Ġalter ing", + "ĠAfric ans", + "47 8", + "ĠM N", + "ĠKn ock", + "Ġtempt ation", + "Ġprest ige", + "Over view", + "ĠTrad itional", + "ĠBah rain", + "Priv ate", + "ĠH OU", + "Ġbar r", + "ĠT at", + "C ube", + "US D", + "ĠGrand e", + "ĠG at", + "ĠFl o", + "Ġres ides", + "Ġind ec", + "vol ent", + "Ġperpet ual", + "ub es", + "Ġworld view", + "ĠQuant um", + "Ġfil tered", + "Ġen su", + "orget own", + "ERS ON", + "ĠM ild", + "37 9", + "OT T", + "à ¥", + "Ġvit amins", + "Ġrib bon", + "Ġsincere ly", + "ĠH in", + "Ġeight een", + "Ġcontradict ory", + "Ġgl aring", + "Ġexpect ancy", + "Ġcons pir", + "Ġmon strous", + "Ġ3 80", + "re ci", + "Ġhand ic", + "Ġpump ed", + "Ġindic ative", + "Ġr app", + "Ġav ail", + "ĠLEG O", + "ĠMar ijuana", + "19 85", + "ert on", + "Ġtwent ieth", + "################ ################", + "ĠSw amp", + "Ġval uation", + "Ġaffili ates", + "adjust ed", + "ĠFac ility", + "26 2", + "Ġenz ymes", + "itud inal", + "Ġimp rint", + "S ite", + "Ġinstall er", + "ĠT RA", + "m ology", + "lin ear", + "ĠCollect ive", + "ig ating", + "ĠT oken", + "Ġspec ulated", + "K N", + "ĠC ly", + "or ity", + "Ġdef er", + "Ġinspect ors", + "appro ved", + "R M", + "ĠSun s", + "Ġinform ing", + "ĠSy racuse", + 
"ib li", + "7 65", + "Ġgl ove", + "Ġauthor ize", + "âĢ¦âĢ¦âĢ¦âĢ¦ âĢ¦âĢ¦âĢ¦âĢ¦", + "ĠCru ise", + "Ġcontract ing", + "she ll", + "IF E", + "ĠJew el", + "p ract", + "ĠPhot oshop", + "ĠKnow ing", + "h arm", + "Ġattract ions", + "ad an", + "et us", + "01 8", + "w agen", + "Al t", + "Ġmultip ly", + "Ġequ ilibrium", + ": {", + "ĠF ighters", + "ĠEd gar", + "Ġfour teen", + "Go vern", + "Ġmis use", + "Ġab using", + "Ġancest ry", + "ram er", + "64 4", + "Ġwor ms", + "Ġthick er", + "ĠComb ine", + "Ġpeas ants", + "Ġv ind", + "Ġcon quest", + "Ġm ocked", + "Ġc innamon", + "ĠC ald", + "ĠGall up", + "Ġavoid ance", + "Ġincarn ation", + "ĠStr at", + "Ġt asted", + "ent a", + "ĠN eal", + "p ared", + "Ġtermin ology", + "ject ion", + "Scient ists", + "ĠIN S", + "ĠDe e", + "Ġdirect ories", + "R oad", + "ĠSh ap", + "br ight", + "ĠDirect ors", + "ĠCol umn", + "Ġb ob", + "Ġprefer ably", + "Ġgl itch", + "f urt", + "Ġe g", + "id is", + "C BC", + "Ġsur rendered", + "Ġtest ament", + "33 6", + "ug gest", + "ĠN il", + "an other", + "Ġpat hetic", + "ĠDon na", + "Ġ2 18", + "ĠA very", + "Ġwhis key", + "Ġf ixture", + "ĠCon quest", + "Ġbet s", + "O cc", + "ĠLe icester", + "] .\"", + "Ġ) );", + "Ġfl ashes", + "45 6", + "Ġmask ed", + "ge bra", + "Ġcomput ed", + "che l", + "aud er", + "Ġdefe ats", + "ĠLiber ation", + "ĠOs ama", + "ĠV ive", + "Ch anges", + "Ch annel", + "Ġtar iffs", + "Ġm age", + "ĠS ax", + "Ġinadvert ently", + "ĠC RE", + "ĠRe aper", + "ink y", + "gr ading", + "Ġstere otyp", + "Ġcur l", + "ĠF ANT", + "Ġfram eworks", + "M om", + "ĠAn ch", + "Ġflav our", + "car bon", + "Ġperm itting", + "let cher", + "ĠMo zilla", + "ĠPark ing", + "ĠCh amp", + "Sc roll", + "Ġmurd erer", + "Ġrest ed", + "Ġow es", + "ĠP oss", + "AD D", + "IF F", + "res olution", + "ĠMin ing", + "Ġcompar ative", + "D im", + "Ġneighbour ing", + "ĠA ST", + "ĠT oxic", + "Ġbi ases", + "Ġgun fire", + "ur ous", + "ĠMom ent", + "19 83", + "Ġper vasive", + "tt p", + "ĠNorm ally", + "r ir", + "S arah", + "ĠAlb any", + "Ġun sett", + "ĠS MS", + "ip ers", + "l ayer", + "ĠWh ites", + "up le", + "Ġtur bo", + "ĠLe eds", + "Ġthat s", + "ĠMin er", + "M ER", + "ĠRe ign", + "Ġper me", + "ĠBl itz", + "Ġ19 34", + "Ġintimid ating", + "t ube", + "Ġecc entric", + "ab olic", + "box es", + "ĠAssoci ates", + "v otes", + "Ġsim ulate", + "um bo", + "aster y", + "Ġship ments", + "FF FF", + "an th", + "Ġseason ed", + "Ġexperiment ation", + "âĸ ł", + "law s", + "Me et", + "idd les", + "ant ics", + "R ating", + "IS IS", + "h ift", + "Ġfront s", + "b uf", + "01 7", + "Ġun att", + "ĠD il", + "le ases", + "ĠGard ens", + "77 7", + "t ouch", + "ve ll", + "45 8", + "Ġ= ====", + "s aving", + "Ġer osion", + "ĠQu in", + "Ġearn s", + "Ġaccomplish ment", + "ĠWe i", + "Ġ< [", + "____ _", + "Ġir rig", + "ĠT eddy", + "Ġconqu ered", + "ĠArm ored", + "Ġassert s", + "Ġmanip ulating", + "r é", + "Ġtranscript s", + "G allery", + "Ġplot ting", + "Ne il", + "Ġbetray al", + "load er", + "ĠS ul", + "Ġdispl acement", + "Ġroy alty", + "ĠW I", + "he it", + "ĠDev ices", + "alle l", + "Ġmunicipal ities", + "Ġcan al", + "St ars", + "ĠU AE", + "Ġ\" âĢ¦", + "ĠC U", + "ab ove", + "Ġreson ance", + "ĠguiActive Un", + "add ed", + "ĠBra ves", + "ĠI bn", + "Ġhere by", + "ĠB RE", + "Ġshare holder", + "ĠH ir", + "ĠJ i", + "Ġstrange ly", + "Ġadm ired", + "Ġpl ight", + "Ġb achelor", + "ĠP ole", + "cipl inary", + "T ony", + "ĠArmen ian", + "Ġun man", + "ĠZion ist", + "St age", + "isco ver", + "Ġautom otive", + "Ġs idelines", + "Ġsl ick", + "ĠRena issance", + "ĠF UN", + "Im ages", + "ĠH aj", + "Ġp ing", + "Ġshort cut", + "ĠBl 
vd", + "ĠLook s", + "Ġbur sts", + "Ġcl amp", + "Ġm ish", + "Ġsort ing", + "Ġpatri ot", + "Ġcorrect ness", + "ĠScand inav", + "ĠCaval iers", + "p ython", + "az ar", + "Ġ3 75", + "ĠJa une", + "40 9", + "Ġdetrim ental", + "Ġstab bing", + "Ġpoison ed", + "Ġf ountain", + "oc ent", + "or st", + "ĠMar i", + "Ġr ains", + "ĠO vers", + "ĠInst itution", + "ud get", + "AM Y", + "t ale", + "ĠK R", + "ĠPr ices", + "Ġhead aches", + "Ġlands l", + "ĠA ura", + "Bon us", + "ĠZ hao", + "ĠH ip", + "Ġhop s", + "ĠKurd istan", + "Ġexplo iting", + "ry n", + "Ġhypocr isy", + "op ening", + "Ġgun shot", + "Ġw ed", + "inter stitial", + "Inter stitial", + "Ġam en", + "Bre aking", + "Ġmarket ed", + "W ire", + "ĠC rowd", + "Contin ue", + "ĠK nown", + "ĠEffect ive", + "ore an", + "iz ons", + "Jose ph", + "Ġescal ation", + "us ername", + "Ġcur tain", + "AT ES", + "ĠP AR", + "ĠM iy", + "Ġcounter fe", + "l ene", + "Ġcont enders", + "d aily", + "ĠAs c", + "ĠPhill ip", + "most ly", + "Ġfil ename", + "he ne", + "Ġresemb ling", + "Ġst aging", + "ĠCh loe", + "Ġw iring", + "H on", + "ĠRen ew", + "ott age", + "ĠHy brid", + "m uch", + "Ġstro kes", + "Ġpolicy makers", + "AP TER", + "ĠArk ham", + "pl ot", + "Ġassist ants", + "Ġde port", + "ĠSe ga", + "Ġinflu enza", + "ĠC ursed", + "ĠK obe", + "Ġskin ny", + "Prov ider", + "ĠR ip", + "Ġincrement al", + "product s", + "B F", + "Ġd ome", + "ĠC redits", + "Ġlos ers", + "int s", + "ĠBet ty", + "ĠTal ent", + "ĠD AM", + "L v", + "E ss", + "Ġd ens", + "tem p", + "J udge", + "od ic", + "Ġ' (", + "UR ES", + "ets k", + "V O", + "Ġretrie ved", + "Ġarchitect s", + "Ù ĩ", + "Ġeth ic", + "ĠSecond ary", + "st ocks", + "ad ia", + "Ġ3 25", + "ĠOp inion", + "Ġsimultane ous", + "Ġd izz", + "ul p", + "Ġsmugg ling", + "ipp ery", + "R andom", + "f acing", + "ĠD as", + "Ġstock p", + "Ġdiscl osures", + "po inter", + "Ġcor al", + "ĠSe lection", + "ĠP ike", + "ival ent", + "Ġruth less", + "ĠR im", + "Ġensu ing", + "ĠExper iment", + "Ġcongress man", + "Ġbelie ver", + "Ġun specified", + "ĠM ord", + "Ġknowledge able", + "ĠV ERY", + "T X", + "Ġstra ps", + "Ġtur f", + "apesh ifter", + "Ġmar ital", + "Ġfl ock", + "ãģ Ĩ", + "26 3", + "AM ES", + "ĠOpp osition", + "Ġtre asures", + "ĠG OD", + "Ġmodel ed", + "ĠWOR LD", + "Ġ( [", + "ĠUs age", + "H F", + "Ġ$ (", + "uss ed", + "Ġpione er", + "E ight", + "par se", + "b read", + "rit z", + "ĠMir anda", + "ĠK ant", + "++ )", + "ore n", + "Ġprov oked", + "Ġbre eds", + "ĠIn cludes", + "ĠPast ebin", + "ĠFl ip", + "J ava", + "Ġbr ink", + "Ġrum ored", + "Ġun seen", + "Ġgar nered", + "ĠDef in", + "al ted", + "Ġtatt oos", + "Ġhes itation", + "is itions", + "ĠWe aver", + "ĠReport ing", + "Ġtherap ies", + "Ġconsult ants", + "Ġresid ual", + "ĠMal i", + "ĠRom a", + "i ago", + "ĠRes idents", + "ub i", + "Ġremed ies", + "Ġadapt ive", + "ĠAl ive", + "ĠBar cl", + "Ġwal lets", + "c rypt", + "etermin ation", + "ĠPel osi", + "Ġsl ipping", + "oton in", + "Ġall iances", + "pat rick", + "ir is", + "Ġor th", + "ĠPer kins", + "ĠDe V", + "ĠG ets", + "Ġdry ing", + "ge e", + "fore st", + "ĠFor get", + "ore m", + "33 9", + "Ġvague ly", + "ĠD ion", + "ĠP orn", + "ĠH OW", + "Ġp neum", + "Ġrub ble", + "ĠT aste", + "enc ia", + "ĠG el", + "Ġd st", + "Ġ24 5", + "ĠMoroc co", + "inf lamm", + "ĠTw ins", + "Ġb ots", + "d aughter", + "ĠB alk", + "Ġbre thren", + "Ġlog os", + "Ġgo bl", + "f ps", + "Ġsub division", + "Ġp awn", + "Ġsquee zed", + "Ġmor ale", + "ĠD W", + "' \"", + "Ġkn ot", + "ook y", + "Ġdiv isive", + "Ġboost ed", + "ch y", + "ãĥ IJ", + "if act", + "Ġnewcom ers", + "ĠWrest ling", + "Ġsc outs", + "w 
olves", + "R at", + "Ġnin eteenth", + "ĠOs borne", + "St ats", + "Ġem powered", + "Ġpsych opath", + "ĠO EM", + "ugg age", + "ĠP K", + "ĠMoh ammad", + "P ak", + "Ġanarch ists", + "ĠExt ract", + "est hes", + "ĠStock holm", + "l oo", + "ĠG raph", + "Ġdeploy ing", + "ĠStr anger", + "ĠM old", + "Ġstaff er", + "Ġdiscount ed", + "uck le", + "ple ase", + "ĠLand ing", + "ÃŃ a", + "Ġ19 3", + "Ġan te", + "Ġrep etition", + "Ġ+ /-", + "Ġpar ody", + "Ġlive ly", + "AA A", + "ĠHor us", + "Ġp its", + "ind ers", + "L OC", + "ĠVen ice", + "40 6", + "ĠDis cover", + "â Ĩ", + "ellect ual", + "Ġp ens", + "Ġey el", + "ig uous", + "Im pl", + "Ġj oking", + "Ġinv al", + "ĠBel fast", + "Ġcredit ors", + "ĠSky walker", + "ov sky", + "Ġcease fire", + "Ġse als", + "is oft", + ") ).", + "ĠFel ix", + "IT S", + "Ġt resp", + "ĠBlock chain", + "ew are", + "ĠSch war", + "en ne", + "mount ed", + "ĠBe acon", + "les h", + "Ġimmense ly", + "Ġche ering", + "Em ploy", + "sc ene", + "ish ly", + "atche wan", + "ĠNic olas", + "Ġdr ained", + "ĠEx it", + "ĠAz erb", + "j un", + "Ġflo ated", + "u ania", + "De ep", + "Ġsuper v", + "Ġmyst ical", + "ĠD ollar", + "ĠApost le", + "ĠR EL", + "ĠProv ided", + "ĠB ucks", + "ãĥ ´", + "cut ting", + "Ġenhance ments", + "ĠPengu ins", + "ĠIsa iah", + "Ġj erk", + "ĠW yn", + "Ġst alled", + "Ġcryptoc urrencies", + "ĠR oland", + "sing le", + "Ġl umin", + "ĠF ellow", + "ĠCap acity", + "ĠKaz akh", + "W N", + "Ġfin anced", + "38 9", + "Ġt id", + "Ġcoll usion", + "ĠMy r", + "î Ģ", + "Sen ator", + "Ġped iatric", + "Ġneat ly", + "Ġsandwic hes", + "ĠArchitect ure", + "Ġt ucked", + "Ġbalcon y", + "Ġearthqu akes", + "qu ire", + "F uture", + "Ġhe fty", + "é Ĺ", + "Ġspecial izes", + "Ġstress es", + "Ġs ender", + "Ġmisunder standing", + "Ġep ile", + "Ġprov oke", + "ĠCol ors", + "Ġdis may", + "uk o", + "[ _", + "58 6", + "ne utral", + "Ġdon ating", + "ĠRand all", + "Mult i", + "Ġconvenient ly", + "ĠS ung", + "ĠC oca", + "Ġt ents", + "ĠAc celer", + "Ġpart nered", + "27 2", + "ir ming", + "ĠB AS", + "s ometimes", + "Ġobject ed", + "ub ric", + "p osed", + "LC S", + "gr ass", + "Ġattribut able", + "V IS", + "Israel i", + "Ġrepe ats", + "ĠR M", + "v ag", + "ut a", + "in ous", + "Ġin ert", + "ĠMig uel", + "æ Ń", + "ĠHawai ian", + "B oard", + "Ġart ific", + "ĠAzerb ai", + "as io", + "ĠR ent", + "A IN", + "Ġappl iances", + "Ġnational ity", + "Ġass hole", + "ĠN eb", + "Ġnot ch", + "h ani", + "ĠBr ide", + "Av ailability", + "Ġintercept ed", + "Ġcontin ental", + "Ġsw elling", + "ĠPers pect", + "b ies", + ". <", + "ith metic", + "ĠL ara", + "Ġtempt ing", + "add r", + "Ġoversee ing", + "cl ad", + "ĠD V", + "ĠGing rich", + "Ġm un", + "ĠApp ropri", + "Ġalter ations", + "ĠPat reon", + "Ġha voc", + "Ġdiscipl ines", + "Ġnotor iously", + "aku ya", + "ier i", + "? 
).", + "ĠW ent", + "Ġsil icon", + "Ġtre mb", + "Cont ainer", + "K nown", + "Ġmort ar", + "est e", + "ick a", + "Ar thur", + "ĠPre viously", + "ĠMart y", + "Ġsp arse", + "g ins", + "Ġin ward", + "ĠParticip ant", + "C opy", + "ĠM isc", + "Ġantib iotic", + "ĠRet ro", + "Ġel usive", + "Ġass ail", + "ĠBatt alion", + "ĠB ought", + "Ġdimin ish", + "ĠEuro pa", + "s ession", + "ĠDanger ous", + "ies el", + "Ġdisbel ief", + "Ġbl asts", + "ext reme", + "ĠBoy d", + "ĠProject s", + "ĠGu ys", + "Ġunder gone", + "Ġgr ill", + "ĠDw ight", + "Ġ19 7", + "US ER", + "Ġfiles ystem", + "Ġcl ocks", + "T aylor", + "Ġwra pper", + "Ġfold ing", + "ous and", + "ĠPhilipp ine", + "ATION AL", + "ĠPer th", + "Ġas hes", + "Ġaccum ulate", + "ĠGate way", + "Sh op", + "orks hire", + "H an", + "ĠBar rel", + "ĠLe h", + "ĠX V", + "Ġwh im", + "Ġrep o", + "ĠC G", + "ĠM am", + "Ġincorpor ating", + "Ġbail out", + "Ġlingu istic", + "Ġdis integ", + "C LE", + "Ġcinem atic", + "ĠF iber", + "S yn", + "il ion", + "ĠCom pos", + "c hens", + "Ġne oc", + "Ġbo iled", + "F INE", + "on o", + "un cle", + "ik en", + "ĠB M", + "Î ¹", + "Ġreceipt s", + "Ġdisp osed", + "ĠTh irty", + "ĠR ough", + "ĠA BS", + "Ġnot withstanding", + "oll en", + "# $", + "Ġunrel iable", + "Ġbl oom", + "Ġmedi ocre", + "Ġtr am", + "ĠTas man", + "Ġsh akes", + "Ġmanifest o", + "ĠM W", + "Ġsatisf actory", + "Ġsh ores", + "Ġcomput ation", + "Ġassert ions", + "orm ons", + "ar ag", + "ab it", + "Dem ocrats", + "ĠL oot", + "ĠVol ks", + "ha ired", + "Ġgrav itational", + "S ing", + "ĠM iz", + "Ġthro ttle", + "Ġtyr anny", + "ĠView s", + "Ġrob ber", + "ĠMinor ity", + "Ġsh rine", + "sc ope", + "pur pose", + "Ġnucle us", + "our cing", + "ĠUS DA", + "ĠD HS", + "w ra", + "ĠBow ie", + "Sc ale", + "ĠB EL", + "x i", + "I ter", + "Ġ( ),", + "w right", + "Ġsail ors", + "ous ed", + "NAS A", + "ĠPro of", + "ĠMin eral", + "t oken", + "ĠF D", + "R ew", + "Ġe ll", + "6 30", + "Ġchance llor", + "ĠG os", + "Ġamount ed", + "ĠRec re", + "ome z", + "ĠOpt im", + "ĠOl ive", + "Ġtrack er", + "ow ler", + "ĠUn ique", + "R oot", + "Ġmar itime", + "ĠQur an", + "ĠAd apt", + "Ġecosystem s", + "ĠRe peat", + "ĠS oy", + "ĠI MP", + "Ġgrad uating", + "and em", + "P ur", + "ĠRes et", + "ĠTr ick", + "ĠPh illy", + "ĠT ue", + "ĠMalays ian", + "Ġclim ax", + "Ġb ury", + "Ġcons pic", + "ĠSouth ampton", + "ĠFl owers", + "Ġesc orted", + "ĠEduc ational", + "ĠI RC", + "Ġbrut ally", + "e ating", + "Ġpill ar", + "ĠS ang", + "ĠJ ude", + "ar ling", + "ĠAm nesty", + "Ġrem inding", + "ĠAdminist rative", + "hes da", + "Ġfl ashed", + "ĠP BS", + "per ate", + "fe ature", + "Ġsw ipe", + "Ġgra ves", + "oult ry", + "26 1", + "bre aks", + "ĠGu er", + "Ġsh rimp", + "ĠV oting", + "qu ist", + "Ġanaly tical", + "Ġtables poons", + "ĠS OU", + "Ġresear ched", + "Ġdisrupt ed", + "Ġj our", + "Ġrepl ica", + "Ġcart oons", + "b ians", + "} )", + "c opy", + "G ot", + "ou ched", + "P UT", + "Ġsw arm", + "not ations", + "s aid", + "Ġreb uilt", + "Ġcollabor ate", + "Ġr aging", + "Ġn ar", + "Ġdem ographics", + "ĠD DR", + "Ġdist rust", + "oss ier", + "ĠK ro", + "Ġpump kin", + "Ġreg rets", + "Ġfatal ities", + "ĠL ens", + "ĠO le", + "p d", + "Ġpupp et", + "ĠOut look", + "ĠSt am", + "O l", + "F air", + "U U", + "Ġre written", + "Ä ±", + "Ġfasc inated", + "Ġve ctors", + "Ġtrib unal", + "u ay", + "ĠM ats", + "ĠCo ins", + "[ [", + "Ġ18 1", + "Ġrend ers", + "ĠK aepernick", + "Ġesp ionage", + "Ġsum m", + "Ġd itch", + "Acc ount", + "Ġspread sheet", + "Ġmut ant", + "p ast", + "40 7", + "Ġd ye", + "Ġinit iation", + "Ġ4 000", + "Ġpunish able", + "Ġth inner", + "ĠKh al", + 
"Ġinter medi", + "D un", + "ĠGoth am", + "Ġeager ly", + "Ġvag inal", + "p owers", + "V W", + "ĠWATCH ED", + "Ġpred ator", + "ams ung", + "Ġdispar ity", + "Ġ[ *", + "Ġam ph", + "Ġout skirts", + "ĠSpir its", + "Ġskelet al", + "Ð »", + "ĠR ear", + "Ġissu ance", + "ĠLog ic", + "re leased", + "Z Z", + "ĠB ound", + "Ent ry", + "Ġex its", + "is ol", + "ĠFound er", + "Ġw re", + "ĠGreen land", + "ĠM MO", + "t aker", + "IN C", + "ãģ ¾", + "Ġhour ly", + "hen ko", + "Ġfantas ies", + "Ġdis ob", + "Ġdemol ition", + "ãĥ ĭ", + "Ġen listed", + "rat ulations", + "Ġmis guided", + "Ġens ured", + "Ġdiscour aged", + "m ort", + "Ġfl ank", + "Ġc ess", + "Ġreact s", + "ĠS ere", + "s ensitive", + "ĠSer pent", + "ass ad", + "Ġ24 7", + "Ġcalm ly", + "b usters", + "Ġble ed", + "ĠSt ro", + "Ġamuse ment", + "ĠAntar ctica", + "Ġs cept", + "ĠG aw", + "a q", + "ason ic", + "Ġsp rawling", + "n ative", + "atur ated", + "ĠBattle field", + "IV ERS", + "E B", + "ĠG ems", + "ĠNorth western", + "ĠFil ms", + "ĠAut omatic", + "Ġappre hend", + "ãģ ¨", + "Ġgui Name", + "Ġback end", + "Ġevid enced", + "ge ant", + "01 2", + "ĠS iege", + "Ġexternal To", + "Ġunfocused Range", + "ĠguiActiveUn focused", + "Ġgui Icon", + "ĠexternalTo EVA", + "ĠexternalToEVA Only", + "F ri", + "ch ard", + "en aries", + "Ġchief s", + "Ġc f", + "ĠH UD", + "Ġcorro bor", + "Ġd B", + "ĠT aken", + "ĠPat ricia", + "ra il", + "ĠCh arm", + "ĠLiber tarian", + "rie ve", + "Person al", + "ĠO UR", + "ger ies", + "Ġdump ing", + "Ġneurolog ical", + "it imate", + "ĠClint ons", + "raft ed", + "ĠM olly", + "Ġtermin als", + "reg ister", + "Ġfl are", + "Ġenc oded", + "Ġautop sy", + "p el", + "m achine", + "Ġexempt ions", + "ĠRoy als", + "d istance", + "Ġdraft s", + "Ġl ame", + "ĠC unning", + "Ġsp ouses", + "ĠMark ets", + "ĠCar rier", + "Ġimp lying", + "ĠY ak", + "s id", + "Ġl oser", + "Ġvigil ant", + "Ġimpe achment", + "Ġaug mented", + "ĠEmploy ees", + "Ġunint ended", + "tern ally", + "ĠW att", + "Ġrecogn izable", + "ess im", + "æ Ŀ", + "Ġco ated", + "r ha", + "Ġlie utenant", + "ĠLegisl ation", + "pub lished", + "44 4", + "01 3", + "Ġide ally", + "ĠPass word", + "Ġsimpl ify", + "ĠMet a", + "ĠM RI", + "Ġple ading", + "organ ized", + "hand ler", + "Ġun ravel", + "cor rect", + "Ġ icy", + "Ġparan oid", + "Ġpass er", + "Ġinspect ions", + "of er", + "ĠHealth care", + "28 3", + "ĠBr ut", + "iol a", + "for ge", + "ĠMed ieval", + "MS N", + "ie vers", + "ĠProgram ming", + "å ī", + "Ġ2 23", + "m u", + "ĠC LE", + "ug a", + "Ġsho ppers", + "Ġinform ative", + "ĠPl ans", + "Ġsupplement ation", + "ĠT ests", + "ty ard", + "ocy tes", + "ĠVeg a", + "ĠGujar at", + "erman ent", + "Ex cept", + "ĠL OT", + "all a", + "ĠC umm", + "ĠO sw", + "Ġven om", + "ĠDeb t", + "ĠD OWN", + "Ġreun ion", + "Ġm uc", + "ĠRel ief", + "Ġge op", + "ĠðŁ ĺ", + "al ogue", + "An th", + "ech o", + "Ġcor ros", + "Ġrepl ication", + "ĠBl azing", + "ĠD aughter", + "Ġinf lic", + "ĠLind sey", + "Ù Ī", + "28 4", + "Ex it", + "Ġgl oom", + "TA IN", + "Ġundermin ing", + "Ġadv ising", + "h idden", + "Ġover flow", + "Ġg or", + "urd ue", + "Ġe choes", + "enh agen", + "Ġimp uls", + "d rug", + "c ash", + "Ġas ync", + "Ġmir ac", + "at ts", + "p unk", + "Ġpiv ot", + "ĠLegisl ative", + "Ġblog gers", + "ĠCl aw", + "s burg", + "d yl", + "ĠRecomm end", + "Ġver te", + "Ġprohib iting", + "ĠPant her", + "Jon athan", + "Ġo min", + "Ġhate ful", + "28 1", + "ĠOr che", + "ĠMurd och", + "down s", + "Ġas ymm", + "G ER", + "Al ways", + "Ġinform s", + "ĠW M", + "ĠP ony", + "ĠApp endix", + "ĠAr lington", + "J am", + "Ġmedic inal", + "ĠS lam", + "IT IES", + 
"Ġre aff", + "ĠR i", + "F G", + "S pring", + "b ool", + "Ġthigh s", + "Ġmark ings", + "ĠRa qqa", + "ĠL ak", + "p oll", + "ts ky", + "ĠMort y", + "ĠDef inition", + "Ġdeb unk", + "end ered", + "ĠLe one", + "a vers", + "Ġmortg ages", + "App arently", + "N ic", + "ha us", + "ĠTh ousands", + "au ld", + "Ġm ash", + "sh oot", + "Ġdi arr", + "Ġconscious ly", + "H ero", + "e as", + "ĠN aturally", + "ĠDestroy er", + "Ġdash board", + "serv ices", + "R og", + "Ġmillenn ials", + "Ġinv ade", + "- (", + "Ġcomm issions", + "ĠA uckland", + "Ġbroadcast s", + "Ġfront al", + "Ġcr ank", + "ĠHist oric", + "Ġrum ours", + "CT V", + "Ġster il", + "Ġboost er", + "rock et", + "ãĤ ¼", + "ut sche", + "ĠP I", + "Ġ2 33", + "ĠProdu cer", + "ĠAnaly tics", + "Ġinval uable", + "Ġunint ention", + "ĠC Y", + "Ġscrut in", + "Ġg igg", + "Ġeng ulf", + "Ġprolet ariat", + "Ġh acks", + "ĠH ew", + "ar ak", + "ĠSl ime", + "ield ing", + "ag her", + "ĠEll iot", + "Ġtele com", + "Ġ2 19", + "ult an", + "ĠAr bor", + "ĠSc outs", + "B an", + "Ġlifes pan", + "Ġbl asp", + "38 8", + "Ġjud iciary", + "ĠContin ental", + "ask ing", + "Mc C", + "L ED", + "Ġbag gage", + "ĠSorce rer", + "Ġrem nants", + "ĠGriff ith", + "ets u", + "ĠSub aru", + "ĠPerson ality", + "des igned", + "ush ima", + "agn ar", + "Ġrec oil", + "Ġpass ions", + "\\ \":", + "Ġte e", + "Ġabol ition", + "ĠCreat ing", + "j ac", + "Ġ19 4", + "01 9", + "Ġpill ars", + "ric hed", + "/ \"", + "t k", + "Ġlive lihood", + "Ġro asted", + "ah on", + "ĠH utch", + "ass ert", + "Ġdivid end", + "Ġkn it", + "Ġd aunting", + "Ġdisturb ance", + "Ġsh ale", + "Ġcultiv ated", + "Ġrefriger ator", + "L B", + "ĠN ET", + "Ġcommercial s", + "Ġthink ers", + "45 5", + "Ġch op", + "B road", + "Ġsuspic ions", + "Ġtag ged", + "l ifting", + "Ġsty lish", + "ĠShield s", + "Short ly", + "Ġt ails", + "A uth", + "ST E", + "ĠG AME", + "Ġse ism", + "ĠK is", + "olog ne", + "Ġcow ork", + "Ġforc ibly", + "Ġthy roid", + "ĠP B", + "AN E", + "mar ried", + "h orse", + "Ġpoly mer", + "ĠCh al", + "od or", + "DE BUG", + "ĠCon text", + "Ġbl iss", + "Ġpin point", + "ĠMat hemat", + "leg ram", + "ĠWeek end", + "Ġlab elled", + "Ġb art", + "it les", + "Ġest rogen", + "âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ", + "\" '", + "Ġvis ibly", + "Ġouts ider", + "aid a", + "Are a", + "Ġdisse min", + "Ġdish onest", + "ĠCl osed", + "ĠBullet in", + "ĠRam sey", + "sw ord", + "ĠX I", + "our ced", + "S ame", + "34 6", + "ĠRe pe", + "ĠK ou", + "c ake", + "em is", + "C ache", + "ĠMe aning", + "ĠEn light", + "onom y", + "Ġmanifest ation", + "sw orth", + "J ay", + "Ġch ore", + "ö r", + "D ream", + "Ġsanction ed", + "Ġcult urally", + "ĠA ra", + "N av", + "Ġthe ological", + "Ġstr ut", + "ĠV O", + "ĠHand book", + "Ġconstruct ing", + "Ġ ¶", + "ĠBenef its", + "ĠPsych ological", + "s ac", + "å ¸", + "p olicy", + "ĠMat ters", + "ĠReport ed", + "ĠBy te", + "Ġvit ro", + "ĠM aiden", + "Ġl am", + "ĠJenn ings", + "Ġgar ment", + "ĠRut gers", + "ĠStaff ord", + "ĠWell ington", + "Ġinter mitt", + "Ġn pm", + "Ġord eal", + "Ġplug ged", + "o oming", + "in ished", + "fram ework", + "Ġtim ber", + "Ġc ass", + "Ġ8 50", + "il ess", + "ĠRed ux", + "7 68", + "St re", + "Ġsurpass ed", + "w hel", + "Ġparalle ls", + "Ġve il", + "ĠG I", + "ĠR EST", + "Ġread iness", + "s ort", + "Ġmod ifying", + "ĠSl ate", + "ru ff", + "Ġmar ble", + "Ġinf rared", + "Ġaud itor", + "ĠFANT ASY", + "ĠP overty", + "ĠS PD", + "Ġ\" (", + "K y", + "RA Y", + "Ġexecut ions", + "ĠBever ly", + "ĠMarx ism", + "ĠBur st", + "ĠK ali", + "est ones", + "Clear ly", + "E ll", + "ãģ §", + "ĠProceed ings", + "T oken", 
+ "IF IC", + "ñ a", + "Cent ral", + "ĠH aley", + "ĠD rama", + "Ġform ations", + "OR N", + "Book s", + "Ġdom inating", + "ĠFly ers", + "ĠCompan ion", + "Ġdiscipl ined", + "ĠYug oslav", + "ĠSpell s", + "Ġv engeance", + "Ġland lords", + "L en", + "ĠO gre", + "ano ia", + "Ġpier cing", + "Ġcon greg", + "Ġscore r", + "ob ia", + "Ġnic kel", + "ĠLear ns", + "Ġre jo", + "Ġmaster piece", + "Fl ash", + "Ġinhab ited", + "ĠOpen GL", + "ĠD ud", + "ĠI CO", + "Ġar ter", + "Ġpl ur", + "Ġmaster y", + "Ġlong standing", + "st ed", + "Ġw ines", + "Ġtelev ised", + "ĠSh rine", + "ĠBay ern", + "Ġâ ĵĺ", + "Ġencl osure", + "j ohn", + "Ġprophe ts", + "ĠRes urrection", + "ĠOrd ers", + "Ġun even", + "r als", + "Ġd wind", + "ĠL ah", + "ĠSl oven", + "37 8", + "Ġins istence", + "aff le", + "ĠCl one", + "Ġhard ship", + "ĠCongress man", + "Ġple ad", + "Ġreview ers", + "Ġc ured", + "Ġ19 35", + "as ley", + "f ake", + "ĠTh inking", + "yd ia", + "P ART", + "ĠD ota", + "o it", + "Ġwh ipped", + "Ġb ouncing", + "ĠHispan ics", + "com ings", + "Ġcann abin", + "ĠCh ambers", + "ĠZ ack", + "Option al", + "Ġco ats", + "Ġprow ess", + "ĠNort on", + "Ġplain ly", + "Ġfre ight", + "Ġinhib ition", + "Ġcl am", + "Ġ30 3", + "ke f", + "ale igh", + "L uke", + "Ġpsych o", + "ator ium", + "M ED", + "Ġtreat ies", + "Ġind isc", + "Ġd c", + "OP S", + "Ġresil ient", + "ĠInter state", + "Ġsl ack", + "Ġmund ane", + "Ġestab lishes", + "35 9", + "Ġstr ained", + "Ġn ond", + "S us", + "Ġcast e", + "ar ate", + "ie ving", + "Ġunfair ly", + "Ġpars er", + "on ial", + "urs ive", + "V ia", + "ĠOtt o", + "ĠAuthor ities", + "stro ke", + "K R", + "ĠMer cy", + "Ġfurn ished", + "Ġout set", + "Ġmet ic", + "19 82", + "olith ic", + "ĠT ent", + "og ical", + "ĠA ircraft", + "Ġh ides", + "ĠBec ame", + "Ġeduc ators", + "re aching", + "Ġvol atility", + "Ġtodd ler", + "ĠNAS CAR", + "ĠTw elve", + "ĠHigh lights", + "Ġgra pe", + "Ġspl its", + "Ġpe asant", + "Ġre neg", + "ĠMS I", + "Tem p", + "st ars", + "Ġtre k", + "ĠHy de", + "b inding", + "Ġreal ism", + "Ġox ide", + "ĠH os", + "Ġmount s", + "Ġbit ing", + "Ġcollaps ing", + "Ġpost al", + "Ġmuse ums", + "Ġdet ached", + "Ġrespect ing", + "Ġmonop ol", + "Ġwork flow", + "ĠC ake", + "Tem plate", + "ĠOrgan isation", + "Ġpers istence", + "36 9", + "C oming", + "B rad", + "Ġredund ant", + "ĠG TA", + "Ġb ending", + "Ġrev oked", + "Ġoff ending", + "Ġfram ing", + "Ġprint f", + "Comm un", + "mem bers", + "Out side", + "Ġconst rued", + "Ġc oded", + "F ORE", + "Ġch ast", + "Ch at", + "Ind ian", + "ĠY ard", + "? 
!\"", + "ĠP orts", + "ĠX avier", + "ĠR ET", + "' .\"", + "ĠBo at", + "iv ated", + "ich t", + "umer able", + "D s", + "ĠDun n", + "Ġcoff in", + "Ġsecure ly", + "ĠRapt ors", + "ĠB es", + "Install ation", + "Ġin ception", + "ĠHealth y", + "end ants", + "Ġpsych ologists", + "ĠShe ikh", + "c ultural", + "ĠBlack Berry", + "sh ift", + "F red", + "oc he", + "Ġc akes", + "ĠS EO", + "ĠG ian", + "ĠAs ians", + "og ging", + "e lement", + "Ġpund its", + "ĠV augh", + "ĠG avin", + "Ġh itter", + "Ġdrown ed", + "Ġch alk", + "ĠZ ika", + "Ġmeas les", + "80 2", + "âĢ¦ ..", + "ĠAW S", + "] \"", + "Ġdist ort", + "ĠM ast", + "Ġantib odies", + "ĠM ash", + "Mem ory", + "ĠUg anda", + "ĠPro b", + "Ġvom iting", + "ĠTurn s", + "Ġoccup ying", + "Ġev asion", + "ĠTher apy", + "Ġprom o", + "Ġelect r", + "Ġblue print", + "ĠD re", + "pr iced", + "ĠDep ot", + "Ġallev iate", + "ĠSom ali", + "m arg", + "n ine", + "Ġnostalg ia", + "ĠShe pherd", + "Ġcaval ry", + "Ġtor ped", + "ĠBlood y", + "x b", + "Ġs ank", + "Ġgo alt", + "report print", + "embed reportprint", + "clone embedreportprint", + "ĠIn itially", + "ĠF ischer", + "Ġnot eworthy", + "c ern", + "Ġin efficient", + "raw download", + "rawdownload cloneembedreportprint", + "c ation", + "ĠD ynasty", + "l ag", + "D ES", + "Ġdistinct ly", + "ĠEston ia", + "Ġopen ness", + "Ġg ossip", + "ru ck", + "W idth", + "ĠIb rahim", + "Ġpet roleum", + "Ġav atar", + "ĠH ed", + "ath a", + "ĠHog warts", + "Ġc aves", + "67 8", + "Ġsafegu ard", + "ĠM og", + "iss on", + "ĠDur ham", + "sl aught", + "ĠGrad uate", + "Ġsub conscious", + "ĠEx cellent", + "ĠD um", + "---- -", + "Ġp iles", + "ĠW ORK", + "ĠG arn", + "ĠF ol", + "ĠAT M", + "Ġavoid s", + "ĠT ul", + "Ġble ak", + "EL Y", + "iv ist", + "light ly", + "P ers", + "ĠD ob", + "ĠL S", + "Ġins anity", + "Î µ", + "atal ie", + "En large", + "Ġtw ists", + "Ġfault y", + "Ġpir acy", + "Ġimp over", + "Ġrug ged", + "ĠF ashion", + "Ġs ands", + "' ?", + "sw ick", + "Ġn atives", + "Ġhe n", + "ĠNo ise", + "ãĥ Ĺ", + "Ġg reens", + "Ġfree zer", + "Ġd ynasty", + "ĠFather s", + "ĠNew ark", + "Ġarchae ological", + "Ġo t", + "ob ar", + "Ġblock ade", + "Ġall erg", + "L V", + "Ġdeb it", + "ĠR FC", + "ĠMil ton", + "ĠPress ure", + "Ġwill ingly", + "Ġdisproportion ate", + "Ġopp ressive", + "Ġdiamond s", + "Ġbelong ings", + "19 70", + "Ġbell s", + "Ġimperial ism", + "Ġ2 27", + "Ġexpl oding", + "ĠE clipse", + "Ġ19 19", + "Ġr ant", + "Ġnom inations", + "34 7", + "Ġpeace fully", + "ric a", + "ĠF UCK", + "Ġvib ration", + "mal ink", + "Ġro pes", + "ĠIv anka", + "ĠBrew ery", + "ĠBook er", + "ĠOw ens", + "go ers", + "Serv ices", + "ĠSn ape", + "Ġ19 1", + "39 5", + "Ġ2 99", + "just ice", + "Ġb ri", + "Ġdisc s", + "Ġprom inently", + "Ġvul gar", + "Ġsk ipping", + "l ves", + "Ġtsun ami", + "37 4", + "ĠU rug", + "ĠE id", + "rec ated", + "p hen", + "Ġfault s", + "ĠStart ed", + "9 50", + "Ġp i", + "Ġdetect or", + "Ġbast ard", + "Ġvalid ated", + "Space Engineers", + "OUR CE", + "Ġ( ~", + "Ġuns ur", + "Ġaff irmed", + "Ġfasc ism", + "Ġres olving", + "ĠCh avez", + "ĠC yn", + "Ġdet ract", + "L ost", + "Ġrig ged", + "Ġhom age", + "ĠBrun o", + "55 5", + "ec a", + "Ġpress es", + "Ġhum our", + "Ġsp acing", + "Ġ' /", + "olk ien", + "C oun", + "OP ER", + "T re", + "S on", + "ĠCambod ia", + "ier re", + "m ong", + "o zy", + "Ġliquid ity", + "ĠSov iets", + "ĠFernand o", + "Ġ2 29", + "Ġsl ug", + "ĠCatal an", + "elect ric", + "Ġsc enery", + "ĠH earth", + "Ġconst rained", + "Ġgoal ie", + "ĠGu idelines", + "ĠAm mo", + "ĠPear son", + "Ġtax ed", + "Ġfet us", + "Resp onse", + "ĠAlex is", + "th ia", + "G uy", + 
"Ġrecon struct", + "Ġextrem es", + "Ġconclud ing", + "ĠP eg", + "ook s", + "Ġded uctions", + "R ose", + "Ġground breaking", + "ĠT arg", + "ãĥ ģ", + "ĠRe ve", + "res ource", + "Ġmo ons", + "Ġelectrom agnetic", + "Ġamid st", + "ĠVik tor", + "N ESS", + "B ACK", + "Ġcomm ute", + "ĠAna heim", + "Ġfluct uations", + "6 40", + "Ġnood les", + "ĠCop enhagen", + "ĠT ide", + "ĠGri zz", + "ĠS EE", + "Ġpip elines", + "Ġsc ars", + "end o", + "ag us", + "ĠE TF", + "/ #", + "ĠBec ome", + "44 8", + "Ġvis c", + "ĠRecomm ended", + "Ġj umper", + "Ġcogn ition", + "Ġassass in", + "Ġwitness ing", + "ĠSet up", + "Ġl ac", + "v im", + "IS M", + "p ages", + "SS L", + "35 8", + "Ġad ject", + "indust rial", + "l ore", + "cher y", + "Ġgl itter", + "Ġc alf", + "Flor ida", + "Ġspoil ers", + "Ġsucceed s", + "Ġch anting", + "Ġslog ans", + "ĠTr acy", + "Vis it", + "rol ogy", + "Ġm ornings", + "Ġline age", + "Ġs ip", + "Ġintense ly", + "Ġflour ish", + "ĠSle eping", + "ĠF em", + "or por", + "ĠK lan", + "ĠDar th", + "h ack", + "ĠNi elsen", + "Ġtum ors", + "Ġprocure ment", + "ĠY orkshire", + "Ġra ided", + "K Y", + "An na", + "Ġ// [", + "ĠDis order", + "ĠMust ang", + "ĠW en", + "ĠTry ing", + "s q", + "Ġdeliver ies", + "Ġshut ter", + "Ġcere bral", + "Ġbip olar", + "ĠC N", + "l ass", + "j et", + "Ġdeb ating", + "> :", + "Ġe agle", + "gr ades", + "ĠD ixon", + "UG C", + "M AS", + "ĠDr aco", + "ĠMach ines", + "aff er", + "Ġem an", + " ²", + "pr on", + "ĠG ym", + "Ġcompar atively", + "ĠTrib unal", + "PR O", + "Ġle x", + "Ġfert ile", + "Ġdep ressing", + "Ġsuperf icial", + "ess ential", + "ĠHun ters", + "g p", + "Ġprom inence", + "L iber", + "ĠAn cest", + "ote chnology", + "Ġm ocking", + "ĠTra ff", + "ĸ ļ", + "Med ium", + "I raq", + "Ġpsychiat rist", + "Quant ity", + "ĠL ect", + "Ġno isy", + "5 20", + "G Y", + "Ġsl apped", + "ĠM TV", + "Ġpar a", + "p ull", + "Mult iple", + "as her", + "Ġn our", + "ĠSe g", + "Spe ll", + "v ous", + "ord ial", + "Sen ior", + "ĠGold berg", + "ĠPl asma", + "ne ed", + "Ġmess enger", + "ere t", + "Ġteam ed", + "Ġliter acy", + "ĠLe ah", + "ĠD oyle", + "Ġem itted", + "U X", + "Ġev ade", + "Ġm aze", + "Ġwrong ly", + "ĠL ars", + "Ġstere otype", + "Ġpled ges", + "Ġarom a", + "ĠM ET", + "Ġac re", + "ĠO D", + "Ġf f", + "Ġbrew eries", + "ĠH ilton", + "und le", + "ĠK ak", + "ĠThank fully", + "ĠCan ucks", + "in ctions", + "ĠApp ears", + "Ġco er", + "Ġundermin ed", + "ro vers", + "And re", + "Ġbl aze", + "um ers", + "Ġfam ine", + "amp hetamine", + "ulk an", + "Am ount", + "Ġdesper ation", + "wik ipedia", + "develop ment", + "ĠCor inth", + "uss ia", + "Jack son", + "L I", + "N ative", + "R s", + "Oh io", + "ĠKath leen", + "F ortunately", + "Ġattend ant", + "ĠPre ferred", + "ĠDid n", + "ĠV s", + "M is", + "Ġrespond ent", + "Ġb oun", + "st able", + "Ġp aved", + "Ġunex pl", + "ĠChe ney", + "L M", + "ĠC ull", + "bl own", + "Ġconfront ing", + "oc ese", + "serv ing", + "W i", + "ĠLith uania", + "ann i", + "Ġst alk", + "h d", + "Ġv ener", + "AP H", + "ynchron ous", + "UR R", + "um ably", + "hist oric", + "H alf", + "H ay", + "Ġresil ience", + "spe ction", + "Ġabandon ing", + "O bs", + "ĠDeb bie", + "Ġgrad ient", + "ĠPl aint", + "ĠCan al", + "AR CH", + "Ġexpans ive", + "Ġfun g", + "Ġb ounced", + "U nd", + "Ġprec autions", + "Ġclar ification", + "Ġd agger", + "Ġgri ps", + "Ġ µ", + "ĠRiver a", + "ĠUnd ead", + "is ites", + "ĠFIR ST", + "ñ o", + "aud i", + "Ġhost ages", + "Ġcompl iant", + "Ġal umni", + "Se ven", + "Ġcyber security", + "e ither", + "Col lect", + "Ġinvari ably", + "ĠS oci", + "Ġlaw maker", + "Ġa le", + "ĠPerson ally", + 
"N azi", + "Ġcustom ization", + "ĠPro c", + "ĠSask atchewan", + "eat uring", + "Ġsp ared", + "Ġdiscontin ued", + "Ġcomput ational", + "ĠMotor ola", + "Ġsuprem acist", + "government al", + "Ġparad ise", + "ĠDown ing", + "ĠNik on", + "Ġcat alyst", + "ber ra", + "Tor onto", + "8 75", + "bet a", + "ĠMac ron", + "Ġunreal istic", + "ve ctor", + "ĠVeh icles", + "it iveness", + "ĠR V", + "ĠCol bert", + "s in", + "o ji", + "ent in", + "ĠKr ish", + "hell o", + "ff ield", + "ok y", + "ĠT ate", + "Ġmap le", + "Ġa ids", + "chem ical", + "33 4", + "n uts", + "ĠWar p", + "Ġx x", + "ĠRob b", + "umer ous", + "_- _", + "ft ime", + "ĠV W", + "Ġw inger", + "ĠD ome", + "t ools", + "ĠP V", + "ĠGe orgetown", + "Ġg eared", + "Ġjihad ists", + "Ġc p", + "Ġster oids", + "M other", + "cler osis", + "ĠDR M", + "nes ia", + "Ġl inger", + "Ġimm ersive", + "ĠC OUN", + "Ġoutwe igh", + "ens ual", + "B and", + "Ġtransform s", + "mat ched", + "ps ons", + "ĠJud icial", + "f actor", + "Ġrefer ral", + "Ġodd ly", + "ĠW enger", + "B ring", + "ĠB ows", + "60 2", + "IC LE", + "Ġl ions", + "ĠAcad emic", + "ĠTh orn", + "ĠRa ider", + "kef eller", + "St orage", + "L ower", + "ĠOr t", + "ĠEqu ality", + "AL T", + "ĠS OC", + "T ypes", + "Ġl yn", + "ĠAss et", + "co at", + "TP P", + "C VE", + "ĠPione er", + "app lication", + "Mod ern", + "ĠH K", + "En vironment", + "Al right", + "R ain", + "IP P", + "ĠShi ite", + "Ġm ound", + "ĠAb ilities", + "cond ition", + "St aff", + "Ġcompet ence", + "ĠM oor", + "ĠDi ablo", + "Ġwith held", + "Ġost ensibly", + "ĠB rom", + "Ġms g", + "Ġden omin", + "ĠRef erences", + "ĠF P", + "Ġplun ged", + "Ġp amph", + "m oving", + "cent ral", + "Ġdown right", + "Ġf ading", + "T al", + "T yp", + "ĠTh y", + "uk es", + "it he", + "Ġo ve", + "Ġbatt led", + "Ġseaf ood", + "Ġfig ur", + "ĠR D", + "c rop", + "Ġsqu ads", + "{ \\", + "à ¹", + "ĠE h", + "Ġinterview ing", + "ĠQ in", + "Ġas piring", + "PL IC", + "Ġcla uses", + "ĠG ast", + "ĠN ir", + "Ġl uggage", + "Ġh ose", + "Ġsystem d", + "Ġdesc ending", + "ĠRev ised", + "ĠR ails", + "al ign", + "70 9", + "33 7", + "Ġf ug", + "charg ing", + "t ags", + "Ġut er", + "k ish", + "WAR NING", + "49 0", + "prof its", + "Ġvoy age", + "Ġa ce", + "ĠV anguard", + "ĠT anks", + "ĠM uk", + "Ġ2 26", + "S afe", + "Ar mor", + "Ġvolcan ic", + "Ġwom b", + "ĠM IL", + "Ġbegin ner", + "ĠRec ogn", + "ĠA AP", + "PL AY", + ") !", + "Ġdetect ing", + "c n", + "Ġbre aches", + "Bas ically", + "ĠP ag", + "ĠMunicip al", + "ĠInd ie", + "ĠL af", + "ĠDis able", + "ĠOl son", + "Ġrest rained", + "Ġrul ings", + "Ġhum ane", + "ev ents", + "ĠCinem a", + "display Text", + "ĠH atch", + "action Date", + "onna issance", + "Ġassault ing", + "ĠL ug", + "CH AT", + "Ġvig orous", + "ĠPer se", + "Ġintoler ance", + "ĠSnap chat", + "ĠSh arks", + "Ġd ummy", + "ĠDi agn", + "ĠGu itar", + "im eters", + "40 3", + "RE G", + "A x", + "Ġsepar ates", + "ĠMah m", + "Ġt v", + "j ah", + "O OL", + "C irc", + "ĠWinds or", + "uss ian", + "Ġintu ition", + "Ġdis dain", + "ĠDon ovan", + "Ġ2 21", + "E mb", + "Ġcondem ning", + "Ġgener osity", + "zz y", + "Ġpant ies", + "ĠPre vent", + "Action Code", + "AN A", + "34 2", + "external ActionCode", + "Ġspec ifying", + "Ġcryst all", + "J ere", + "Ġru pt", + "ĠApp rentice", + "Ġprof iling", + "Ð º", + "St rike", + "Ġsid eline", + "Ġoblig ated", + "Ġocc ult", + "Ġbureaucr atic", + "ant ically", + "rupt ed", + "neg ative", + "ĠEthiop ia", + "ĠC ivic", + "Ġins iders", + "el igible", + "ĠTV s", + "ĠB AR", + "ĠT I", + "i ologist", + "ĠA IR", + "Ġsubstit uted", + "Ar ab", + "ĠS aul", + "ĠY og", + "p rem", + "Ġbuild 
ers", + "Ġstation ary", + "Ġdoubt ful", + "Ġvig orously", + "Ġthr illing", + "Ph ysical", + "ĠCare y", + "ĠHyd ra", + "geon ing", + "ĠS ly", + "y ton", + "Ġborrow ers", + "ĠPark inson", + "Ġ ë", + "ĠJama ica", + "Ġsat ir", + "Ġinsurg ents", + "ĠF irm", + "Ġis ot", + "ĠK arn", + "our ning", + "ak ens", + "doc s", + "l ittle", + "ĠMon aco", + "CL ASS", + "Tur key", + "L y", + "ĠCon an", + "ass ic", + "Ġstar red", + "ĠPac ers", + "et ies", + "Ġt ipping", + "M oon", + "ĠR w", + "s ame", + "Ġcav ity", + "Ġgo of", + "ĠZ o", + "Sh ock", + "um mer", + "Ġemphas izes", + "Ġreg rett", + "Ġnovel ty", + "Ġen vy", + "ĠPass ive", + "r w", + "50 5", + "Ġind ifferent", + "ĠR ica", + "ĠHim self", + "ĠFred die", + "Ġad ip", + "ä¸ Ģ", + "Ġbreak out", + "Ġhur ried", + "ĠHu ang", + "ĠD isk", + "Ġro aming", + "?????- ?????-", + "U V", + "ĠRick y", + "ĠS igma", + "Ġmarginal ized", + "Ġed its", + "Ġ30 4", + "mem ory", + "Ġspec imen", + "29 3", + "ãģ ¯", + "Ġvert ically", + "Ġaud ition", + "ĠHe ck", + "Ġc aster", + "ĠHold ings", + "ad al", + "ĠC ron", + "ĠL iam", + "Ġdef lect", + "P ick", + "ĠDeb ug", + "RE F", + "Ġvers atility", + "ot hes", + "class ified", + "ĠMah ar", + "ĠH ort", + "C ounter", + "st asy", + "not iced", + "33 1", + "ĠSh im", + "f uck", + "ĠB ie", + "Ġair ing", + "ĠPro tein", + "ĠHold ing", + "Ġspect ators", + "ili ated", + "ĠThat cher", + "n osis", + "ãĥ¼ ãĥ³", + "Te le", + "B oston", + "ĠTem pl", + "st ay", + "Ġdecl arations", + "47 9", + "Vol ume", + "ĠDesign er", + "ĠOver watch", + "id ae", + "Ġon wards", + "Ġn ets", + "ĠMan ila", + "part icularly", + "Ġpolit ic", + "o other", + "Ġport raits", + "Ġpave ment", + "c ffff", + "Ġs aints", + "Ġbegin ners", + "ES PN", + "Ġshort comings", + "âķIJ âķIJ", + "Ġcom et", + "ĠOrgan ic", + "qu el", + "Ġhospital ized", + "Bre ak", + "Ġpe el", + "dyl ib", + "asp x", + "ur ances", + "ĠT IM", + "P g", + "Ġread able", + "ĠMal ik", + "Ġm uzzle", + "Ġbench marks", + "d al", + "ĠV acc", + "ĠH icks", + "60 9", + "ĠB iblical", + "he ng", + "Ġover load", + "ĠCivil ization", + "Ġimm oral", + "Ġf ries", + "ãĤ Ĵ", + "Ġreprodu ced", + "Ġform ulation", + "j ug", + "ire z", + "g ear", + "Ġco ached", + "Mp Server", + "ĠS J", + "ĠK w", + "In it", + "d eal", + "ĠO ro", + "ĠL oki", + "ĠSong s", + "Ġ23 2", + "ĠLou ise", + "asion ally", + "Ġunc ond", + "olly wood", + "Ġprogress ives", + "ĠEn ough", + "ĠDo e", + "Ġwreck age", + "Ġbr ushed", + "ĠBase Type", + "Ġz oning", + "ish able", + "het ically", + "ĠC aucus", + "ĠH ue", + "Ġk arma", + "ĠSport ing", + "Ġtrad er", + "Ġseem ing", + "ĠCapt ure", + "4 30", + "b ish", + "Ġt unes", + "Ġindo ors", + "ĠSp here", + "ĠD ancing", + "TER N", + "Ġno b", + "ĠG ST", + "m aps", + "Ġpe ppers", + "F it", + "Ġoverse es", + "ĠRabb i", + "ĠR uler", + "vert ising", + "off ice", + "xx x", + "Ġra ft", + "Ch anged", + "Ġtext books", + "L inks", + "ĠO mn", + "ãĢ ij", + "Ġinconven ience", + "ĠDon etsk", + "= ~", + "Ġimplicit ly", + "Ġboost s", + "ĠB ones", + "ĠBo om", + "Cour tesy", + "Ġsens ational", + "AN Y", + "Ġgre edy", + "ed en", + "Ġinex per", + "ĠL er", + "ĠV ale", + "Ġtight en", + "ĠE AR", + "ĠN um", + "Ġancest or", + "S ent", + "ĠH orde", + "urg ical", + "all ah", + "Ġsa p", + "amb a", + "ĠSp read", + "tw itch", + "Ġgrand son", + "Ġfract ure", + "Ġmoder ator", + "ĠSe venth", + "ĠRe verse", + "Ġestim ation", + "Cho ose", + "Ġpar ach", + "Ġbar ric", + "ãĢ IJ", + "Ġcomp ass", + "Ġall ergic", + "âĢ ķ", + "OT HER", + "err illa", + "Ġw agon", + "Ġz inc", + "Ġrub bed", + "ĠFull er", + "ĠLuxem bourg", + "ĠHoo ver", + "Ġli ar", + "ĠEven ing", + "ĠCob 
b", + "est eem", + "Ġselect or", + "ĠB rawl", + "is ance", + "ĠE k", + "Ġtro op", + "Ġg uts", + "ĠApp eal", + "ĠTibet an", + "Ġrout ines", + "ĠM ent", + "Ġsummar ized", + "steam apps", + "Ġtr anqu", + "Ġ19 29", + "or an", + "ĠAut hent", + "Ġg maxwell", + "Ġappre hens", + "Ġpo ems", + "Ġsa usage", + "ĠWeb ster", + "ur us", + "Ġthem ed", + "Ġl ounge", + "Ġcharg er", + "Sp oiler", + "Ġsp illed", + "h og", + "ĠSu nder", + "ĠA in", + "ĠAng ry", + "Ġdis qual", + "ĠFrequ ency", + "ĠEther net", + "Ġhel per", + "Per cent", + "Ġhorr ifying", + "Ġa il", + "ĠAll an", + "EE E", + "ĠCross ing", + "44 9", + "Ġh olog", + "ĠPuzz les", + "ĠGo es", + "eren n", + "60 4", + "ãģ ı", + "ĠRaf ael", + "Ġatt en", + "ĠE manuel", + "Ġup ro", + "ĠSus p", + "P sych", + "ĠTr ainer", + "ĠN ES", + "ĠHun ts", + "bec ue", + "Ġcounsel or", + "R ule", + "Ġtox ins", + "Ġb anners", + "r ifice", + "Ġgreet ing", + "Ġfren zy", + "Ġall ocate", + "Ġ* )", + "ex pr", + "50 3", + "ĠCh ick", + "ĠT orn", + "Ġconsolid ation", + "ĠF letcher", + "sw itch", + "fr ac", + "cl ips", + "ĠMcK in", + "ĠLun ar", + "Mon th", + "IT CH", + "Ġscholar ly", + "rap ed", + "39 8", + "Ġ19 10", + "Ġe greg", + "Ġin secure", + "Ġvict orious", + "cffff cc", + "Ġsing led", + "Ġel ves", + "ĠW ond", + "bur st", + "Ġcam oufl", + "ĠBL ACK", + "Ġcondition ed", + "ç ī", + "ans wered", + "Ġcompuls ory", + "asc ist", + "Ġpodcast s", + "ĠFrank furt", + "bn b", + "Ġne oliberal", + "ĠKey board", + "ĠBel le", + "w arm", + "Ġtrust s", + "Ġins ured", + "ĠBu cc", + "us able", + "60 7", + "ĠPl ains", + "Ġ18 90", + "Ġsabot age", + "Ġlod ged", + "f elt", + "Ġg a", + "ĠN arc", + "ĠSal em", + "Ġsevent y", + "ĠBl ank", + "p ocket", + "Ġwhis per", + "Ġm ating", + "om ics", + "ĠSal man", + "ĠK ad", + "Ġan gered", + "Ġcoll isions", + "Ġextraord inarily", + "Ġcoerc ion", + "G host", + "b irds", + "è Ģ", + "k ok", + "Ġper missible", + "avor able", + "Ġpo inters", + "Ġdiss ip", + "ac i", + "Ġtheat rical", + "ĠCos mic", + "Ġforget ting", + "Ġfinal ized", + "å¤ §", + "y out", + "l ibrary", + "Ġbo oming", + "ĠBel ieve", + "ĠTe acher", + "ĠL iv", + "ĠGOOD MAN", + "ĠDomin ican", + "OR ED", + "ĠPart ies", + "Ġprecip itation", + "ĠSl ot", + "R oy", + "ĠComb ined", + "Ġinteg rating", + "Ġch rome", + "Ġintest inal", + "ĠRe bell", + "Ġmatch ups", + "Ġblock buster", + "ĠLore n", + "ĠLe vy", + "Ġpre aching", + "ĠS ending", + "ĠPur pose", + "ra x", + "f if", + "Ġauthor itative", + "ĠP ET", + "ast ical", + "Ġdish on", + "Ġchat ting", + "Ġ\"$ :/", + "Connect ion", + "Ġrecre ate", + "Ġdel inqu", + "Ġbro th", + "ĠD irty", + "ĠAd min", + "z man", + "Ġscholars hips", + "Ġ25 3", + "cont act", + "als a", + "7 67", + "c reen", + "abb age", + "Ġ19 15", + "Ġbl ended", + "Ġal armed", + "L anguage", + "35 6", + "Ġbl ends", + "ĠCh anged", + "W olf", + "Ġhe pat", + "Creat ing", + "Ġper secut", + "Ġsweet ness", + "art e", + "Ġforfe iture", + "ĠRober to", + "im pro", + "N FL", + "ĠMag net", + "Det ailed", + "Ġinsign ificant", + "ĠPOL IT", + "ĠBB Q", + "ĠC PS", + "Ġse aw", + "amin er", + "m L", + "end if", + "f inals", + "Ġ26 5", + "u ish", + "Ġ} )", + "ĠPro blems", + "Ġem blem", + "Ġserious ness", + "Ġpars ing", + "Ġsubst itution", + "Ġpress ured", + "Ġrecy cled", + "ale b", + "Rub y", + "Ġprof iciency", + "Dri ver", + "ĠW ester", + ": '", + "AF TA", + "Ġm antle", + "ĠClay ton", + "fl ag", + "Ġpractition er", + "c overed", + "ĠSt ruct", + "add afi", + "4 25", + "ĠTown ship", + "ĠHyd ro", + "Lou is", + "34 3", + "Ġcond o", + "ĠT ao", + "Ġutil ization", + "Ġnause a", + "ĠDem s", + "rid ges", + "p ause", + "Ġform ulas", 
+ "Ġchall enger", + "37 6", + "Ġdefect ive", + "ĠRail way", + "ĠPub Med", + "Ġyog urt", + "l bs", + "ĠNor folk", + "OP E", + "ĠMood y", + "Ġdistribut or", + "Ġscroll s", + "Ġextract s", + "St an", + "Ġv iability", + "Ġexp oses", + "Ġstar vation", + "ĠStep s", + "ĠD odd", + "f ew", + "ST D", + "33 2", + "Ġclos ures", + "Ġcomplement ary", + "ĠS asha", + "ump y", + "Ġmon et", + "Ġartic ulate", + "ĠDo ct", + "k iller", + "Ġsc rim", + "Ġ2 64", + "Ġprost itutes", + "Ġse vered", + "Ġattach ments", + "Ġcool ed", + "L ev", + "ĠF alk", + "f ail", + "Ġpolic eman", + "ĠD ag", + "Ġpray ed", + "ĠK ernel", + "Ġcl ut", + "Ġc ath", + "Ġan omaly", + "St orm", + "em aker", + "ĠBreak fast", + "ul i", + "o ire", + "J J", + "h z", + "Oper ation", + "ĠS ick", + "35 4", + "ĠGuatem ala", + "R ate", + "Ġexp osures", + "f aces", + "ĠArch ae", + "ra f", + "ĠM ia", + "Ġ20 25", + "Ġop aque", + "Ġdisgu ised", + "ĠHead quarters", + "S ah", + "Ġp ots", + "9 78", + "ĠM alf", + "Ġfrown ed", + "Ġpoison ous", + "ĠCon vers", + "ee ks", + "Ġcr ab", + ".\" \"", + "Ġtre ason", + "Ġr anc", + "Ġescal ating", + "Ġwar r", + "Ġmob s", + "Ġl amps", + "ĠSun shine", + "ĠBrun swick", + "Ph ones", + "Ġspe lled", + "ĠSk ip", + "Ġ20 50", + "Ġ19 11", + "ĠPl uto", + "ĠAm end", + "Ġme ats", + "38 7", + "Ġst omp", + "ĠZh ou", + "ĠLevi athan", + "ĠHaz ard", + "ad v", + "ĠOr well", + "Ġal oud", + "Ġb umper", + "ĠAn arch", + "ub untu", + "ĠSer ious", + "f itting", + "ĠOption al", + "ĠCec il", + "RE AM", + "Ġser otonin", + "Ġcultiv ate", + "ag ogue", + "} \\", + "Ġmos ques", + "ĠSun ny", + "Ġre active", + "rev olution", + "ĠL up", + "ĠFed ora", + "Ġdefense man", + "ĠV ID", + "ist ine", + "Ġdrown ing", + "ĠBroad casting", + "Ġthr iller", + "ĠS cy", + "Ġacceler ating", + "Ġdirect s", + "od ied", + "b ike", + "d uration", + "Ġpain fully", + "R edd", + "Ġproduct ions", + "Ġg ag", + "Ġwh ist", + "Ġs ock", + "Ġinf initely", + "ĠConc ern", + "ĠCit adel", + "Ġlie u", + "Ġcand les", + "ogene ous", + "arg er", + "Ġheaven ly", + "inflamm atory", + "Per formance", + "C s", + "ruct ose", + "az aki", + "Ġp essim", + "Ġinf erence", + "Ġpow d", + "ĠZ oe", + "Ġpain ts", + "Ġd azz", + "pt a", + "-------- ---", + "Ġins pir", + "ĠExper imental", + "ĠKn ife", + "reg or", + "b ors", + "Ġshow ers", + "rom eda", + "Ġs aint", + "Ġben ign", + "ĠJ iang", + "Ġenvision ed", + "Ġsh roud", + "IF T", + "H O", + "Ġsh uff", + "ĠI CC", + "Ġse greg", + "Ġrevis it", + "ighth ouse", + "L i", + "Ġsub strate", + "ĠSe as", + "ĠRew ard", + "ĠH ep", + "ĠBr ass", + "s bm", + "Ġelim inates", + "Ġst amina", + "ĠV AT", + "ĠLo an", + "Ġconst raint", + "Ġappropri ated", + "Ġp es", + "ĠA LE", + "r anging", + "Ġ40 4", + "39 2", + "Ġintellectual s", + "ach u", + "Ġrestruct uring", + "ĠLe vin", + "Ġrun es", + "Ġdelight ful", + "Ġcarbohyd rates", + "ĠMod els", + "ĠExp o", + "Ġtransport ing", + "all oc", + "Ġring ing", + "S amsung", + "Ġscarce ly", + "ĠURL s", + "ĠM AS", + "Ġprot otypes", + "Ġnarr ator", + "ĠCPU s", + "cd n", + "ĠBart on", + "Ġdecided ly", + "ĠSh u", + "ix ir", + "oc ious", + "ĠMy st", + "N intendo", + "Ġre use", + "Ġforg iven", + "F ew", + "in ical", + "n at", + "Ġseam less", + "ĠEv a", + "ĠE VE", + "ĠJ O", + "land ers", + "Ġso fter", + "neg ie", + "Ġtrans ient", + "Ġorb ital", + "Ġfulf il", + "ĠK om", + "Hop efully", + "Ġdynam ically", + "ĠHun ger", + "å Ľ", + "ĠArmen ia", + "el man", + "ber to", + "Ġp ige", + "ĠID s", + "lim it", + "Ġve ins", + "Ġso aring", + "p acks", + "Gold en", + "ĠCr ab", + "ist or", + "ĠR PM", + "Ġ$ $", + "g ression", + "Ġjihad ist", + "Ġgam ble", + "Ġcare g", 
+ "Ġinf lated", + "F ace", + "ĠFire arms", + "ĠEm manuel", + "â Ŀ", + "Ġsh ocks", + "gr ab", + "Ġspl end", + "ĠHP V", + "ab ortion", + "Ab ove", + "Ent ity", + "play ers", + "Ġcomm enced", + "ul ence", + "Ġfulfill ment", + "Ġembod iments", + "ĠW elfare", + "Ġha il", + "Ġ< @", + "tt en", + "Ġcat cher", + "ĠJ azeera", + "Ġvolcan o", + "Ġstabil ize", + "ĠHand ler", + "Ġintens ified", + "ĠAb rams", + "Ġhum iliation", + "p aced", + "60 5", + "ĠCent OS", + "Spe cific", + "Ġhe ed", + "ĠC AM", + "ĠGal ile", + "D ie", + "Ġabol ished", + "ĠThom son", + "ĠTe achers", + "ĠW ass", + "j ong", + "ĠIS BN", + "ĠAll ies", + "sh ake", + "å ·", + "v ict", + "How ard", + "Ġde em", + "Ġexceed ingly", + "ĠSmart stocks", + "ib e", + "Ġdoor way", + "Ġcompet ed", + "ig mat", + "Ġnational ists", + "Ġg room", + "ĠKe en", + "Ġdispos able", + "de cl", + "ĠT olkien", + "ĠSche me", + "Ġb iod", + "Ġav id", + "ĠEl on", + "ag ar", + "ĠT SA", + "R oman", + "Ġartific ially", + "Ġadvis ors", + "X L", + "ĠInf erno", + "36 6", + "Ġted ious", + "ĠPhot ography", + "ĠCar rie", + "Ġtro pe", + "ĠSand ra", + "Ġdec imal", + "Que en", + "ĠGund am", + "ĠO M", + "ote ch", + "N BA", + "Ġ19 32", + "Ġent renched", + "ĠMar ion", + "Ġfr aternity", + "Lab our", + "Hen ry", + "Ġlat itude", + "E ither", + "Ġenh ances", + "ĠPot ential", + "Ġsh ines", + "id ad", + "Ġbread th", + "Ġcapac ities", + "ĠðŁ ĻĤ", + "ĠBron x", + "Ġsex es", + "Ġdifferent iation", + "Ġheavy weight", + "ĠT aj", + "d ra", + "Ġmigr ate", + "Ġexhaust ion", + "ĠR UN", + "els ius", + "ĠCu omo", + "Ġgu itars", + "Ġcl ones", + "ĠSom ew", + "ĠP ry", + "------------ -", + "Ġwarr anted", + "cy cles", + "Ġsalv age", + "Ġdis ks", + "R ANT", + "ĠNGO s", + "ĠMart ian", + "\":[ {\"", + "Ġadd icts", + "oj ure", + "il let", + "Ġamazing ly", + "art ments", + "p ixel", + "ĠGPU s", + "Lay out", + "è £", + "ĠTam il", + "ĠBas il", + "Ġimpart ial", + "ĠSt ructure", + "f ork", + "b ryce", + "Ġr idge", + "ĠHamb urg", + "ri ous", + "Ġbl itz", + "cig arettes", + "Ġcan ned", + "40 2", + "Ġiron ically", + "Ġcompassion ate", + "ĠHaw kins", + ". 
#", + "ĠCat hedral", + "Ġrall ied", + "in ternal", + "Ġqu ota", + "st akes", + "T EXT", + "m om", + "Ġcomple tes", + "Ġ23 8", + "Ġsh rug", + "ãĥ ij", + "ĠN inth", + "Ġrev ise", + "ĠProv ider", + "Ġtre acher", + "Ġqu asi", + "ĠPR ES", + "Ġdep osition", + "Ġconfidential ity", + "iss ors", + "Ġim balance", + "Ġspan ning", + "Ġang ular", + "ĠC ul", + "commun ication", + "ĠNor a", + "ĠGen ius", + "op ter", + "Ġs acked", + "Sp ot", + "Ġfine ly", + "ĠCH R", + "28 2", + "w aves", + "Pal est", + "ĠRo hing", + "N L", + "è ¿", + "Ġsh itty", + "ĠSc alia", + "4 75", + "Pro gress", + "Ġreferen cing", + "Ġclass rooms", + "ab ee", + "Ġs od", + "hes ion", + "70 8", + "ĠZucker berg", + "ĠFin ish", + "ĠScot ia", + "ĠSav ior", + "ĠInstall ation", + "an tha", + "( -", + "Ġ30 2", + "ĠP unk", + "Ġcr ater", + "yout u", + "Ġro ast", + "Ġinflu encing", + "Ġd up", + "ĠJ R", + "ĠG rav", + "Ġstat ure", + "Ġbath rooms", + "A side", + "W iki", + "me an", + "ĠZ ak", + "ĠOn es", + "ĠN ath", + "Ġhyper t", + "Ġcommence ment", + "C ivil", + "Ġmoder ately", + "Ġdistribut ors", + "Ġbreast feeding", + "Ġ9 80", + "ĠS ik", + "ĠC ig", + "ĠAM ER", + "R IP", + "ĠCare er", + "ust ing", + "Ġmess ed", + "Ġe h", + "ĠJ ensen", + "/ $", + "Ġblack mail", + "Ġconvers ions", + "Ġscientific ally", + "Ġmant ra", + "p aying", + "Ġiv ory", + "ĠCour ts", + "OU GH", + "aunt let", + "Ser ial", + "B row", + "ĠH undreds", + "3 23", + "Ġpe e", + "Ġlin ux", + "Ġsub mer", + "ĠPrinc ipal", + "48 5", + "ĠD SL", + "ĠCous ins", + "Ġdoctr ines", + "ĠAthlet ics", + "Ġ3 15", + "ĠK arma", + "Ġatt ent", + "ur ger", + "Ġpresc ribe", + "Ġenc aps", + "ĠC ame", + "Ġsecret ive", + "ĠCr imes", + "d n", + "C lean", + "ĠEgypt ians", + "ĠCar penter", + "Ġ ll", + "H um", + "ĠMil o", + "Ġcapital ists", + "Ġbrief ed", + "T we", + "ĠBas in", + "elve t", + "M os", + "Ġplun ge", + "ĠKa iser", + "ĠFu j", + "ill in", + "Ġsafegu ards", + "Ġo ste", + "ĠOpportun ity", + "ĠM afia", + "ĠCall ing", + "ap a", + "ur ban", + "br ush", + "ill ard", + "c é", + "int elligence", + "ĠL ob", + "ĠDru id", + "Ġsm oother", + "Ġfoot ing", + "Ġmotor ists", + "arc ity", + "Ġmascul inity", + "Ġm ism", + "Ġabdom inal", + "ĠTa vern", + "ĠR oh", + "Ġesc apes", + "s igned", + "Anth ony", + "Ġsacrific ing", + "Ġintim acy", + "Ġan terior", + "ĠK od", + "Ġmot if", + "Ġg raz", + "Ġvisual ization", + "Ġguitar ist", + "ĠTro tsky", + "m agic", + "D ar", + "ĠMor i", + "Ġw ards", + "Ġtoile ts", + "l est", + "Ġtele port", + "ĠSund ays", + "ĠPl at", + "ET S", + "Ġe Sports", + "Pat rick", + "ĠK atherine", + "en ko", + "Ġhas sle", + "ĠM ick", + "gg les", + "Ġh ob", + "aint ain", + "Ġair borne", + "Ġsp ans", + "Ġch ili", + "Ġa perture", + "Ġvolunte ered", + "ĠInc ident", + "ĠF res", + "ĠVeter an", + "augh tered", + "ing o", + "Ġun insured", + "CL OSE", + "Ġf use", + "Ġer otic", + "Ġadvert ise", + "ra ising", + "Text ure", + "Ġatt ends", + "ĠRE AL", + "udd led", + "Ġsm oot", + "Ġ30 5", + "ĠWill is", + "Ġbl ond", + "An alysis", + "ĠV T", + "on ica", + "Ġstrongh old", + "R F", + "N M", + ". 
>>", + "Ġprosper ous", + "Ġbo asted", + "29 2", + "ĠManufact uring", + "PR ESS", + "g ren", + "Ġpharm acy", + "ĠRoc kefeller", + "k ai", + "Ġth umbs", + "ĠH ut", + "Ġmother board", + "Ġguard ians", + "ĠAl ter", + "ll ular", + "Ġsh ack", + "Ġwise ly", + "Ġback bone", + "erv a", + "Ġsu icides", + "ĠMcG regor", + "ij ah", + "E mer", + "ĠB rav", + "Ġdesign ate", + "P OST", + "produ ced", + "Ġcleans ing", + "irl wind", + "ex istent", + "ĠHum ph", + "ĠPay ne", + "Ġv ested", + "Å ¡", + "Ġstring ent", + "ion a", + "Ġuns ub", + "Ġsum med", + "ĠHer cules", + "sub ject", + "ĠR agnar", + "ĠN os", + "Ġcharacter ization", + "Ġsav vy", + "ĠDaw son", + "ĠCas ino", + "Ġf ri", + "ĠBar rier", + "Ġmis information", + "Ġins ulation", + "Ġcorrid ors", + "Ġair planes", + "ĠNo ct", + "ah i", + "Ġ19 16", + "k b", + "arm ac", + "Ġsh un", + "Ġsche ma", + "Ġhorr ified", + "Ġ23 9", + "aund ers", + "N B", + "i ates", + "er ity", + "ĠSh ard", + "Ġr arity", + "Ġgroup ed", + "ĠGh ana", + "again st", + "ĠBi ological", + "ĠA ware", + "ow ell", + "Ï Ħ", + "ĠBe au", + "sh aw", + "H ack", + "ĠJul ius", + "US S", + "ol son", + "aun a", + "c ru", + "ĠMaur ice", + "ĠI k", + "Ġsequ encing", + "Ġradical s", + "Ġ( ?,", + "v irtual", + "Ġany ways", + "Ġreper c", + "Ġhand lers", + "Ġhes itant", + "é ĥ", + "ĠM F", + "ple mentation", + "ass ociated", + "Ġcampaign ed", + "ĠY ue", + "ut ations", + "ĠY oga", + "Ġsim mer", + "Ġro ds", + "Ġmel ody", + "Ġconv oy", + "v ideos", + "Ġscreen ed", + "N eg", + "ochem ical", + "Ġ( ))", + "Ġultr as", + "Ġant ip", + "ĠIsland ers", + "70 4", + "Ġfet ish", + "Ġridic ulously", + "ĠK art", + "Ġmitochond rial", + "Ġinterf ering", + "Build er", + "Ġover fl", + "Ġac ne", + "ĠM ud", + "ĠK err", + "f lex", + "ĠPost al", + "ĠBalt ic", + "47 7", + "ĠPers ons", + "our age", + "H B", + "ĠM use", + "ĠImm ortal", + "ĠDri ving", + "Ġpet itions", + "Ġsubsc ript", + "Ġs orce", + "ĠProcess or", + "ut on", + "S ony", + "Ġph on", + "Ġr aced", + "ĠAnth rop", + "Ġday time", + "ĠEx ercise", + "Add ing", + "Ġeng ages", + "ĠQual comm", + "Ġmir acles", + "Ġmem es", + "ĠDr ink", + "ĠOri oles", + "Ġhair s", + "ĠPol ar", + "ath om", + "Ġsl ippery", + "ĠR emy", + "Ġcar amel", + "ĠY EAR", + "Ġal k", + "I gn", + "a ution", + "ĠMer lin", + "ĠC ran", + "Ġap ologies", + "Ġ4 10", + "Ġout ing", + "ĠMem ories", + "app ointed", + "Ġcount ered", + "u ld", + "pos ing", + "Ġfire wall", + "ĠW ast", + "ĠW et", + "work ed", + "se ller", + "Ġrepe aled", + "ere o", + "ass uming", + "BL IC", + "m ite", + "ĠCEO s", + "ĠChap el", + "ellig ent", + "________________ ________", + "D og", + "Ġw art", + "Ġsubsc riber", + "s ports", + "Ġbe gged", + "ĠM V", + "Ġsem if", + "eth ical", + "Ġpre ach", + "Ġrev ital", + "Ġpun itive", + "Ġshort cuts", + "Ġinstit uted", + "ĠWars aw", + "Ġabdom en", + "ĠK ING", + "Ġsuper intendent", + "Ġf ry", + "ĠGe o", + "T OR", + "Ġcontrad ictions", + "apt ic", + "Ġlandsc apes", + "b ugs", + "Ġcl ust", + "Ġvol ley", + "c ribed", + "Ġt andem", + "Ġrob es", + "WH AT", + "Ġpromot er", + "Ġel oqu", + "review ed", + "ĠD K", + "ĠPl ato", + "Ġf ps", + "T ank", + "ĠDer rick", + "Ġpriorit ize", + "as per", + "ĠHond uras", + "ĠCom pleted", + "ne c", + "Ġm og", + "n ir", + "ĠMay o", + "DE F", + "st all", + "in ness", + "ĠVolks wagen", + "Ġprec aution", + "ĠM ell", + "i ak", + "ist ries", + "Ġ24 8", + "Ġoverl apping", + "Sen ate", + "ĠEnh ance", + "res y", + "rac ial", + "OR TS", + "ĠM ormons", + "Str ong", + "ĠCo ch", + "Mex ico", + "ĠMad uro", + "Ġj ars", + "Ġcan e", + "W ik", + "oll a", + "iff erence", + "Ġphysic ist", + "ĠMag gie", + "Ġ28 
5", + "Ġdep iction", + "ĠMcL aren", + "J u", + "Ġsl ows", + "Ġcommission ers", + "ĠWill ow", + "ĠExpl os", + "hov ah", + "Ġtechn ician", + "Ġhom icides", + "ĠFl av", + "ĠTr uman", + "Ġ100 00", + "u ctor", + "Ġsh ader", + "News letter", + "45 7", + "Ġre ver", + "Ġhard ened", + "Ġwhere abouts", + "Ġrede velop", + "Ġcar bs", + "Ġtra vers", + "Ġsqu irrel", + "Ġfoll ower", + "Ġs ings", + "50 8", + "Ġrabb its", + "emon ium", + "Ġdocument ing", + "Ġmisunder stood", + ") '", + "R ick", + "gg ies", + "Ġprem ie", + "Ġsk ating", + "Ġpass ports", + "Ġf ists", + "aged don", + "H aw", + "AC P", + "0 80", + "ĠThough ts", + "ĠCarl son", + "Ġpriest hood", + "h ua", + "Ġdun geons", + "ĠLo ans", + "Ġant is", + "Ġfamiliar ity", + "ĠS abb", + "op al", + "ĠIn k", + "st rike", + "Ġc ram", + "Ġlegal ized", + "Ġcu isine", + "Ġfib re", + "Tra vel", + "ĠMon ument", + "OD Y", + "eth y", + "Ġinter state", + "ĠP UR", + "em porary", + "ĠArab ian", + "develop ed", + "Ġsadd le", + "Ġg ithub", + "ĠOff er", + "ĠIS P", + "ro let", + "ĠSUP ER", + "ĠDen is", + "Ġmultipl ier", + "Ġstir red", + "Interest ingly", + "Ġcustom ary", + "Ġbill ed", + "he x", + "Ġmultipl ied", + "Ġfl ipping", + "ĠCros by", + "Ġfundament als", + "ia e", + "ĠPlay ed", + "ĠAt om", + "am azon", + "ĠFl am", + "ee z", + "activ ated", + "Ġtables poon", + "Ġliberal ism", + "ĠPal in", + "ĠP atel", + "N um", + "ĠT AM", + "Ġs urn", + "ĠRel oaded", + "Ġco ined", + "\" ],", + "ĠCl ash", + "ĠAg u", + "Ġprag matic", + "ĠActiv ate", + "Ġ8 02", + "Ġtrail ers", + "Ġsil hou", + "Ġprob es", + "Ġcirc us", + "ĠB ain", + "ĠLind say", + "ĠAb bey", + "Del ivery", + "Ġconcess ion", + "Ġgast ro", + "ĠSpr ite", + "Ä Ł", + "and el", + "Ġg imm", + "Ġaut obi", + "ĠT urtle", + "Ġwonder fully", + "ĠHar am", + "ĠWorld wide", + "ĠHand le", + "Ġtheor ists", + "Ġsle ek", + "ĠZh u", + "ograph ically", + "EG A", + "ĠOwn ers", + "ath s", + "ĠAntar ctic", + "n atal", + "=\" \"", + "fl ags", + "`` ``", + "Ġs ul", + "K h", + "Ġpot assium", + "Ġlinem an", + "Ġcere al", + "ĠSe asons", + "Ġ20 22", + "Ġmat hematic", + "Ġastron omers", + "prof essional", + "Ġf ares", + "cknow led", + "Ġch i", + "Ġyoung sters", + "Ġmistaken ly", + "Ġhem isphere", + "ĠDiv inity", + "r one", + "Ġ\" ,", + "r ings", + "Ġattract s", + "v ana", + "å ¹", + "C AP", + "Ġplay list", + "Ġpor ch", + "ãģ £", + "Ġincorpor ates", + "Ġso ak", + "Ġassert ing", + "ĠTerror ism", + "ĠP ablo", + "J a", + "ces ter", + "Ġfear ing", + "ĠPr ayer", + "Ġescal ated", + "G W", + "Ġro be", + "ĠBright on", + "ac ists", + "ĠSym phony", + "ĠDwar f", + "ĠPar ade", + "ĠLe go", + "Ġinex pl", + "Ġl ords", + "le af", + "RA G", + "l iber", + "Ġcig ars", + "ĠJe hovah", + "60 6", + "WIND OWS", + "ĠLiber ia", + "eb us", + "He avy", + "Ġl ubric", + "ĠR W", + "angu ages", + "Ġnarrow ed", + "com puter", + "ĠE mber", + "Ġmurder ing", + "Ġdown stream", + "ĠT uls", + "ĠT ables", + "Top ic", + "ĠAcc uracy", + "= /", + "l ost", + "ĠRe i", + "Ġprogress es", + "b ear", + "Ġestablish ments", + "Just in", + "ĠPe ach", + "ĠG omez", + "å ¿", + "ĠTri angle", + "Id ent", + "ĠH ive", + "Res ources", + "Ġmix es", + "ĠAss uming", + "M u", + "Ġhyp oc", + "Ġs ane", + "ĠW an", + "id ious", + "Su ccess", + "Ġ io", + "Ang el", + "Ġdanger ously", + "ĠCreat ure", + "W ORK", + ": [", + "ĠKat rina", + "List ener", + "M iller", + "ĠId lib", + "h ang", + "Ġcircum vent", + "h ref", + "Ġcel estial", + "ĠWe eks", + "ĠP ug", + "ĠDal ton", + "Ġsubpoen a", + "uk u", + "Ġpers isted", + "pe i", + "old ing", + "ĠDoc uments", + "ĠH ast", + "ĠC ENT", + "Ġprim er", + "Ġsyn onymous", + "Ġn ib", + "om 
bs", + "Ġnot ation", + "ĠD ish", + "ĠAt mosp", + "Ġforb id", + "ĠAN G", + "pat tern", + "l os", + "Ġproject iles", + "b rown", + ".\" ,", + "ĠVen om", + "Ġfierce ly", + "ub lished", + "ĠU ran", + "ĠNic arag", + "4 10", + "ĠC AL", + "OT OS", + "ĠMir acle", + "ĠEn chant", + "Ġguard ing", + "app end", + "Att ach", + "Ġlevel ed", + "Ġcond oms", + "ih ilation", + "64 9", + "Ġnight mares", + "ĠTHE Y", + "ĠST ART", + "ĠK inn", + "Ġroomm ate", + "Ġhy giene", + "o pping", + "J ob", + "Ġl vl", + "ĠV ER", + "ĠKe eping", + "ab etic", + "Ġformat ting", + "eral a", + "Ġrev isions", + "Ġres urg", + "T el", + "ĠGood man", + "35 3", + "p od", + "Ġind isp", + "ĠTrans lation", + "Ġg own", + "ĠM und", + "Ġc is", + "Ġby stand", + "col lect", + "ĠPun jab", + "act ively", + "ĠG amb", + "te ll", + "Ġimport ing", + "g encies", + "Ġloc om", + "ĠBr ill", + "H oly", + "ĠBer ger", + "Ġshow down", + "Ġrespond ers", + "IL Y", + "Ġt akedown", + "le ted", + "Ġmat tered", + "Ġpredict ive", + "Ġover lay", + "G PU", + "ĠV ick", + "Ġconvey ed", + "T ab", + "pe er", + "Sc an", + "Ġdefensive ly", + "v ae", + "Ġappro ving", + "Ġt iers", + "ĠV ia", + "quer ade", + "ĠSaud is", + "Ġdemol ished", + "ĠProp he", + "Ġmon o", + "Ġhospital ity", + "H AM", + "ĠAri el", + "M OD", + "ĠTor ah", + "Ġbl ah", + "ĠBel arus", + "erent ial", + "ĠT uc", + "Ġbank er", + "39 7", + "Ġmosqu it", + "ĠScient ist", + "ĠMus ical", + "Ġh ust", + "Sh ift", + "Ġtor ment", + "Ġstand off", + "E duc", + "ĠF og", + "Ġampl ifier", + "Sh ape", + "Inst ance", + "ĠCrit ics", + "Ġda emon", + "H ouston", + "Ġmatt ress", + "ĠID F", + "Ġobsc ene", + "ĠA mer", + "hett i", + "Ġcomp iling", + "35 2", + "vere tt", + "ĠRed uction", + "ist ration", + "ĠBl essed", + "ĠB achelor", + "3 16", + "Ġpr ank", + "ĠVul can", + "dd ing", + "Ġm ourning", + "ĠQu int", + "ĠBl aster", + "test ing", + "Ġsed iment", + ">> >", + "ĠE ternity", + "ĠWH ERE", + "ĠM aze", + "Ġreact ing", + "ĠAl v", + "oms day", + "ĠC RA", + "Ġtransl ator", + "Ġbog us", + "at u", + "We bsite", + "oll s", + "Ġbapt ism", + "Ġs ibling", + "ĠAut umn", + "ve z", + "ãģ® é", + "gu ards", + "Ge org", + "assad ors", + "ĠFre ud", + "Ġcontin ents", + "ĠReg istry", + "Bern ie", + "ĸļ 士", + "Ġtoler ant", + "ĠU W", + "Ġhor ribly", + "99 5", + "ĠMID I", + "Ġimpat ient", + "oc ado", + "er i", + "ĠWor st", + "ĠNor ris", + "ĠTalk ing", + "Ġdef ends", + "ens able", + "Ġ20 21", + "Ġanat omy", + "L ew", + "Ġdraw er", + "ĠCan berra", + "Ġpatri otic", + "é¾įå ĸļ士", + "ĠAv g", + "AR M", + "Ġundis closed", + "Ġfare well", + "45 9", + "b able", + "ĠAll ison", + "OL OG", + "Ġcon co", + "t ight", + "ĠAC PI", + "ĠM ines", + "l ich", + "ĠâĶ ľ", + "represent ed", + "200 000", + "Ġenthusi ast", + "OT S", + "b il", + "ĠIng redients", + "Ġinvent or", + "ĠMy SQL", + "³³ Âł", + "ĠAB OUT", + "with in", + "Ġm k", + "B ul", + "ĠF ake", + "Ġdracon ian", + "W a", + "hel m", + "ĠTer ran", + "erv ille", + "Ġcommon place", + "SI ZE", + "Ġ\" <", + "re place", + "ograph s", + "ĠSE LECT", + "inc ible", + "ĠMost ly", + "ĠShe ffield", + "ĠID E", + "ugg le", + "Ġcit ations", + "h urst", + "ĠUn ix", + "Ġunle ash", + "ĠP iper", + "ĠN ano", + "Ġsucc umb", + "Ġreluct ance", + "Ġ25 00", + "ĠMer chant", + "Ġwire t", + "Ġcomb os", + "ĠBirth day", + "Ġchar coal", + "ĠU PS", + "ĠFair fax", + "Ġdrive way", + "ĠT ek", + "ĠP itch", + "ove re", + "Ġtechn icians", + "ĠAct ual", + "fl ation", + "ĠF iscal", + "ĠEm pty", + "an amo", + "Ġmag nesium", + "Ġsl ut", + "Ġgrow ers", + "Invest igators", + "( ):", + "ĠS atellite", + "ĠKe ynes", + "miss ive", + "l ane", + "Ġb orough", + "3 
44", + "ĠTE AM", + "ĠBet hesda", + "C V", + "h ower", + "ĠR AD", + "Ġch ant", + "ĠR iy", + "Ġcompos itions", + "Ġmild ly", + "Ġmedd ling", + "Ġag ility", + "ane ers", + "5 01", + "Ġsyn th", + "ling er", + "29 1", + "Ġex claimed", + "Part y", + "Ġcont amin", + "ĠMan or", + "ĠResp ond", + "Ġpra ising", + "Ġman ners", + "fle et", + "Sum mer", + "ĠLy nd", + "ĠDef initely", + "gr im", + "Ġbow ling", + "st ri", + "ç Ľ", + "y nt", + "Ġmand ates", + "D IV", + "Ġreconc ile", + "view s", + "ĠDam on", + "vet te", + "F lo", + "ĠGreat est", + "il on", + "ic ia", + "Ġportray al", + "Ġcush ion", + "50 4", + "19 79", + "oss al", + "App lic", + "sc ription", + "Ġmit igation", + "AT S", + "p ac", + "Ġer ased", + "Ġdefic iencies", + "ĠHolland e", + "ĠX u", + "Ġb red", + "Ġpregn ancies", + "f emin", + "Ġem ph", + "Ġpl anners", + "Ġout per", + "utter ing", + "Ġperpet rator", + "Ġm otto", + "ĠEll ison", + "ĠNE VER", + "Ġadmitted ly", + "AR I", + "ĠAzerbai jan", + "Ġmill isec", + "Ġcombust ion", + "ĠBott le", + "ĠL und", + "ĠP s", + "ĠD ress", + "Ġfabric ated", + "Ġbat tered", + "Ġs idel", + "ĠNot ting", + "Fore ign", + "ĠJer ome", + "0 20", + "ĠAr bit", + "Ġkn ots", + "ĠR IGHT", + "M oving", + "ãģ Ļ", + "Ġsur geries", + "Ġcour thouse", + "Ġm astered", + "Ġhover ing", + "ĠBr an", + "ĠAl ison", + "Ġsaf est", + "m ilitary", + "Ġbull ied", + "Ġbar rage", + "Read er", + "ES E", + "ĠGe ographic", + "T ools", + "3 14", + "ĠGe ek", + "ro th", + "gl ers", + "ĠF IN", + "Ï ģ", + "ĠA ston", + "al tern", + "48 8", + "Ġveter in", + "G amer", + "Ġint el", + "ren ches", + "Sh ield", + "Ġam nesty", + "ĠB har", + "Ġp iled", + "Ġhonor able", + "ĠInst itutes", + "Ġso aked", + "Ġcom a", + "ĠE FF", + "34 1", + "by tes", + "ĠG mail", + "le in", + "ĠCanad iens", + "m aterial", + "I l", + "Ġinstruct ors", + "ĠK Y", + "Ġconce ive", + "ub b", + "ĠP ossible", + "Ġeas ing", + "ĠChrist ina", + "Ġcar ic", + "ĠHD R", + "R OM", + "Ġsho vel", + "de lete", + "Ġp uff", + "ĠCh anging", + "Ġseam lessly", + "Att ribute", + "Ġacqu isitions", + "ak ery", + "ĠE F", + "Ġaut istic", + "ĠT akes", + "ĠPow der", + "ĠSt ir", + "5 10", + "ĠBub ble", + "sett ings", + "ĠF owler", + "Ġmust ard", + "Ġmore over", + "Ġcopyright ed", + "ĠLED s", + "15 00", + "æ ī", + "ĠH IS", + "en f", + "Ġcust od", + "ĠH uck", + "G i", + "Ġim g", + "An swer", + "C t", + "j ay", + "ĠInf rastructure", + "Ġfeder ally", + "L oc", + "Ġmicro bes", + "Ġover run", + "dd s", + "ot ent", + "adi ator", + ">>>> >>>>", + "Ġtorn ado", + "Ġadj ud", + "Ġintrig ued", + "Ġs i", + "ĠRevel ation", + "pro gress", + "Ġburgl ary", + "ĠSai yan", + "ĠK athy", + "Ġser pent", + "ĠAndre as", + "Ġcomp el", + "ess ler", + "ĠPl astic", + "ĠAd vent", + "ĠPos itive", + "ĠQ t", + "ĠHind us", + "reg istered", + "ular ity", + "Ġrighteous ness", + "Ġdemon ic", + "u itive", + "ĠB DS", + "ĠGre gg", + "c ia", + "ĠCrus ade", + "ĠSina i", + "W ARE", + "+ (", + "Ġme ll", + "Ġder ail", + "y ards", + "A st", + "Ġnotice ably", + "ĠO ber", + "R am", + "Ġun noticed", + "Ġse q", + "av age", + "T s", + "Ġ6 40", + "Ġconced e", + "Ġ] )", + "F ill", + "Ġcapt ivity", + "ĠImprove ment", + "ĠCrus ader", + "ara oh", + "M AP", + "æ Ĺ", + "Ġstr ide", + "al ways", + "F ly", + "N it", + "Ġal gae", + "ĠCook ing", + "ĠDo ors", + "Mal ley", + "Ġpolic emen", + "ãģ į", + "Ġastron aut", + "access ible", + "49 5", + "ĠR AW", + "cl iffe", + "udic rous", + "Ġdep ended", + "al ach", + "Ġvent ures", + "ra ke", + "Ġt its", + "ĠH ou", + "Ġcond om", + "ormon al", + "Ġind ent", + "Ġupload ing", + "Foot note", + "Import ant", + "Ġ27 1", + "Ġmind ful", + 
"Ġcont ends", + "C ra", + "Ġcal ibr", + "ĠO ECD", + "plug in", + "F at", + "ĠIS S", + "ĠDynam ics", + "ans en", + "68 6", + "' ),", + "Ġsp rite", + "Ġhand held", + "ĠH ipp", + "=~ =~", + "Tr ust", + "Ġsem antics", + "ĠBund es", + "ĠRen o", + "ĠLiter ature", + "s ense", + "G ary", + "ĠA eg", + "ĠTr in", + "EE K", + "Ġcler ic", + "ĠSS H", + "Ġch rist", + "Ġinv ading", + "ib u", + "Ġen um", + "aur a", + "Ġal lege", + "ĠInc redible", + "B BC", + "Ġth ru", + "Ġsa iled", + "Ġem ulate", + "Ġin security", + "Ġc rou", + "Ġaccommod ations", + "Ġincompet ent", + "Ġsl ips", + "ĠEarth qu", + "s ama", + "IL LE", + "Ġi Phones", + "as aki", + "Ġby e", + "Ġar d", + "Ġext ras", + "Ġsl aughtered", + "Ġcrowd funding", + "res so", + "Ġfil ib", + "ĠER ROR", + "ĠT LS", + "e gg", + "ĠIt al", + "Ġen list", + "ĠCatal onia", + "ĠSc ots", + "Ġser geant", + "Ġdiss olve", + "N H", + "Ġstand ings", + "ri que", + "I Q", + "Ġbenef iciary", + "Ġaqu arium", + "You Tube", + "ĠPower Shell", + "Ġbright est", + "ĠWar rant", + "S old", + "Writ ing", + "Ġbegin nings", + "ĠRes erved", + "ĠLatin os", + "head ing", + "Ġ4 40", + "Ġrooft op", + "AT ING", + "Ġ3 90", + "VP N", + "G s", + "k ernel", + "turn ed", + "Ġprefer able", + "Ġturn overs", + "ĠH els", + "S a", + "ĠShin ji", + "ve h", + "ĠMOD ULE", + "V iol", + "Ġex iting", + "Ġj ab", + "ĠVan illa", + "Ġac ron", + "ĠG ap", + "ber n", + "A k", + "ĠMc Gu", + "Ġend lessly", + "ĠFar age", + "ĠNo el", + "V a", + "M K", + "Ġbr ute", + "ĠK ru", + "ĠES V", + "ĠOl ivia", + "âĢ ł", + "ĠK af", + "Ġtrust ing", + "Ġh ots", + "3 24", + "Ġmal aria", + "Ġj son", + "Ġp ounding", + "ort ment", + "Count ry", + "Ġpostp oned", + "Ġunequ iv", + "? ),", + "ĠRo oney", + "udd ing", + "ĠLe ap", + "ur rence", + "sh apeshifter", + "ĠH AS", + "os ate", + "Ġca vern", + "Ġconserv atism", + "ĠB AD", + "Ġmile age", + "Ġarrest ing", + "V aults", + "Ġmix er", + "Dem ocratic", + "ĠB enson", + "Ġauth ored", + "8 000", + "Ġpro active", + "ĠSpirit ual", + "t re", + "Ġincarcer ated", + "ĠS ort", + "Ġpe aked", + "Ġwield ing", + "re ciation", + "×Ļ ×", + "P atch", + "ĠEm my", + "Ġex qu", + "tt o", + "ĠRat io", + "ĠP icks", + "ĠG ry", + "ph ant", + "Ġf ret", + "Ġeth n", + "Ġarch ived", + "% -", + "c ases", + "ĠBl aze", + "Ġim b", + "c v", + "y ss", + "im ony", + "Ġcount down", + "Ġaw akening", + "ĠTunis ia", + "ĠRe fer", + "ĠM J", + "Ġun natural", + "ĠCar negie", + "iz en", + "ĠN uggets", + "he ss", + "Ġev ils", + "64 7", + "Ġintrodu ctory", + "l oving", + "ĠMcM ahon", + "Ġambig uity", + "L abel", + "ĠAlm ighty", + "Ġcolor ing", + "ĠCl aus", + "set ting", + "N ULL", + "ĠF avorite", + "ĠS IG", + "> (", + "ĠSh iva", + "ĠMay er", + "Ġstorm ed", + "ĠCo verage", + "we apons", + "igh am", + "Ġun answered", + "Ġle ve", + "Ġc oy", + "c as", + "b ags", + "as ured", + "Se attle", + "ĠSant orum", + "ser ious", + "Ġcourage ous", + "ĠS oup", + "Ġconfisc ated", + "Ġ// /", + "Ġuncon ventional", + "Ġmom s", + "ĠRohing ya", + "ĠOrche stra", + "ĠPot ion", + "Ġdisc redit", + "ĠF IL", + "f ixed", + "ĠDe er", + "do i", + "ĠDim ension", + "Ġbureaucr ats", + "et een", + "Ġaction Group", + "oh m", + "Ġb umps", + "ĠUt ility", + "Ġsubmar ines", + "ren heit", + "re search", + "ĠShap iro", + "Ġsket ches", + "Ġde ceptive", + "ĠV il", + "es ame", + "ĠEss entially", + "Ġramp age", + "isk y", + "Ġmut tered", + "th ritis", + "Ġ23 6", + "f et", + "b ars", + "Ġpup il", + "ĠTh ou", + "o S", + "s ong", + "Ġfract ured", + "Ġre vert", + "pict ure", + "Ġcrit erion", + "us her", + "Ġreperc ussions", + "ĠV intage", + "ĠSuper intendent", + "Offic ers", + "Ġflag 
ged", + "Ġbl ames", + "Ġin verse", + "ograp hers", + "Ġmakes hift", + "Ġdev oid", + "Ġfoss ils", + "ĠArist otle", + "ĠFund s", + "Ġde pleted", + "ĠFl u", + "ĠY uan", + "Ġw oes", + "Ġlip id", + "Ġsit u", + "requ isites", + "Ġfurn ish", + "ĠSam ar", + "Ġshame ful", + "Ġadverse ly", + "Ġad ept", + "Ġrem orse", + "Ġmurder ous", + "uck les", + "ĠE SL", + "Ġ3 14", + "s ent", + "Ġred ef", + "ĠC ache", + "ĠP urs", + "ig ans", + "Ġ4 60", + "Ġpres criptions", + "Ġf res", + "F uck", + "ocr ates", + "Tw enty", + "ĠWe ird", + "ĠT oggle", + "ĠC alled", + "itiz ens", + "Ġp oultry", + "Ġharvest ing", + "ãĤ¦ ãĤ¹", + "Bott om", + "Ġcaution ed", + "t n", + "39 6", + "ĠNik ki", + "Ġeval uations", + "Ġharass ing", + "Ġbind ings", + "ĠMon etary", + "Ġhit ters", + "Ġadvers ary", + "un ts", + "Ġset back", + "Ġenc rypt", + "ĠC ait", + "Ġl ows", + "eng es", + "ĠN orn", + "Ġbul bs", + "Ġbott led", + "ĠVoy ager", + "3 17", + "Ġsp heres", + "p olitics", + "Ġsubt ract", + "Ġsens ations", + "Ġapp alling", + "Ġ3 16", + "Ġenvironment ally", + "ĠST EM", + "Ġpub lishes", + "5 60", + "Ġdilig ence", + "48 4", + "Ġadv ises", + "Ġpet rol", + "Ġimag ining", + "Ġpatrol s", + "ĠInt eger", + "ĠAs hes", + "act us", + "ĠRad iant", + "ĠL T", + "it ability", + "ht aking", + "Set ting", + "Ġnu anced", + "ĠRe ef", + "ĠDevelop ers", + "N i", + "pie ces", + "99 0", + "Lic ense", + "Ġlow ers", + "ĠOtt oman", + "3 27", + "oo o", + "Ġqu itting", + "mark ets", + "Beh ind", + "Ġbas in", + "Ġdoc s", + "an ie", + "fl ash", + "ct l", + "Ġcivil ized", + "ĠFuk ushima", + "\"] ,\"", + "ĠK S", + "ĠHonest ly", + "ar at", + "Ġconstruct s", + "ĠL ans", + "ĠD ire", + "ĠLI KE", + "ĠTrou ble", + "Ġwith holding", + "ĠOb livion", + "Ġsan ity", + "any a", + "Con st", + "Ġgro cer", + "ĠC elsius", + "Ġrecount ed", + "ĠW ife", + "B order", + "ate red", + "h appy", + "Ġspo iler", + "Ġlog ically", + "H all", + "Ġsucceed ing", + "Ġpoly morph", + "Ġax es", + "ĠShot gun", + "ĠS lim", + "ĠPrin ciples", + "ĠL eth", + "art a", + "Ġsc or", + "Sc reenshot", + "Ġrelax ation", + "#$ #$", + "Ġdeter rent", + "idd y", + "Ġpower less", + "Ġles bians", + "Ġch ords", + "ĠEd ited", + "se lected", + "Ġseparat ists", + "000 2", + "Ġair space", + "Ġturn around", + "Ġc unning", + "P ATH", + "P oly", + "Ġbomb ed", + "Ġt ion", + "x s", + "Ġwith hold", + "Ġw aged", + "ĠLiber ties", + "Fl ag", + "Ġcomfort ing", + "45 4", + "ĠI ris", + "are rs", + "Ġr ag", + "Ġrel ocated", + "ĠGu arant", + "Ġstrateg ically", + "Ġgam ma", + "uber ty", + "ĠLock heed", + "g res", + "Ġgr illed", + "ĠLow e", + "st ats", + "ĠR ocks", + "Ġsens ing", + "Ġrent ing", + "ĠGe ological", + "ا Ø", + "ot rop", + "Ġse w", + "Ġimproper ly", + "48 6", + "Ġâĸ ł", + "Ġstar ving", + "ĠB j", + "Disc ussion", + "3 28", + "ĠCom bo", + "ĠFix es", + "N AT", + "Ġstri ving", + "th ora", + "Ġharvest ed", + "ĠP ing", + "Ġplay ful", + "Ġaven ues", + "Ġoccup ational", + "Ġw akes", + "ĠCou rier", + "Ġdrum mer", + "ĠBrow ser", + "ĠH outh", + "it u", + "Ġapp arel", + "p aste", + "Ġhun ted", + "ĠSecond ly", + "l ain", + "X Y", + "ĠP IN", + "ic ons", + "Ġcock tails", + "Ġs izable", + "Ġhurd les", + "est inal", + "ĠRecre ation", + "Ġe co", + "64 8", + "ĠD ied", + "m int", + "Ġfinger prints", + "Ġdis pose", + "ĠBos nia", + "ts y", + "22 00", + "Ġins pected", + "ĠF ou", + "Ġf uss", + "Ġamb ush", + "ĠR ak", + "Ġmanif ested", + "Pro secut", + "Ġsuff ice", + "ren ces", + "Ġcompens ated", + "ĠC yrus", + "Ġgen us", + "ĠWolver ine", + "ĠTrend s", + "Ġh ikes", + "ĠSe en", + "Ġen rol", + "C old", + "Ġpol itely", + "ĠSl av", + "ĠRu pert", + "Ġey 
ewitness", + "ĠAl to", + "Ġun comp", + "Ġposter ior", + "M ust", + "ĠHer z", + "Ġprogress ively", + "Ġ23 4", + "Ġind ifference", + "ĠCunning ham", + "Ġacadem ia", + "Ġse wer", + "Ġast ounding", + "ĠA ES", + "r ather", + "Ġeld est", + "Ġclim bs", + "ĠAdd s", + "Ġout cry", + "Ġcont ag", + "ĠH ouses", + "Ġpe pt", + "ĠMel ania", + "interest ed", + "ĠU CH", + "ĠR oots", + "ĠHub bard", + "ĠT BD", + "ĠRoman ian", + "fil ename", + "St one", + "ĠIm pl", + "Ġchromos ome", + "C le", + "d x", + "Ġscram bled", + "ĠP t", + "Ġ24 2", + "OP LE", + "Ġtremend ously", + "St reet", + "Ġcra ving", + "Ġbund led", + "ĠR G", + "p ipe", + "Ġinj uring", + "Ġarc ane", + "Part icip", + "ĠHero ic", + "st y", + "Ġto pping", + "ĠTemp est", + "rent ices", + "b h", + "Ġpar anoia", + "ĠUnic ode", + "Ġegreg ious", + "Ġ\\ '", + "ĠOsw ald", + "Ġgra vel", + "ĠSim psons", + "Ġbl and", + "ĠGuant anamo", + "Writ er", + "lin ers", + "ĠD ice", + "J C", + "Ġpar ity", + "Ġs ided", + "Ġ23 7", + "ĠPyr rha", + "at ters", + "d k", + "F ine", + "comp an", + "Ġform ulated", + "ĠId ol", + "il ers", + "hem oth", + "ĠF av", + "Ġintr usion", + "Ġcar rots", + "ĠL ayer", + "ĠH acker", + "Ġ ----------------", + "Ġmoder ation", + "é ģ", + "oc oc", + "Ġcharacter ize", + "ĠTe resa", + "Ġsocio economic", + "Ġper k", + "ĠParticip ation", + "tr aining", + "ĠPaul o", + "ph ys", + "Ġtrust worthy", + "Ġembod ied", + "ĠMer ch", + "c urrency", + "ĠPrior ity", + "Ġte asing", + "Ġabsor bing", + "Ġunf inished", + "ĠCompar ison", + "Ġdis ple", + "writ ers", + "Ġprofess ions", + "ĠPengu in", + "Ġang rily", + "ĠL INK", + "68 8", + "ĠCor respond", + "Ġprev ailed", + "Ġcart el", + "l p", + "as ms", + "ĠRed emption", + "ĠIslam ists", + "effect s", + "d ose", + "ĠL atter", + "ĠHal ifax", + "Ġv as", + "ĠTop ics", + "ĠN amed", + "advert ising", + "zz a", + "IC ES", + "Ġret arded", + "ach able", + "ĠPupp et", + "ĠItem Level", + "Ġret ract", + "Ġident ifiable", + "A aron", + "ĠB uster", + "s ol", + "hel le", + "as semb", + "H ope", + "r anged", + "B a", + "ĠP urch", + "é Ģ", + "ĠSir i", + "Ġarri vals", + "Ġ19 12", + "Ġshort ened", + "Ġ3 12", + "Ġdiscrep ancy", + "ĠTem perature", + "ĠWal ton", + "Ġkind erg", + "p olit", + "Ġrem ix", + "Ġconnect ors", + "ãĥĺ ãĥ©", + "ĠKazakh stan", + "dom inated", + "Ġsu gars", + "im ble", + "ĠPan ic", + "ĠDem and", + "ĠCol ony", + "on en", + "ĠM ER", + "7 75", + "ur ia", + "aza ar", + "ĠDeg ree", + "P ri", + "Ġsun shine", + "Ġ25 1", + "Ġpsychedel ic", + "Ġdigit ally", + "ĠBra un", + "Ġsh immer", + "Ġsh ave", + "ĠTel esc", + "ĠAst ral", + "ĠVenezuel an", + "ĠO G", + "Ġc rawling", + "Int eg", + "ĠFe ather", + "Ġunfold ing", + "Ġappropri ation", + "Ġè£ı è", + "ĠMob ility", + "ĠN ey", + "- .", + "b ilt", + "L IN", + "ĠT ube", + "ĠCon versely", + "Ġkey boards", + "ĠC ao", + "Ġover th", + "Ġla ure", + ">> \\", + "ĠV iper", + "ach a", + "Off set", + "ĠR aleigh", + "ĠJ ae", + "J ordan", + "j p", + "Ġtotal itarian", + "Connect or", + "Ġobserv es", + "ĠSpart an", + "ĠIm mediately", + "ĠSc al", + "C ool", + "Ġt aps", + "Ġro ar", + "P ast", + "Ġch ars", + "ĠB ender", + "ĠShe ldon", + "Ġpain ter", + "Ġbe acon", + "ĠCreat ures", + "Ġdownt urn", + "Ġh inder", + "ĠAnd romeda", + "à Ľ", + "cc oli", + "ĠF itness", + "et rical", + "Ġutil izes", + "Ġsen ate", + "Ġen semble", + "Ġche ers", + "T W", + "Ġaff luent", + "k il", + "ry lic", + "ord ering", + "Com puter", + "Ġgru esome", + "ost ics", + "ĠUb isoft", + "ĠKel ley", + "Ġw rench", + "Ġbourgeois ie", + "IB LE", + "ĠPrest on", + "w orn", + "ar ist", + "reat ing", + "Ġst ained", + "ar ine", + "Ġsl ime", + 
"EN N", + "Ġche sts", + "Ġground water", + "ann ot", + "ĠTr ay", + "ĠLoc ke", + "ĠC TR", + "Ġd udes", + "ĠEx ternal", + "ĠDec oder", + "Ġpar amed", + "ĠMed line", + "80 9", + "ĠD inner", + "rup al", + "g z", + "ĠG um", + "ĠDem o", + "j ee", + "Ġd h", + "ber man", + "arch s", + "Ġen qu", + "ĠEp stein", + "Ġdevast ation", + "Ġfriends hips", + "ĠAr d", + "Ġ23 1", + "ĠRub in", + "ĠDist ance", + "Ġsp urred", + "Ġd ossier", + "Ġover looking", + "\\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\", + "Fore st", + "ĠCom es", + "\\ \",", + "ĠIran ians", + "Ġf ixtures", + "L aughs", + "Ġcur ry", + "ĠKing ston", + "Ġsqu ash", + "Ġcat alogue", + "Ġabnormal ities", + "Ġdigest ive", + ".... .....", + "Ġsubord inate", + "og ly", + "Ġ24 9", + "M iddle", + "Ġmass ac", + "Ġburg ers", + "Ġdown stairs", + "Ġ19 31", + "39 4", + "ĠV G", + "Ġl asers", + "ĠS ikh", + "ĠAlex a", + "der ived", + "Ġcycl ist", + "ãģ® éŃĶ", + "onel iness", + "!!!! !!!!", + "Ġbuff s", + "leg ate", + "Ġrap ing", + "Ġrecomm ending", + "ro red", + "Ġmult icultural", + "un ique", + "Ġbusiness men", + "Ġune asy", + "ĠM AP", + "Ġdisp ersed", + "cipl ine", + "J ess", + "ĠK erala", + "å §", + "Ġabst raction", + "Sur v", + "U h", + "Ġprin ters", + "ij a", + "ow der", + "Ġanalog ous", + "ĠA SP", + "af er", + "Ġunfold ed", + "Ġlevel ing", + "Ġbre ached", + "ĠH earing", + "Ġn at", + "Ġtransl ating", + "crit ical", + "Ġant agonist", + "ĠYes terday", + "Ġfuzz y", + "w ash", + "m ere", + "Ġbe wild", + "ĠM ae", + "V irgin", + "ph rase", + "Ġsign aled", + "ĠH IGH", + "Ġprot ester", + "Ġgar ner", + "unk nown", + "Ġk ay", + "Ġabduct ed", + "Ġst alking", + "am n", + "Ġdes erving", + "ĠR iv", + "ĠJ orge", + "Ġscratch ing", + "ĠS aving", + "ip ing", + "Ġte ase", + "Ġmission ary", + "ĠMor row", + "T IME", + "P resent", + "Ġchem otherapy", + "tern ess", + "ĠH omes", + "ĠP urdue", + "Ġst aunch", + "ĠWhit ney", + "ĠTH ERE", + "Î ¼", + "iat us", + "ĠErn est", + "ĠDe ploy", + "Ġcove ted", + "F ML", + "ĠDial ogue", + "Ġex ited", + "f ruit", + "Ġner d", + "\":\" \",\"", + "Ġv ivo", + "ru ly", + "4 60", + "ĠAm en", + "rehens ible", + "Ġâ ĺ", + "D IR", + "Ġad herence", + "Ġche w", + "ĠCo ke", + "ĠSerge i", + "dig ital", + "ĠNe ck", + "g ently", + "enth al", + "/ )", + "Ġwe ary", + "Ġgu ise", + "ĠConc ord", + "ĠOn ion", + "at cher", + "Ġb inge", + "ĠDirect ive", + "Ġman ned", + "ans k", + "Ġill usions", + "Ġbillion aires", + "38 3", + "oly n", + "odynam ic", + "ĠWhe at", + "ĠA lic", + "Ġcol oured", + "ĠN AFTA", + "ab o", + "Ġmac ros", + "ind ependent", + "s weet", + "Ġsp ac", + "ĠK abul", + "Ġ Ä", + "em e", + "Ġdict ated", + "Ġsh outs", + "= {", + "Ġr ipping", + "ĠSh ay", + "ĠCr icket", + "direct ed", + "Ġanalys ed", + "ĠWAR RANT", + "ag ons", + "ĠBlaz ers", + "Ġche ered", + "Ġar ithmetic", + "ĠTan z", + "37 3", + "ĠFl ags", + "Ġ29 5", + "Ġw itches", + "ĠIn cluded", + "ĠG ained", + "ĠBl ades", + "G am", + "ĠSam antha", + "ĠAtl antis", + "ĠPr att", + "Ġspo iled", + "ĠI B", + "ĠRam irez", + "Pro bably", + "re ro", + "ĠN g", + "ĠWar lock", + "t p", + "Ġover he", + "Ġadministr ations", + "Ġt int", + "Ġreg iment", + "Ġpist ols", + "Ġblank ets", + "Ġep ist", + "Ġbowl s", + "Ġhydra ulic", + "Ġde an", + "Ġj ung", + "Ġasc end", + "70 5", + "ĠSant iago", + "à ®", + "Ġun avoid", + "ĠSh aman", + "re b", + "Ġstem ming", + "99 8", + "ĠM G", + "st icks", + "esthes ia", + "ER O", + "Ġmor bid", + "ĠGr ill", + "ĠP oe", + "any l", + "Ġdele ting", + "ĠSurve illance", + "Ġdirect ives", + "Ġiter ations", + "ĠR ox", + "ĠMil ky", + "F ather", + "Ġpat ented", + "44 7", + "Ġprec ursor", + "Ġm aiden", + "ĠP 
hen", + "ĠVe gan", + "ĠPat ent", + "K elly", + "Redd itor", + "Ġn ods", + "Ġvent ilation", + "ĠSchwar z", + "Ġw izards", + "Ġomin ous", + "ĠHe ads", + "ĠB G", + "Ġl umber", + "ĠSp iel", + "Ġis Enabled", + "Ġancest ral", + "ĠSh ips", + "Ġwrest ler", + "ph i", + "Ġy uan", + "ĠRebell ion", + "Ġice berg", + "Ġmag ically", + "Ġdivers ion", + "ar ro", + "yth m", + "ĠR iders", + "ĠRob bie", + "ĠK ara", + "ĠMain tenance", + "ĠHer b", + "Ġhar ms", + "p acked", + "ĠFe instein", + "Ġmarry ing", + "Ġbl ending", + "ĠR ates", + "Ġ18 80", + "Ġwr ink", + "ĠUn ch", + "ĠTor ch", + "desc ribed", + "Ġhuman oid", + "ilit ating", + "ĠCon v", + "ĠFe ld", + "IGH TS", + "Ġwhistlebl ower", + "ort mund", + "ets y", + "arre tt", + "ĠMon o", + "ĠI ke", + "ĠC NBC", + "ĠW AY", + "ĠMD MA", + "ĠIndividual s", + "Ġsupplement al", + "Ġpower house", + "ĠSt ru", + "F ocus", + "aph ael", + "ĠCol leg", + "att i", + "Z A", + "Ġp erenn", + "ĠSign ature", + "ĠRod ney", + "Ġcub es", + "idd led", + "ĠD ante", + "ĠIN V", + "iling ual", + "ĠC th", + "Ġso fa", + "Ġintimid ate", + "ĠR oe", + "ĠDi plom", + "ĠCount ries", + "ays on", + "Ġextrad ition", + "Ġdis abling", + "ĠCard iff", + "Ġmemor andum", + "ĠTr ace", + "Ġ?? ?", + "se ctor", + "ĠRou hani", + "ĠY ates", + "ĠFree ze", + "Ġbl adder", + "M otor", + "ĠProm ise", + "ant asy", + "Ġforesee able", + "ĠC ologne", + "cont ainer", + "ĠTre es", + "ĠG ors", + "ĠSin clair", + "Ġbar ring", + "key e", + "Ġsl ashed", + "ĠStat istical", + "é ĩ", + "Ġâĸ º", + "All ows", + "Ġhum ility", + "Ġdr illed", + "ĠF urn", + "44 3", + "Ġse wage", + "Ġhome page", + "Ġcour tyard", + "Ġv ile", + "Ġsubsid iaries", + "aj o", + "direct ory", + "Ġam mon", + "V ers", + "charg es", + "Ġ} }", + "ĠCh ains", + "Ġ24 6", + "n ob", + "Ġper cept", + "Ġg rit", + "Ġfisher men", + "ĠIraq is", + "ĠDIS TR", + "ĠF ULL", + "ĠEval uation", + "g raph", + "at ial", + "Ġcooper ating", + "Ġmel an", + "Ġenlight ened", + "Ġal i", + "t ailed", + "Ġsal ute", + "Ġweak est", + "ĠBull dogs", + "U A", + "ĠAll oy", + "Ġsem en", + "oc ene", + "ĠWilliam son", + "s pr", + ", âĢĶ", + "ĠG F", + "itt ens", + "Be at", + "ĠJ unk", + "iph ate", + "ĠFarm ers", + "ĠBit coins", + "ig ers", + "d h", + "ĠL oyal", + "p ayer", + "Ġentert ained", + "Ġpenn ed", + "Ġcoup on", + "Que ue", + "Ġweaken ing", + "c arry", + "Ġunderest imate", + "Ġshoot out", + "Ġcharism atic", + "ĠProced ure", + "Ġprud ent", + "in ances", + "Ġric hes", + "Ġcort ical", + "Ġstr ides", + "Ġd rib", + "ĠOil ers", + "5 40", + "ĠPer form", + "ĠBang kok", + "Ġe uth", + "S ER", + "Ġsimpl istic", + "t ops", + "camp aign", + "Q uality", + "Ġimpover ished", + "ĠEisen hower", + "Ġaug ment", + "ĠH arden", + "Ġinterven ed", + "Ġlist ens", + "ĠK ok", + "Ġs age", + "Ġrub bish", + "ĠD ed", + "Ġm ull", + "pe lling", + "Ġvide ot", + "Produ ction", + "D J", + "m iah", + "Ġadapt ations", + "Ġmed ically", + "Ġboard ed", + "Ġarrog ance", + "Ġscra pped", + "Ġopp ress", + "FORM ATION", + "Ġj unction", + "4 15", + "EE EE", + "S kill", + "Ġsub du", + "ĠSug gest", + "ĠP ett", + "Ġle tt", + "ĠMan ip", + "ĠC af", + "ĠCooper ation", + "T her", + "Ġreg ained", + "¶ æ", + "ref lect", + "Ġth ugs", + "ĠShel by", + "Ġdict ates", + "ĠWe iner", + "ĠH ale", + "Ġbatt leground", + "s child", + "Ġcond ol", + "h unt", + "osit ories", + "Ġacc uses", + "Fil ename", + "Ġsh ri", + "Ġmotiv ate", + "Ġreflect ions", + "N ull", + "ĠL obby", + "¥ µ", + "ĠS ATA", + "ĠBack up", + "Ñ ĥ", + "n in", + "ĠCor rection", + "Ġju icy", + "ut ra", + "ĠP ric", + "Ġrest raining", + "ĠAir bnb", + "ĠAr rest", + "Ġappropri ations", + "Ġsl opes", + 
"Ġmans laughter", + "Ġwork ings", + "ĠH uss", + "ĠF rey", + "Le ave", + "ĠHarm ony", + "ĠF eder", + "Ġ4 30", + "Ġt rench", + "Ġglad ly", + "Ġbull pen", + "ĠG au", + "b ones", + "Ġgro ove", + "Ġpre text", + "ã ħĭ", + "Ġtransm itter", + "ĠComp onent", + "Ġunder age", + "ĠEm pires", + "T ile", + "Ġo y", + "ĠMar vin", + "ĠC AS", + "Ġbl oss", + "Ġrepl icated", + "ĠMar iners", + "Marc us", + "ĠBl ocks", + "Ġliber ated", + "Ġbutter fly", + "Fe el", + "Ġfer mentation", + "Ġyou tube", + "Ġoff end", + "ĠTer m", + "res ist", + "Ġcess ation", + "Ġinsurg ency", + "Ġb ir", + "ĠRa ise", + "59 5", + "Ġhypothes es", + "50 2", + "Ġpl aque", + "ocr at", + "Ġjack ets", + "ĠHuff Post", + "am ong", + "Ġconf er", + "48 7", + "ĠL illy", + "Ġadapt ing", + "ĠF ay", + "Ġsh oved", + "ve c", + "Ġref ine", + "Ġg on", + "Ġgun men", + "z ai", + "ĠShut tle", + "ĠI zan", + "Ġ19 13", + "Ġple thora", + "· ·", + "Ġ5 10", + "Ġp uberty", + "Ġ24 1", + "ĠWe alth", + "ĠAl ma", + "ĠM EM", + "ĠAd ults", + "C as", + "pr ison", + "R ace", + "Ġwater proof", + "Ġathlet icism", + "Ġcapital ize", + "ĠJu ice", + "Ġillum inated", + "ĠP ascal", + "Ġirrit ation", + "ĠWitness es", + "ad le", + "ĠAst ro", + "Ġf ax", + "ĠEl vis", + "Prim ary", + "ĠL ich", + "ĠEl ves", + "Ġres iding", + "Ġst umble", + "3 19", + "ĠP KK", + "Ġadvers aries", + "D OS", + "ĠR itual", + "Ġsm ear", + "Ġar son", + "ident al", + "Ġsc ant", + "Ġmon archy", + "Ġhal ftime", + "Ġresid ue", + "Ġind ign", + "ĠSh aun", + "ĠEl m", + "aur i", + "A ff", + "W ATCH", + "ĠLy on", + "hel ps", + "36 1", + "Ġlobby ist", + "Ġdimin ishing", + "Ġout breaks", + "Ġgo ats", + "f avorite", + "ĠN ah", + "son ian", + "ĠBo oster", + "Ġsand box", + "ĠF are", + "ĠMalt a", + "Ġatt Rot", + "ĠM OR", + "ld e", + "Ġnavig ating", + "T ouch", + "Ġunt rue", + "ĠDis aster", + "Ġl udicrous", + "Pass word", + "ĠJ FK", + "blog spot", + "4 16", + "ĠUN DER", + "ern al", + "Ġdelay ing", + "T OP", + "Ġimpl ants", + "ĠAV G", + "ĠH uge", + "att r", + "Ġjournal istic", + "ĠPe yton", + "ĠI A", + "R ap", + "go al", + "ĠProgram me", + "Ġsm ashing", + "w ives", + "print ln", + "ĠPl ague", + "in us", + "EE P", + "Ġcru iser", + "ĠPar ish", + "umin ium", + "Ġoccup ants", + "ĠJ ihad", + "m op", + "Ġp int", + "Ġhe ct", + "ĠMe cca", + "direct or", + "ĠFund ing", + "ĠM ixed", + "Ġst ag", + "T ier", + "Ġg ust", + "Ġbright ly", + "ors i", + "Ġup hill", + "R D", + "Ġles ions", + "ĠBund y", + "liv ious", + "Ġbi ologist", + "ĠFac ulty", + "ĠAuthor ization", + "Ġ24 4", + "All ow", + "ï ¸", + "ĠGi ul", + "Ġpert inent", + "ot aur", + "es se", + "ĠRo of", + "Ġunman ned", + "35 1", + "ĠSh ak", + "ĠO rient", + "Ġend anger", + "D ir", + "Ġrepl en", + "ed ient", + "Ġtail or", + "Ġgad gets", + "Ġaud ible", + "âĺ Ĩ", + "N ice", + "Ġbomb ard", + "ĠR ape", + "Ġdef iance", + "ĠTW O", + "ĠFilip ino", + "Ġunaff ected", + "erv atives", + "Ġso ared", + "ĠBol ton", + "Ġcomprom ising", + "ĠBrew ers", + "R AL", + "ĠA HL", + "icy cle", + "Ġv ampires", + "Ġdi pped", + "oy er", + "ĠX III", + "Ġsidew ays", + "ĠW aste", + "ĠD iss", + "ĠâĶľ âĶĢâĶĢ", + "$ .", + "Ġhabit ats", + "ĠBe ef", + "tr uth", + "tr ained", + "spl it", + "R us", + "And y", + "ĠB ram", + "RE P", + "p id", + "è£ ħ", + "ĠMut ant", + "An im", + "ĠMar ina", + "Ġfut ile", + "hig hest", + "f requency", + "Ġepile psy", + "Ġcop ing", + "Ġconc ise", + "Ġtr acing", + "ĠS UN", + "pan el", + "ĠSoph ie", + "ĠCrow ley", + "ĠAd olf", + "ĠShoot er", + "Ġsh aky", + "ĠI G", + "ĠL ies", + "ĠBar ber", + "p kg", + "Ġupt ake", + "Ġpred atory", + "UL TS", + "/ **", + "Ġintox icated", + "ĠWest brook", + "od der", 
+ "he ment", + "Ġbas eman", + "AP D", + "st orage", + "ĠFif ty", + "ed itor", + "G EN", + "UT ION", + "ir ting", + "Ġse wing", + "r ift", + "Ġag ony", + "ĠS ands", + "Ġ25 4", + "C ash", + "Ġl odge", + "Ġp unt", + "N atural", + "ĠIde as", + "Ġerrone ous", + "ĠSens or", + "ĠHann ity", + "Ġ19 21", + "Ġm ould", + "ĠG on", + "kay a", + "Ġanonym ously", + "ĠK EY", + "Ġsim ulator", + "W inter", + "Ġstream ed", + "50 7", + "? \",", + "Ġte ased", + "Ġco efficient", + "Ġwart ime", + "ĠTH R", + "' '.", + "ĠBank ing", + "mp ire", + "Ġf andom", + "Ġl ia", + "G a", + "Ġdown hill", + "Ġinterpre ting", + "Ind ividual", + "N orm", + "Ġjealous y", + "bit coin", + "Ġple asures", + "ĠToy s", + "ĠChev rolet", + "ĠAd visor", + "IZ E", + "Ġrecept ions", + "70 6", + "C ro", + "Ġ26 2", + "Ġcit rus", + "ir u", + "Review er", + "ject ed", + "U ES", + "an z", + "19 81", + "ĠWork er", + "Ġcompl ied", + "ores cent", + "contin ental", + "T on", + "ĠPr ism", + "ĠShe ep", + "Ġ28 8", + "n ox", + "ĠV og", + "O rd", + "Ġreal ms", + "te k", + "Ġirrig ation", + "Ġbicy cles", + "Ġelectron ically", + "p oly", + "t all", + "() );", + "Ġaest hetics", + "ĠInteg rated", + "Expl ore", + "Ġd unk", + "47 6", + "p ain", + "ĠJac ques", + "ĠD mit", + "Fram es", + "Ġreun ited", + "Ġhum id", + "D ro", + "P olitical", + "Ġyouth ful", + "Ġent ails", + "Ġmosqu ito", + "36 3", + "spe cies", + "Ġcoord inating", + "ĠMay hem", + "ĠMagn us", + "M ount", + "Impro ved", + "ĠST ATE", + "ATT LE", + "Ġflow ed", + "Ġtack led", + "Ġfashion ed", + "Ġre organ", + "iv ari", + "f inger", + "Ġreluct antly", + "et ting", + "ĠV and", + "you ng", + "ĠGar land", + "Ġpresum ption", + "Ġamen ities", + "ĠPle asant", + "on ential", + "ĠO xy", + "Ġmor als", + "ĠY ah", + "Read y", + "Sim on", + "En h", + "D emon", + "Ġcl ich", + "Mon itor", + "ĠD U", + "Ġwel comes", + "Ġstand out", + "Ġdread ful", + "Ġban anas", + "Ġball oons", + "h ooting", + "bas ic", + "Ġsuff ix", + "Ġd uly", + "can o", + "Ch ain", + "at os", + "Ġgeop olitical", + "Ġ( &", + "ĠGem ini", + "ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ ÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤÃĥÃĤ", + "Ġacqu itted", + "L uck", + "prot ect", + "10 24", + "Ġsc arcity", + "Ġmind fulness", + "ec ided", + "D N", + "pr ime", + "ĠPres idents", + "ĠVID EO", + "Ġ( âĪĴ", + "add ock", + "N OR", + "ĠP ru", + "p un", + "ĠL OL", + ")) ))", + "ĠL iqu", + "ĠS AS", + "Ġsty ling", + "Ġpunish ments", + "Ġnum b", + "Ġasc ertain", + "ĠRock ies", + "f lu", + "Th umbnail", + "Ġperpet rated", + "ĠSem i", + "Ġdis arm", + "ĠOld er", + "ĠEx ception", + "Ġexponent ially", + "ĠCommun ities", + "Ġabol ish", + "ĠPart ner", + "pt oms", + "Ġ7 77", + "ĠFo ley", + "ĠC ases", + "Ġgre ase", + "ĠReb irth", + "G round", + "Ġ; )", + "ĠDoct rine", + "ik ini", + "Y e", + "ĠBl ossom", + "Ġpers ists", + "b ill", + "Ġinf usion", + "Ġbud dies", + "9 11", + "ĠPat ient", + "Ġdem os", + "Ġacquaint ance", + "ĠP aw", + "at ari", + "Ġx ml", + "Ġfasc ination", + "ĠSer ve", + "Ï Ĥ", + "br anded", + "Ġa z", + "Return s", + "Ġover shadow", + "Ġro am", + "Ġspeed y", + "n umbered", + "hel ial", + "Ġdisc iple", + "Ġass urances", + "g iven", + "pect ing", + "ĠN atalie", + "çĶ °", + "Ġmosquit oes", + "rote in", + "Ġnumer ic", + "Ġindepend ents", + "Ġtrans itional", + "Ġreaction ary", + "ĠMech dragon", + "do ctor", + "Ġshort est", + "Ġsequ ential", + "ĠB ac", + "ĠAccount s", + "ãģ Į", + "ach y", + "ract ive", + "ĠReg iment", + "Ġbreat htaking", + "ffic iency", + "ĠB ates", + "Ġ3 11", + "Ġward robe", + "ft s", + "ĠBer k", + "Sim ply", + 
"ĠRivers ide", + "iver ing", + "ident ial", + "lu cent", + "Ġen riched", + "ĠCon ver", + "ĠG iving", + "ãĥ Ļ", + "Ġlegal ize", + "ĠF TC", + "Ġfre aking", + "M ix", + "Ġter restrial", + "es ian", + "ci ents", + "W ing", + "LO AD", + "Ġled ge", + "ĠViol ent", + "ĠMet all", + "Ġ30 8", + "Ġs outheastern", + "hett o", + "M eat", + "Ġslow down", + "Ġret reated", + "Jere my", + "end as", + "**** *", + "er ic", + "Ġre ins", + "opp able", + "ĠHuman ity", + "ear ances", + "rig an", + "C amera", + "Ġwa ivers", + "s oc", + "Ġalter ation", + "trans form", + "ĠC emetery", + "50 6", + "Ġindef inite", + "Ġstim ulating", + "y g", + "60 3", + "ĠS op", + "Ġdescript ive", + "Ph ase", + "ĠEd mund", + "Ġpneum onia", + "vent us", + "A mb", + "Ġlabor atories", + "ĠEx clusive", + "ug ar", + "W ere", + "Ġmalf unction", + "Ġhomosexual s", + "Ġ---- ---", + "un i", + "Ġturb ines", + "ĠEqu ity", + "D u", + "Ġmind ed", + "ĠR H", + "ĠBlack hawks", + "Ġfe ats", + "Ġ17 00", + "re pl", + "36 2", + "lad en", + "Ġindisp ensable", + "ly ss", + "tt i", + "Ġre el", + "Ġdiver ted", + "Ġlik eness", + "Ġsubscript ions", + "Ġfing ert", + "Ġfil thy", + "dest ruct", + "d raft", + "ĠBernard ino", + "l aunch", + "Ġper plex", + "ĠS UM", + "car b", + "Ġswe ater", + "ĠVent ure", + "ĠJ ag", + "ĠCele b", + "ĠV oters", + "Ġstead fast", + "Ġathlet ics", + "ĠHans on", + "ĠDr ac", + "Tr acker", + "Ġcomm end", + "ĠPres idency", + "ĠD ID", + "in formed", + "Ġweb page", + "P retty", + "Ġforce fully", + "ãĥĥ ãĤ¯", + "Ġrel ocation", + "Ġsat ire", + "â ī", + "ĠSunder land", + "æ Ħ", + "V oice", + "???? ????", + "Ġinform ant", + "Ġbow el", + "ĠUn iform", + "Ġ ...\"", + "Ġpur ge", + "Ġpic nic", + "ĠU mb", + "ĠU PDATE", + "ĠSapp hire", + "ĠSt all", + "le arn", + "Ġobject ively", + "Ġob liter", + "Ġlooph ole", + "Ġjour neys", + "Ġo mission", + "Pro s", + "ĠSid ney", + "pl oma", + "Ġspray ed", + "Ġg uru", + "Ġtra itor", + "Ġtim et", + "Ġsn apping", + "ĠSe vent", + "urn al", + "ĠUk ip", + "Ġb owed", + "por al", + "l iberal", + "R os", + "Quest ions", + "i OS", + "Ġsummar ize", + "ST AT", + "Ġ18 50", + "ap est", + "Ġl ender", + "ĠVari able", + "br inging", + "ĠL ORD", + ", )", + "Ġcollaps es", + "x iety", + "ĠN ed", + "Y D", + "ĠSch a", + "Ġantib ody", + "Ġdis band", + "y re", + "ill usion", + "Ġro ver", + "s hed", + "ĠHiro sh", + "cc i", + "Ġcal am", + "ĠMort on", + "P interest", + "Ġ19 28", + "ĠE uras", + "ord es", + "Ġf ences", + "ĠIn ventory", + "ĠVal encia", + "ĠU d", + "ĠT iff", + "Ġsqu e", + "Ġqu otation", + "Ġtroubles ome", + "er ker", + "QU EST", + "ĠKing doms", + "s outh", + "Ġle vy", + "Pr ince", + "ĠSt ing", + "Ġnick named", + "Ġapp e", + "Ġphot ographic", + "Ġcorp us", + "re ference", + "ĠT rog", + "U nt", + ") =(", + "ĠLat via", + "Ġactiv ating", + "Ġlicense e", + "Ġdispar ities", + "ĠNews letter", + "ãĥĥ ãĥĪ", + "Ġfree ing", + "ĠJe ep", + "ĠPer ception", + "ins k", + "Ġsil icone", + "ĠHay den", + "Le an", + "ĠSuz uki", + "ibr arian", + "66 8", + "Ġsp or", + "Ġcorrel ations", + "ag hetti", + "Ġtu ber", + "ĠIP CC", + "il us", + "ĠV u", + "Ġwealth iest", + "ĠCarb uncle", + "an za", + "Ġfool ed", + "ĠZ ur", + "Ġd addy", + "ran o", + "il ian", + "Ġknock out", + "f man", + "requ ired", + "ĠWik ileaks", + "ĠD uffy", + "ON T", + "Ġins ol", + "ĠObject s", + "Ġb ou", + "ĠNord ic", + "ĠIns ert", + "sc an", + "Ġd ancers", + "Ġid iots", + "major ity", + "ĠNev ille", + "ĠFree BSD", + "Ġt art", + "pan ic", + "69 0", + "Ġcoc oa", + "Ġsam pled", + "Ġlook up", + "Ind ust", + "Ġinject ions", + "gen re", + "Ġa u", + "Ġroad way", + "Ġgen itals", + "K ind", + "ĠEx 
aminer", + "ĠY az", + "F resh", + "Ġpar alysis", + "ĠAl uminum", + "Ġre ap", + "ok é", + "Ġsl oppy", + "ĠTun nel", + "pos ium", + "ner y", + "en ic", + "Ġher bal", + "ĠOut er", + "ĠBuild er", + "Ġinc ur", + "Ġide ologies", + "Ġback ups", + "cons uming", + "ĠDet ect", + "de ck", + "ĠKN OW", + "ĠG ret", + "ĠM IC", + "Ġtough ness", + "ĠEx hibit", + "Ġh ive", + "L es", + "ĠSCH OOL", + "ĠAt ari", + "ald e", + "ĠN ull", + "and estine", + "m ouse", + "Ġbrig ade", + "48 9", + "Ġrev ol", + "ĠLaw son", + "ĠW ah", + "op oly", + "eb ted", + "ĠS aunders", + "Ġ3 13", + "ĠW inc", + "Ġtab oo", + "ĠHel met", + "Ġw edge", + "ch ip", + "ĠT ina", + "b g", + "Ġinf uri", + "r n", + "Ġanomal ies", + "ĠSy nc", + "ĠEx am", + "ĠComm it", + "ĠDi ary", + "ĠALS O", + "ĠDe bor", + "omed ical", + "Ġcomprehens ion", + "6 55", + "Ġempower ing", + "Ġ ire", + "Ġju ices", + "ĠE TH", + "ĠBox ing", + "=\" /", + "Ġfacilit ated", + "p oke", + "ĠPars ons", + "ĠMod er", + "tra vel", + "Ġcivil izations", + "Ġliber tarians", + "Ġrun e", + "ĠCl arks", + "at hed", + "Ġcampaign ers", + "ĠDis patch", + "ĠFah renheit", + "ĠCap com", + "-------- --", + "Ġl ace", + "Ġdr aining", + "Ġl iner", + "ĠArt ificial", + "é n", + "t ask", + "] ).", + "ĠGM O", + "ĠOper ator", + "ord inary", + "ĠInf luence", + "ĠU ps", + "Ġpot ency", + "uss en", + "osp ons", + "ĠSw im", + "ĠDead line", + "Un ity", + "Ġcul inary", + "Ġenlight enment", + "Ġwe arer", + "Ġmin ed", + "Ġp ly", + "Ġinc est", + "ĠDVD s", + "W alk", + "B TC", + "Tr ade", + "Ġdev al", + "ib and", + "ĠOvers ight", + "Palest inian", + "Ġd art", + "Ġm ul", + "L R", + "Ġrem ovable", + "ĠReal ms", + "ì Ŀ", + "Ġmisc ar", + "ĠV ulkan", + "68 5", + "è re", + "ĠS ap", + "Ġmer ging", + "ĠCar ly", + "che ster", + "Ġbr isk", + "Ġlux urious", + "ĠGener ator", + "Ġbit terness", + "Ġed ible", + "Ġ24 3", + "T G", + "Ġrect angle", + "With No", + "bel ow", + "J enn", + "Ġdark est", + "Ġh itch", + "Ġdos age", + "Ġsc aven", + "ĠK eller", + "ĠIllust rated", + "Certain ly", + "ĠMaver icks", + "Marg inal", + "Ġdiarr hea", + "Ġenorm ously", + "Ġ9 99", + "sh r", + "qu art", + "Ġadam ant", + "ĠM ew", + "Ġren ovation", + "Ġcerv ical", + "ĠPercent age", + "en ers", + "ĠKim ber", + "Ġflo ats", + "Ġde x", + "ĠW itcher", + "ĠSwan sea", + "d m", + "Ġsal ty", + "y ellow", + "Ġca pe", + "ĠDr ain", + "ĠPaul a", + "ĠTol edo", + "les i", + "Mag azine", + "ĠW ick", + "ĠM n", + "ĠA ck", + "ĠR iding", + "AS ON", + "Ġhom ophobic", + "AR P", + "Ġwand ered", + "C PU", + "ood oo", + "ĠP ipe", + "Ġtight ening", + "ĠBut t", + "3 18", + "Ġdesert ed", + "S ession", + "Ġfacilit ating", + "J ump", + "Ġemer gencies", + "OW ER", + "Ġexhaust ive", + "ĠAF TER", + "Ġheart beat", + "ĠLab el", + "ack y", + "ĠCert ified", + "ilt ration", + "Z e", + "ĠU tt", + "Ġ13 00", + "Ġpres ume", + "ĠDis p", + "Ġsur ged", + "Ġdoll s", + "Col umb", + "Ġchim pan", + "ĠR azor", + "Ġt icks", + "Ġcouncill or", + "Ġpilgr image", + "ĠReb els", + "ĠQ C", + "ĠA uction", + "x ia", + "ik k", + "b red", + "Ġinsert ion", + "Ġco arse", + "d B", + "SE E", + "ĠZ ap", + "ĠF oo", + "Ġcontem por", + "ĠQuarter ly", + "ot ions", + "ĠAl chemist", + "ĠT rey", + "ĠDu o", + "S weet", + "80 4", + "ĠGi ov", + "Ġfun n", + "N in", + "h off", + "Ġram ifications", + "Ġ19 22", + "ĠExper ts", + "az es", + "Ġgar ments", + "ar ial", + "ĠN ab", + "Ġ25 7", + "ĠV ed", + "Ġhum orous", + "ĠPom pe", + "Ġn ylon", + "Ġlur king", + "ĠSerge y", + "ĠMatt is", + "Ġmisogyn y", + "ĠComp onents", + "ĠWatch ing", + "ĠF olk", + "ract ical", + "B ush", + "Ġt aped", + "Ġgroup ing", + "Ġbe ads", + "Ġ20 48", + "Ġcon 
du", + "quer que", + "Read ing", + "Ġgriev ances", + "Ult ra", + "Ġend point", + "H ig", + "ĠSt atic", + "ĠScar borough", + "L ua", + "ĠMess i", + "a qu", + "ĠPsy Net", + "ĠR udd", + "Ġa venue", + "v p", + "J er", + "Ġsh ady", + "ĠRes ist", + "ĠArt emis", + "Ġcare less", + "Ġbro kers", + "Ġtemper ament", + "Ġ5 20", + "T ags", + "ĠTurn ing", + "Ġut tered", + "Ġp edd", + "Ġimpro vised", + "Ġ: (", + "Ġtab l", + "Ġpl ains", + "16 00", + "press ure", + "ĠEss ence", + "marg in", + "friend s", + "ĠRest oration", + "Ġpoll ut", + "ĠPok er", + "ĠAugust ine", + "ĠC IS", + "ĠSE AL", + "or ama", + "Ġth wart", + "se ek", + "Ġp agan", + " º", + "cp u", + "Ġg arn", + "Ġass ortment", + "ĠI LCS", + "t ower", + "Recomm ended", + "Ġun born", + "ĠRandom Redditor", + "ĠRandomRedditor WithNo", + "Ġparaly zed", + "Ġeru ption", + "Ġinter sect", + "ĠSt oke", + "ĠS co", + "B ind", + "å ¾", + "ĠP NG", + "ĠNeg ative", + "ĠNO AA", + "Le on", + "Ġall oy", + "ĠL ama", + "ĠD iversity", + "5 75", + "Ġunderest imated", + "ĠSc or", + "Ġm ural", + "Ġb usted", + "so on", + "l if", + "Ġnone x", + "Ġall ergy", + "ĠUnder world", + "ĠR ays", + "ĠBl asio", + "Ġh rs", + "ĠD ir", + "Ġ3 27", + "by ter", + "Ġrepl acements", + "Ġactiv ates", + "ri ved", + "M H", + "Ġp ans", + "ĠH I", + "Ġlong itudinal", + "Ġnu isance", + "al er", + "Ġsw ell", + "ĠS igned", + "s ci", + "ĠIs les", + "ĠA GA", + "Ġdef iant", + "Ġson ic", + "oc on", + "K C", + "ĠA im", + "t ie", + "ah ah", + "Ġm L", + "D X", + "Ġb isc", + "ĠBill board", + "ĠSY STEM", + "NE Y", + "ga ard", + "Ġdist ressed", + "former ly", + "Al an", + "Ġche fs", + "Ġopt ics", + "ĠC omet", + "ĠAM C", + "Ġredes igned", + "irm ation", + "Ġsight ings", + "38 2", + "3 11", + "ĠW B", + "Ġcont raction", + "ĠT OTAL", + "D ual", + "Ġstart led", + "Ġunderstand ably", + "Ġsung lasses", + "ETH OD", + "Ġd ocker", + "Ġsurf ing", + "ĠH EL", + "ĠSl ack", + "ton es", + "Ġsh alt", + "Vis ual", + "49 8", + "Dep artment", + "c ussion", + "Ġunrest ricted", + "Ġt ad", + "Ġre name", + "employ ed", + "Ġeduc ating", + "Ġgrin ned", + "bed room", + "ĠActiv ities", + "ĠV elvet", + "ĠSW AT", + "Ġsh uffle", + "ig or", + "Ġsatur ation", + "F inding", + "c ream", + "ic ter", + "Ġv odka", + "tr acking", + "te c", + "Ġfore ground", + "iest a", + "Ġve hement", + "ĠEC B", + "ĠT ie", + "E y", + "Ġt urtles", + "ĠRail road", + "ĠKat z", + "ĠFram es", + "Ġmen ace", + "ĠFell owship", + "ĠEss ential", + "ugg ish", + "Ġdri p", + "ch witz", + "ĠKy oto", + "s b", + "ĠN ina", + "Param eter", + "Ġal arms", + "ĠCl aud", + "Ġpione ering", + "Ġchief ly", + "ĠSc ream", + "Col lection", + "Ġthank fully", + "ĠRonald o", + "åŃ IJ", + "st rip", + "ĠDisney land", + "com mercial", + "See ing", + "S oul", + "Ġevac uate", + "Ġc iv", + "ĠAs he", + "Ġdiv ides", + "ĠD agger", + "rehens ive", + "Ġber ries", + "ĠD F", + "Ġs ushi", + "Ġplur ality", + "W I", + "Ġdisadvant aged", + "Ġbatt alion", + "ob iles", + "45 1", + "Ġcl ing", + "Ġunden iable", + "ĠL ounge", + "Ġha unt", + "p he", + "Ġquant ify", + "Ġdiff ered", + "Ġ[* ]", + "ĠV iz", + "c um", + "sl ave", + "Ġvide og", + "Ġqu ar", + "Ġbund les", + "ĠAl onso", + "t ackle", + "Ġneur onal", + "Ġlandsl ide", + "conf irmed", + "ĠDep th", + "Ġrenew ables", + "B ear", + "ĠMaced onia", + "Ġjer seys", + "Ġb unk", + "ĠSp awn", + "ĠControl s", + "ĠBuch anan", + "Ġrobot ics", + "Ġemphas izing", + "ĠTut orial", + "h yp", + "ist on", + "Ġmonument al", + "æ °", + "ĠCar ry", + "Ġt bsp", + "en ance", + "H ill", + "art hed", + "Ġro tten", + "De an", + "Ġtw isting", + "Ġgood will", + "Ġimm ersion", + "L iving", + "Ġbr 
ushes", + "ĠC GI", + "ĠAt k", + "tr aditional", + "Ġph antom", + "ĠSt amina", + "Ġexpans ions", + "ĠMar in", + "Ġembark ed", + "ĠE g", + "int estinal", + "ĠPE OPLE", + "ĠBo oth", + "ĠApp alach", + "Ġreleg ated", + "V T", + "M IT", + "Ġmust er", + "Ġwithdraw ing", + "Ġmicrosc ope", + "ĠG athering", + "ĠC rescent", + "ĠArgent ine", + "ĠDec re", + "ĠDomin ic", + "Ġbud s", + "ant age", + "ĠI on", + "Ġwid ened", + "ONS ORED", + "ĠGl oves", + "iann opoulos", + "raz en", + "fe el", + "Ġrepay ment", + "Ġhind sight", + "ĠRE ALLY", + "ĠPist ol", + "ĠBra h", + "Ġwat ts", + "Ġsurv ives", + "Ġfl urry", + "iss y", + "Al ert", + "ĠUrug uay", + "Ph oenix", + "S low", + "ĠG rave", + "ĠF ir", + "Ġmanage able", + "Ġtar iff", + "ĠU DP", + "ĠPist ons", + "ĠNiger ian", + "Ġstrike outs", + "Ġcos metics", + "whel ming", + "f ab", + "c ape", + "pro xy", + "Ġre think", + "Ġover coming", + "sim ple", + "Ġw oo", + "Ġdistract ing", + "ĠSt anton", + "ĠTuls a", + "ĠD ock", + "65 9", + "Ġdisc ord", + "ĠEm acs", + "ĠV es", + "ĠR OB", + "Ġreass uring", + "Ġcons ortium", + "Muslim s", + "3 21", + "Ġprompt s", + "se i", + "ĠH itch", + "imp osed", + "ĠF ool", + "Ġindisc rim", + "wr ong", + "bu querque", + "D avis", + "! ]", + "Ġtim eless", + "ĠNE ED", + "Ġpestic ide", + "Ġrally ing", + "ĠCal der", + "Ġå ¤", + "Ġx p", + "ĠUn le", + "ĠEx port", + "lu aj", + "B uff", + ") [", + "Ġsq or", + "S audi", + "Ġis tg", + "Ġindul ge", + "pro c", + "Ġdisg usted", + "Ġcomp ounded", + "Ġn em", + "Ġschool ing", + "ĠC ure", + "process ing", + "S ol", + "Ġpro verb", + "it ized", + "ĠAlv arez", + "Ġscar f", + "Ġrect angular", + "re ve", + "Ġh ormonal", + "ĠSt ress", + "itiz en", + "Ġ4 25", + "girl s", + "ĠNo ir", + "ĠR app", + "Ġmar ches", + "ch urch", + "ĠUs es", + "Ġ40 5", + "ĠBer m", + "Ġord inances", + "ĠJud gment", + "Charg es", + "ĠZ in", + "Ġdust y", + "Ġstraw berries", + "Ġper ce", + "ĠTh ur", + "ĠDebor ah", + "net flix", + "ĠLam bert", + "Ġam used", + "ĠGu ang", + "Y OU", + "R GB", + "ĠC CTV", + "Ġf iat", + "r ang", + "Ġf ederation", + "ĠM ant", + "ĠB ust", + "ĠM are", + "respect ive", + "ĠM igration", + "ĠB IT", + "59 0", + "Ġpatriot ism", + "Ġout lining", + "reg ion", + "ĠJos é", + "Ġbl asting", + "ĠEz ra", + "B s", + "Ġundermin es", + "ĠSm ooth", + "Ġcl ashed", + "rad io", + "Ġtransition ing", + "ĠBucc aneers", + "ĠOw l", + "Ġplug s", + "Ġh iatus", + "ĠPin ball", + "Ġm ig", + "ĠNut r", + "ĠWolf e", + "Ġinteg ers", + "Ġor bits", + "ĠEd win", + "ĠDirect X", + "b ite", + "Ġbl azing", + "v r", + "Ed ge", + "ĠP ID", + "ex it", + "ĠCom ed", + "ĠPath finder", + "ĠGu id", + "ĠSign s", + "ĠZ er", + "ĠAg enda", + "Ġreimburse ment", + "M esh", + "i Phone", + "ĠMar cos", + "ĠS ites", + "h ate", + "en burg", + "Ġs ockets", + "p end", + "Bat man", + "v ir", + "ĠSH OW", + "Ġprovision al", + "con n", + "ĠDeath s", + "AT IVE", + "Pro file", + "sy m", + "J A", + "Ġnin ja", + "inst alled", + "id ates", + "eb ra", + "ĠOm aha", + "Ġse izing", + "ĠBe asts", + "Ġsal ts", + "M ission", + "Gener ally", + "ĠTr ilogy", + "he on", + "leg ates", + "Ġd ime", + "Ġf aire", + "par able", + "G raph", + "Ġtotal ing", + "Ġdiagram s", + "ĠYan uk", + "ple t", + "ĠMe h", + "Ġmyth ical", + "ĠStep hens", + "aut ical", + "ochem istry", + "Ġkil ograms", + "Ġel bows", + "anc ock", + "ĠB CE", + "ĠPr ague", + "Ġimpro v", + "ĠDev in", + "Ġ\" \\", + "par alle", + "Ġsuprem acists", + "ĠB illion", + "Ġreg imen", + "inn acle", + "Ġrequ isite", + "ang an", + "ĠBur lington", + "ain ment", + "ĠObject ive", + "oms ky", + "G V", + "Ġun ilateral", + "Ġt c", + "Ġh ires", + "ment al", + 
"Ġinvol untary", + "Ġtrans pl", + "ĠASC II", + " ¨", + "Ev ents", + "Ġdoub ted", + "ĠKa plan", + "ĠCour age", + "ig on", + "ĠMan aging", + "ĠT art", + "Ġfalse hood", + "ĠV iolet", + "Ġair s", + "Ġfertil izer", + "Brit ain", + "Ġaqu atic", + "ou f", + "W ords", + "ĠHart ford", + "Ġeven ings", + "ĠV engeance", + "qu ite", + "G all", + "ĠP ret", + "Ġp df", + "ĠL M", + "ĠSo chi", + "ĠInter cept", + "9 20", + "Ġprofit ability", + "ĠId le", + "ĠMac Donald", + "ĠEst ablishment", + "um sy", + "Ġgather ings", + "ĠN aj", + "Charl ie", + "Ġas cent", + "ĠProt ector", + "Ġal gebra", + "Ġbi os", + "for ums", + "EL S", + "Introdu ced", + "Ġ3 35", + "Ġastron omy", + "Cont ribut", + "ĠPol ic", + "Pl atform", + "Ġcontain ment", + "w rap", + "Ġcoron ary", + "ĠJ elly", + "man ager", + "Ġheart breaking", + "c air", + "ĠChe ro", + "c gi", + "Med ical", + "ĠAccount ability", + "! !\"", + "oph ile", + "Ġpsych otic", + "ĠRest rict", + "Ġequ itable", + "iss ues", + "Ġ19 05", + "ĠN ek", + "c ised", + "ĠTr acking", + "Ġo zone", + "Ġcook er", + "ros is", + "Ġre open", + "Ġinf inity", + "ĠPharm aceutical", + "ens ional", + "Att empt", + "ĠR ory", + "Mar co", + "Ġawa its", + "H OW", + "t reated", + "Ġbol st", + "Ġreve red", + "Ġp ods", + "opp ers", + "00 10", + "Ġampl itude", + "ric an", + "SP ONSORED", + "Ġtrou sers", + "Ġhal ves", + "ĠK aine", + "ĠCut ler", + "ĠA UTH", + "Ġsplend id", + "Ġprevent ive", + "ĠDud ley", + "if acts", + "umin ati", + "ĠY in", + "Ġad mon", + "ĠV ag", + "Ġin verted", + "Ġhast ily", + "ĠH ague", + "L yn", + "Ġled ger", + "Ġastron omical", + "get ting", + "Ġcirc a", + "ĠC ic", + "ĠTenn is", + "Lim ited", + "Ġd ru", + "ĠBY U", + "Ġtrave llers", + "Ġp ane", + "ĠInt ro", + "Ġpatient ly", + "Ġa iding", + "Ġlo os", + "ĠT ough", + "Ġ29 3", + "Ġconsum es", + "Source File", + "Ġ\"\" \"", + "Ġbond ing", + "Ġtil ted", + "Ġmenstru al", + "ĠCel estial", + "UL AR", + "Plug in", + "Ġrisk ing", + "N az", + "ĠRiy adh", + "Ġacc redited", + "Ġsk irm", + "é Ľ", + "Ġexam iner", + "Ġmess ing", + "Ġnear ing", + "ĠC hern", + "ĠBeck ham", + "Ġsw apped", + "Ġgo ose", + "K ay", + "Ġlo fty", + "ĠWal let", + "Ġ[ '", + "Ġap ocalypse", + "Ġb amboo", + "ĠSP ACE", + "ĠEl ena", + "Ġ30 6", + "ac ons", + "Ġtight ened", + "Ġadolesc ence", + "Ġrain y", + "Ġvandal ism", + "ĠNew town", + "Ġcon ject", + "c akes", + "Ġche ated", + "Ġmoder ators", + "par ams", + "E FF", + "Ġdece it", + "ĠST L", + "ĠTanz ania", + "ĠR I", + "Ġ19 23", + "ĠEx ile", + "the l", + "Ġthe olog", + "Ġquir ky", + "ĠIr vine", + "Ġneed y", + "or is", + "U m", + "K a", + "Ġmail box", + "3 22", + "Ġb os", + "ĠPet ra", + "K ING", + "Ġenlarg ed", + "O ften", + "Ġbad ass", + "Ġ3 43", + "ĠPl aces", + "ĠC AD", + "Ġpr istine", + "Ġinterven ing", + "d irection", + "Ġl az", + "ĠD SM", + "Ġproject ing", + "ĠF unk", + "ag og", + "pay ment", + "n ov", + "Ġch atter", + "AR B", + "Ġexam inations", + "ĠHouse hold", + "ĠG us", + "F ord", + "4 14", + "B oss", + "Ġmy stic", + "Ġle aps", + "ĠB av", + "ul z", + "b udget", + "Foot ball", + "Ġsubsid ized", + "Ġfirst hand", + "Ġcoinc ide", + "oc ular", + "Con n", + "ĠColl abor", + "Ġfool s", + "am ura", + "ah ar", + "r ists", + "Ġsw ollen", + "Ġexp ended", + "ĠP au", + "s up", + "Ġsp ar", + "Ġkey note", + "s uff", + "Ġunequ al", + "Ġprogress ing", + "str ings", + "ĠGamer gate", + "Dis ney", + "ĠEle ven", + "om nia", + "Ġscript ed", + "Ġear ners", + "bro ther", + "ĠEn abled", + "æ ³", + "Ġlar vae", + "ĠL OC", + "m ess", + "Wil son", + "ĠTem plate", + "success fully", + "Ġparam ount", + "Ġcamoufl age", + "Ġbind s", + "ĠQu iet", + "ĠSh 
utterstock", + "r ush", + "Ġmasc ot", + "fort une", + "ĠCol t", + "ĠBe yon", + "hab i", + "Ġha irc", + "Ġ26 7", + "ĠDe us", + "Ġtw itch", + "Ġconcent rating", + "Ġn ipples", + "c ible", + "Ġg ir", + "N Z", + "M ath", + "n ih", + "Requ ired", + "Ġp onder", + "ĠS AN", + "Ġwedd ings", + "Ġl oneliness", + "N ES", + "ĠMah jong", + "69 5", + "add le", + "ĠGar ner", + "ĠC OUR", + "Br idge", + "Ġsp ree", + "ĠCald well", + "Ġbri bery", + "Ġ���� ����", + "plug ins", + "Ġr acket", + "Ġchamp agne", + "vers ible", + "V ote", + "Ġmod ifiers", + "May or", + "6 80", + "Ġassemb lies", + "ĠS ultan", + "ĠN ing", + "ĠLad ies", + "Ġsulf ur", + "Ġor bs", + "Ġ---- -", + "____ ___", + "ĠJournal ism", + "Ġes ports", + "Ġl ush", + "Ġh ue", + "Ġspect ral", + "H onest", + "ãĥ ı", + "Ġbus hes", + "Ġrein forcement", + "Ġre opened", + "ĠWhe els", + "ĠM org", + "rie ving", + "Ġaux iliary", + "Ġj Query", + "ĠB AT", + "tes que", + "Ġver tex", + "p ure", + "f rey", + "ãĤ º", + "d os", + "Ġty ph", + "Ġc ull", + "Ġe q", + "Ġdec on", + "Ġtoss ing", + "Ġdispar ate", + "ĠBr igham", + "print f", + "led ged", + "Ġsu nd", + "Ġco zy", + "Ġhepat itis", + "per forming", + "Ġav al", + "ĠG G", + "f uture", + "Ġpet ertodd", + "ĠKos ovo", + "Ġmagn ets", + "Al ready", + "ĠEd ison", + "ĠCe res", + "ĠRA ID", + "Ġbrill iance", + "57 6", + "Ġder ives", + "Ġhypert ension", + "ĠÎ Ķ", + "Ġlamb da", + "Ġfl air", + "Ġmission aries", + "Ġrap es", + "ĠSt arter", + "ĠMon ths", + "Ġdef y", + "Ġseism ic", + "ĠR aphael", + "Ġeuro zone", + "65 6", + "z sche", + "Ġscr atched", + "Ġb ows", + "ĠLenn on", + "ĠGa ia", + "Ġdri pping", + "f acts", + "A le", + "Ġfrog s", + "ĠBre ast", + "ogene ity", + "ĠProsecut or", + "Ġampl ified", + "ĠHod g", + "ĠF n", + "Th ousands", + "ĠNI H", + "ĠMonitor ing", + "FT WARE", + "ĠPri ebus", + "ĠG rowing", + "hun ter", + "Ġdiagn ose", + "ĠM ald", + "ĠL R", + "Ġcrown ed", + "Ġburst ing", + "Ġdiss olution", + "j avascript", + "Ġuseful ness", + "ĠExec ution", + ": (", + "ĠIv ory", + "a ah", + "Ġpersecut ed", + "viol ence", + "ist as", + "ĠCr ate", + "Ġimpuls es", + "ĠSp ani", + "ed es", + "Hand le", + "ĠZ erg", + "think able", + "Last ly", + "Ġspont aneously", + "Ġinconven ient", + "Ġdismiss ing", + "Ġpl otted", + "Ġeight y", + "Ġ7 37", + "r ish", + "ĠThor nton", + "ath am", + "Ġsit com", + "V en", + "Rec ipe", + "t el", + "l und", + "Ġcle ars", + "ĠSas uke", + "Ġ25 8", + "Ġopt ing", + "Ġen raged", + "est hetic", + "ĠA e", + "uch s", + "Pre p", + "Fl ow", + "Ġrun off", + "ĠE ating", + "ĠG iles", + "ĠAct ing", + "res ources", + "ib aba", + "Ġr pm", + "Ġske wed", + "ĠBl anc", + "ĠS akuya", + "Ġhot ter", + "Ġ19 24", + "op ian", + "ck o", + "Ġcr umbling", + "Ġcapt ains", + "ĠAppropri ations", + "le aders", + "dro pping", + "an uts", + "Ġrevers ing", + "ĠP ose", + "ĠS ek", + "Sc ot", + "ĠIde a", + "c ise", + "ĠSloven ia", + "Ġ3 17", + "Do ctor", + "Ġcro cod", + "ald i", + "Se a", + "ĠFar rell", + "Ġmerc enaries", + "ĠR NC", + "ĠGu ess", + "Ġp acing", + "M achine", + "Streamer Bot", + "ĠChar ity", + "Ġ29 8", + "Ġcann ons", + "ĠTob y", + "TPP StreamerBot", + "ĠPass ion", + "cf g", + "Th om", + "Ġbad ges", + "ĠBern stein", + ". 
âĢĵ", + "ĠP OP", + "ĠCon j", + "Ġinitial ization", + "Ġbiod iversity", + "D ub", + "Ġfeud al", + "Ġdisclaim er", + "Ġc row", + "Ġign ition", + "ar f", + "S HA", + "Ġk Hz", + "h azard", + "ĠArt ists", + "oe uv", + "67 9", + "ĠRud y", + "N ine", + "ĠRam adan", + "å ½", + "itt o", + "Ġadren aline", + "C ert", + "Ġsmell ed", + "Ġimp unity", + "Ġag endas", + "ĠRe born", + "ĠCon cent", + "ĠSe ems", + "Ġo mega", + "ĠDust in", + "Ġback er", + "ĠSau ce", + "ĠBoy le", + "W IN", + "Ġsp ins", + "Ġpa uses", + "u pt", + "Ġshred ded", + "Ġstra pped", + "ĠCor ruption", + "Ġscr atches", + "Ġn i", + "Ġatt ire", + "ĠS AF", + "Factory Reloaded", + "ĠI PS", + "Ġ( %", + "Ġsem inar", + "f ocus", + "c ivil", + "Ġ18 60", + "int osh", + "Ġcontin ual", + "Ġabbre vi", + "ĠS ok", + "oc obo", + "X M", + "Ġfr antic", + "Ġunavoid able", + "Ġar tery", + "Ġannot ations", + "b ath", + "Cl imate", + "Ġd ors", + "ĠSl ide", + "co ord", + "ĠRel oad", + "ĠL DL", + "ĠLove craft", + "Ġunim agin", + "Ġresemb led", + "Ġbarr acks", + "n p", + "Ġsurrog ate", + "Ġcategor ized", + "ãĤ ©", + "Ġvacc inated", + "Ġdrain age", + "Ġind ist", + "ĠWhats App", + "Ġ18 70", + "oler ance", + "inv oke", + "am orph", + "Ġrecon nect", + "Ġem anc", + "Ġblind ness", + "Ġ12 80", + "intern et", + "c ollar", + "Ġalt ru", + "Ġab yss", + "ĠT RI", + "65 7", + "Ġinf used", + "HE AD", + "Ġforest ry", + "ĠWood y", + "ĠC i", + "w i", + "s am", + "78 4", + "hol iday", + "Ġmog ul", + "ĠF ees", + "ĠD EN", + "In ternal", + "ur bed", + "f usc", + "at om", + "ĠIll usion", + "Ġpoll ed", + "Ġfl ap", + "Ġco ax", + "L GBT", + "An aly", + "ĠSect ions", + "ĠCalif orn", + "em n", + "Ġh ither", + "ĠN IGHT", + "Ġn ailed", + "ĠPip eline", + "39 1", + "o of", + "ĠPr imal", + "vere nd", + "Ġsl ashing", + "Ġret ri", + "avi our", + "Ġdepart ing", + "g il", + "IS C", + "Ġmid way", + "Ġultras ound", + "Ġbeh aving", + "ĠT ara", + "class es", + "V irtual", + "ĠColon ial", + "Ġstri pping", + "Ġorchestr ated", + "ĠGra ves", + "45 2", + "ĠIron ically", + "ĠWrit ers", + "Ġl ends", + "ĠMan z", + "Ġra ven", + "Ġoxid ative", + "Ġ26 6", + "EL F", + "act ually", + "asc ar", + "D raft", + "Ġfavour able", + "Ġhumili ating", + "Ġf idelity", + "ĠH of", + "ĠX uan", + "49 6", + "Ġlay ered", + "at is", + "79 0", + "Ġpay check", + "it on", + "K ar", + "ĠVM ware", + "ĠFar mer", + "Ġserv ic", + "gl omer", + "Ġsl ump", + "ĠFab ric", + "ĠD OC", + "est ing", + "Ġreass ure", + "Ġph yl", + "v olt", + "it ory", + "R ules", + "Ġoxid ation", + "Ġpri zed", + "Ġmist ress", + "ĠDj ango", + "WAR N", + "å ij", + "Ġenc ode", + "ĠFeed back", + "Ġstupid ity", + "I an", + "ĠYugoslav ia", + "× ¨", + "ac l", + "UT E", + "19 77", + "Ġqual ifies", + "Ġpuls es", + "pret ty", + "Ġfro ze", + "Ġs s", + "Iter ator", + "Ġur gently", + "Ġm ailed", + "ĠCh am", + "Ġsust aining", + "Ġbas il", + "Ġpupp ies", + "il ant", + "ĠP LEASE", + "l ap", + "ace ous", + "F ear", + "ĠMaster y", + "aut omatic", + "ĠT AG", + "Ġant im", + "ag les", + "47 3", + "fram es", + "Ġwh ispers", + "ĠWho ever", + "Ġbra very", + "ĠUK IP", + "ract ions", + "\"\" \"", + "Ġt ame", + "Ġpart ed", + "every thing", + "CON T", + "Ġind ebted", + "Ġadd r", + "re k", + "IR ED", + "Ġem inent", + "cl inton", + "Ġo usted", + "Ġreview er", + "Ġmelt down", + "Ġre arr", + "ĠY ao", + "the real", + "aby te", + "Ġst umbling", + "Ġbat ches", + "Ġ25 9", + "Ġcontrace ptive", + "Ġprost itute", + "ens is", + "De cl", + "ĠSt rikes", + "M ilitary", + "ĠO ath", + "v acc", + "pp ings", + "05 2", + "Ġpart Name", + "amp ing", + "Rep orts", + "K I", + "CH R", + "Ġsubt ly", + "sw ers", + "Bl 
ake", + "us ual", + "Ġcontest ants", + "Ġcart ridges", + "ĠGRE AT", + "Ġbl ush", + "ĠâĢ º", + "47 2", + "Ġreason ed", + "ãĥ ¤", + "paralle led", + "Ġd yn", + "ag ate", + "Ġnight ly", + "å Ĩ", + "55 6", + "Ġsem antic", + "ĠAdv oc", + "Ġ !!", + "Ġdisag rees", + "ĠB W", + "V eh", + "Ġharm ing", + "Ġembr aces", + "Ġstri ves", + "Ġin land", + "ĠK ard", + "Ġhe ats", + "ĠGin ny", + "ut an", + "ern aut", + "yl ene", + "ĠE lev", + "J D", + "Ġh ars", + "ĠStar r", + "Ġsk ysc", + "Ġcollabor ators", + "Us ually", + "Ġrev olutions", + "ĠSTAT S", + "Ġdism antle", + "Ġconfident ly", + "Ġkin etic", + "Al i", + "Ġpercent ile", + "Ġextract ing", + "ill ian", + "est ead", + "Ġphysic ists", + "ĠMarsh al", + "Ġfell owship", + "Ġd ashed", + "ĠU R", + "ĠSi oux", + "ĠComp act", + "am ide", + "P ython", + "ĠLe igh", + "ĠPharm ac", + "ist rates", + "her ical", + "Ġf ue", + "ĠE min", + "Ġ( {", + "ĠNeighbor hood", + "Ġdisrupt ing", + "ĠD up", + "Ġg land", + "ĠSe v", + "ĠMar ian", + "arg on", + "ĠD und", + "Ġ< !--", + "Ġstr and", + "Ġstadium s", + "z os", + "Ġpsych osis", + "ĠR ack", + "Ġbrilliant ly", + "ï¸ ı", + "Ġsubmer ged", + "ĠInst it", + "ĠCh ow", + "Ġc ages", + "ĠH ats", + "ĠU rs", + "Ġdil uted", + "us at", + "ien ne", + "ĠMembers hip", + "ĠBur k", + "Ġ ie", + "Ġarche type", + "D rug", + "ult on", + "ĠSp ock", + "ĠMcK ay", + "ĠDep end", + "F eatured", + "S oc", + "19 78", + "ĠB ere", + "Ġrelent lessly", + "Ġcripp ling", + "Ġar thritis", + "çĶ Ł", + "ĠTrop ical", + "ĠBul g", + "ĠCher yl", + "Ġadm irable", + "Ġsub title", + "Over ride", + "Ġorig inating", + "ĠC CP", + "Ġsw ore", + "ĠSo le", + "ĠDis orders", + "3 29", + "Ġprocess ion", + "Ġref urb", + "Ġimm ersed", + "requ ently", + "Ġskept ics", + "Ġcer amic", + "m itter", + "en stein", + "b elt", + "ĠT IT", + "b idden", + "Ġf ir", + "m ist", + "> ]", + "Ġwe ave", + "ĠParad ox", + "Ġentr usted", + "ĠBarcl ays", + "Ġnovel ist", + "og ie", + "80 6", + "Ġnin ety", + "Ġdisag reements", + "@@@@ @@@@", + "ĠAus chwitz", + "c ars", + "ĠL ET", + "t ub", + "arant ine", + "P OS", + "Ġback story", + "Ġcheer ful", + "ĠR ag", + "ek a", + "bi ased", + "Ġinexper ienced", + "ak ra", + "ĠW itt", + "t an", + "Ġrap ist", + "Ġplate au", + "ch al", + "ĠInqu is", + "exp ression", + "Ġc ipher", + "Ġsh aving", + "add en", + "re ly", + "( \\", + "ism a", + "ĠReg ulatory", + "CH AR", + "ily n", + "N VIDIA", + "G U", + "Ġmur m", + "la us", + "Christ opher", + "Ġcontract ual", + "ĠPro xy", + "ĠJa ime", + "ĠMethod ist", + "Ġstew ards", + "st a", + "per ia", + "Ġphys iology", + "Ġbump ed", + "Ġf ructose", + "Austral ian", + "ĠMet allic", + "ĠMas querade", + "ar b", + "Ġprom ul", + "Ġdown fall", + "Ġbut cher", + "Ġb our", + "ĠIN FORMATION", + "ĠB is", + "pect s", + "ad ena", + "Ġcontempl ating", + "ar oo", + "cent ered", + "ĠPe aks", + "Us ed", + "Ġmod em", + "Ġg enders", + "Ġ8 000", + "37 1", + "Ġm aternity", + "ĠR az", + "Ġrock ing", + "Ġhandgun s", + "ĠD ACA", + "Aut om", + "ĠN ile", + "Ġtum ult", + "ĠBenef it", + "ĠAppro ach", + "works hop", + "ĠLe aving", + "G er", + "inst ead", + "Ġvibr ations", + "Ġrep ositories", + "49 7", + "ĠA unt", + "ĠJ ub", + "ĠExp edition", + "Al pha", + "Ġs ans", + "Ġoverd ue", + "Ġoverc rowd", + "Ġlegisl atures", + "Ġp aternal", + "ĠLeon ardo", + "Ġexp ressive", + "Ġdistract ions", + "Ġsil enced", + "tr ust", + "Ġb iking", + "Ġ5 60", + "Ġpropri et", + "Ġimp osition", + "Ġcon glomer", + "Ġ= ================================================================", + "ĠTe aching", + "ĠY ose", + "int ensive", + "T own", + "Ġtroll ing", + "ĠGr ac", + "ĠAS US", + "Y o", + 
"Ġspecial s", + "ĠNep h", + "ĠGod zilla", + "Dat abase", + "ĠHe gel", + "Ġ27 2", + "19 76", + "ĠGl oria", + "Ġdis emb", + "ĠInvestig ations", + "ĠB ane", + "ag ements", + "St range", + "Ġtre asury", + "ĠPl ays", + "Ġundes irable", + "Ġwid ening", + "Ġverb ally", + "Ġinf ancy", + "Ġcut ter", + "f ml", + "Ġ21 00", + "prot otype", + "f ine", + "Ġdec riminal", + "Ġdysfunction al", + "Ġbes ie", + "ĠErn st", + "z eb", + "Ġnort heastern", + "Ġa ust", + "por ate", + "ĠMar lins", + "Ġsegreg ated", + "ew orld", + "ĠMa her", + "Ġtra verse", + "Ġmon astery", + "ur gy", + "G ear", + "s and", + "Com pl", + "ĠE MP", + "Ġpl ent", + "ĠMer cer", + "Ġ27 6", + "TA BLE", + "Config uration", + "H undreds", + "Ġpr ic", + "Ġcollabor ating", + "ĠPar amount", + "ĠCumm ings", + "Ġ( <", + "Ġrecord er", + "Ġfl ats", + "Ġ4 16", + "wh ose", + "Font Size", + "ĠOr bit", + "Y R", + "Ġwr ists", + "Ġb akery", + ") }", + "ĠB ounty", + "ĠLanc aster", + "Ġend ings", + "acc ording", + "ĠSal am", + "e asy", + "75 5", + "ĠBur r", + "ĠBarn ett", + "onom ous", + "Un ion", + "Ġpreced ence", + "ĠScholars hip", + "ĠU X", + "Ġroll out", + "Ġbo on", + "al m", + "ĠCan ter", + "æ µ", + "Ġround ing", + "Ġcl ad", + "Ġv ap", + "ĠF eatured", + "is ations", + "Ġ5 40", + "pol ice", + "Ġunsett ling", + "Ġdr ifting", + "ĠLum ia", + "ĠObama Care", + "ĠF avor", + "Hy per", + "ĠRoth schild", + "ĠMil iband", + "an aly", + "ĠJul iet", + "H u", + "Ġrec alling", + "a head", + "69 6", + "Ġunf avorable", + "Ġd ances", + "O x", + "Ġleg ality", + "Ġ40 3", + "rom ancer", + "Ġinqu ire", + "ĠM oves", + "\\ \">", + "ĠVari ant", + "ĠMess iah", + "ĠL CS", + "ĠBah á", + "75 6", + "Ġeyeb row", + "Ġ ¥", + "ĠMc F", + "ĠFort y", + "M as", + "Ġpan icked", + "Ġtransform ations", + "q q", + "Ġrev olves", + "ring e", + "ĠA i", + "ax e", + "Ġon ward", + "ĠC FR", + "ĠB are", + "log in", + "Ġliqu ids", + "Ġde comp", + "second ary", + "il an", + "ĠCon vert", + "ami ya", + "Ġprosecut ing", + "Ġâī ¡", + "ĠYork ers", + "ĠByr ne", + "sl ow", + "aw ei", + "J ean", + "Ġ26 9", + "ĠSky dragon", + "Ġ é", + "ĠNicarag ua", + "ĠHuck abee", + "ĠHigh ly", + "Ġamph ib", + "ĠPast or", + "ĠL ets", + "Ġbl urred", + "Ġvisc eral", + "ĠC BO", + "Ġcollabor ated", + "z ig", + "Leg al", + "Ġapart heid", + "Ġbr id", + "Ġpres et", + "ĠD ET", + "ĠAM A", + "× Ķ", + "arch ing", + "auc uses", + "build er", + "Ġpo etic", + "Ġem ulator", + "ĠMole cular", + "Ġhon oring", + "ise um", + "Ġtract or", + "ĠCl uster", + "ĠCal m", + "ared evil", + "Ġsidew alks", + "Ġviol in", + "Ġgeneral ized", + "ĠAle c", + "Ġemb argo", + "Ġfast ball", + "ĠHT TPS", + "ĠL ack", + "ĠCh ill", + "ri ver", + "C hel", + "ĠSw arm", + "ĠLev ine", + "ro ying", + "L aunch", + "Ġkick er", + "Ġadd itive", + "ĠDe als", + "W idget", + "cont aining", + "Ġescal ate", + "ĠOP EN", + "Ġtwe aked", + "Ġst ash", + "Ġsp arks", + "ĠEs sex", + "ĠE cc", + "Ġconv ict", + "Ġblog ging", + "I ER", + "ĠH L", + "Ġmurd erers", + "75 9", + "ĠH ib", + "Ġde pl", + "ĠJ ord", + "S ac", + "Ġdis sect", + "ĠHow e", + "os her", + "Ġcustom izable", + "ĠFran z", + "Ġat ro", + "Ä ĩ", + "Ġ000 4", + "Ġout post", + "R oss", + "Ġglyph osate", + "ĠHast ings", + "ĠBE FORE", + "Ġsh ove", + "o pped", + "ĠSc ala", + "Ġam ulet", + "an ian", + "Ġexacerb ated", + "Ġe ater", + "47 1", + "UM E", + "Ġpul p", + "izont al", + "ĠZ am", + "ĠAT I", + "imm une", + "aby tes", + "Ġunnecess arily", + "ĠC AT", + "ĠAx is", + "Ġvisual ize", + "à ī", + "ĠRad ical", + "f m", + "Doc uments", + "ĠFor rest", + "Ġcontext ual", + "ĠSy mbol", + "Ġtent ative", + "ĠDO ES", + "ĠGood s", + "Ġintermitt ent", + "} 
:", + "medi ated", + "Ġridic ule", + "Ġathe ism", + "Ġpath ogens", + "ĠM um", + "Ġre introdu", + "Ġ30 7", + "i HUD", + "Ġflash light", + "Ġsw earing", + "Ġp engu", + "B u", + "Ġrot ated", + "ĠCr ane", + "Ġ() );", + "Ġfashion able", + "Ġendors ing", + "46 3", + ") [", + "Ġingest ion", + "Ġcook s", + "Ġ9 50", + "ot omy", + "ĠIm am", + "Ġk a", + "Ġte aser", + "ĠGhost s", + "ĠãĤ µ", + "19 69", + "Ï ĥ", + "ub by", + "Ġconver ter", + "zan ne", + "end e", + "ĠPre par", + "ĠNic kel", + "ĠChim era", + "h im", + "ĠTyr ann", + "ĠSabb ath", + "ĠNich ols", + "Ġra pt", + "ih ar", + "Ġshe lling", + "Ġillum inate", + "Ġdent ist", + "ut or", + "ĠInteg ration", + "Ġwh ims", + "ĠLiter ary", + "Be aut", + "Ġp archment", + "ag ara", + "Br and", + "Ġder og", + "âĢ¦ )", + "ĠNor se", + "Ġunw itting", + "Ġc uc", + "Ġborder line", + "Ġupset ting", + "Ġrec ourse", + "Ġd raped", + "ĠRad ar", + "Ġcold er", + "ĠPep si", + "im inary", + "], [", + "65 8", + "V i", + "ĠF rem", + "ĠP es", + "Ġveter inary", + "ĠT ED", + "ĠEp idem", + "n ova", + "k id", + "Ġdev out", + "o ct", + "j ad", + "M oh", + "ĠP AY", + "Ġge ometric", + "Ġ3 23", + "Ġcircum ference", + "ich ick", + "19 75", + "ĠY uri", + "ĠSh all", + "ĠH over", + "un in", + "S pr", + "Ġg raft", + "ĠHapp iness", + "Ġdisadvant ages", + "att acks", + "Ġhub s", + "ĠStar Craft", + "é ĸ", + "Ġgall eries", + "ĠKor ra", + "Ġgrocer ies", + "ĠGors uch", + "Ġrap ists", + "Ġfun gi", + "ĠTyph oon", + "V ector", + "ĠEm press", + "b attle", + "4 68", + "Ġparas ite", + "ĠBom ber", + "S G", + "ex ist", + "ĠP f", + "Ġun se", + "Ġsurge ons", + "B irth", + "ĠUn sure", + "ĠPrint ed", + "ĠBehavior al", + "ĠA ster", + "Pak istan", + "Ġun ethical", + "Ġs v", + "ĠIo T", + "Ġlay outs", + "P ain", + "Ġconst ants", + "ĠL W", + "ĠB ake", + "Ġtow els", + "Ġdeterior ation", + "ĠBol ivia", + "Ġblind ed", + "ĠW arden", + "ĠMist ress", + "Ġon stage", + "Ġcl ans", + "ĠB EST", + "19 60", + "Ġant ique", + "Ġrhet orical", + "ĠPer cy", + "ĠRw anda", + ", .", + "B ruce", + "Ġtra umat", + "ĠParliament ary", + "Ġfoot note", + "id ia", + "ĠLear ned", + "se eking", + "gen ic", + "Ġdim ensional", + "H ide", + "èĢ ħ", + "Ġintrig ue", + "in se", + "Ġle ases", + "Ġapp rentices", + "w ashing", + "Ġ19 26", + "V ILLE", + "Ġsw oop", + "s cl", + "Ġbed rooms", + "on ics", + "ĠCr unch", + "comp atible", + "Ġincap ac", + "ĠYemen i", + "ash tra", + "z hou", + "d anger", + "Ġmanifest ations", + "ĠDem ons", + "AA F", + "Secret ary", + "ACT ED", + "L OD", + "Ġam y", + "ra per", + "eth nic", + "4 17", + "Ġpos itives", + "Ġ27 3", + "ĠRefuge es", + "Ġus b", + "ĠV ald", + "odd y", + "ĠMahm oud", + "As ia", + "Ġskull s", + "ĠEx odus", + "ĠComp et", + "ĠL IC", + "ĠM ansion", + "ĠA me", + "Ġconsolid ate", + "storm s", + "ont ent", + "99 6", + "Ġcl en", + "Ġm ummy", + "fl at", + "75 8", + "ĠV OL", + "oter ic", + "n en", + "ĠMin ute", + "S ov", + "Ġfin er", + "R h", + "ly cer", + "Ġreinforce ments", + "ĠJohann es", + "ĠGall agher", + "Ġgym n", + "S uddenly", + "Ġext ortion", + "k r", + "i ator", + "T a", + "Ġhippocamp us", + "N PR", + "ĠComput ing", + "Ġsquare ly", + "Ġmod elling", + "ĠFor ums", + "ĠL isp", + "ĠKrish na", + "Ġ3 24", + "Ġr ushes", + "Ġens ued", + "Ġcre eping", + "on te", + "n ai", + "il ater", + "ĠHorn ets", + "Ġob livious", + "IN ST", + "55 9", + "Ġjeopard y", + "Ġdistingu ishing", + "j ured", + "Ġbeg s", + "sim ilar", + "ph ot", + "5 30", + "ĠPark way", + "Ġs inks", + "ĠHearth stone", + "ib ur", + "ĠBat on", + "Av oid", + "Ġd ancer", + "Ġmag istrate", + "ary n", + "Ġdisturb ances", + "ĠRom ero", + "Ġpar aph", + "Ġmis 
chief", + "âĸ ĵ", + "ĠSh aria", + "Ġur inary", + "r oute", + "iv as", + "f itted", + "Ġeject ed", + "ĠAl buquerque", + "Ġ4 70", + "Ġirrit ated", + "ĠZ ip", + "ĠB iol", + "à į", + "Ġden ounce", + "Ġbin aries", + "ĠVer se", + "Ġopp os", + "ĠKend rick", + "ĠG PL", + "Ġsp ew", + "ĠEl ijah", + "ĠE as", + "Ġdr ifted", + "so far", + "Ġannoy ance", + "ĠB ET", + "47 4", + "ĠSt rongh", + "it ates", + "ĠCogn itive", + "oph one", + "ĠIdent ification", + "ocr ine", + "connect ion", + "Ġbox er", + "ĠAS D", + "ĠAre as", + "Y ang", + "t ch", + "ull ah", + "Ġdece ive", + "Comb at", + "ep isode", + "cre te", + "W itness", + "Ġcondol ences", + "ht ar", + "Ġhe als", + "Ġbuck ets", + "ĠLA W", + "B lu", + "Ġsl ab", + "ĠOR DER", + "oc l", + "att on", + "ĠSteven son", + "ĠG inger", + "ĠFriend ly", + "ĠVander bilt", + "sp irit", + "ig l", + "ĠReg arding", + "ĠPR OG", + "Ġse aling", + "start ing", + "Ġcard inal", + "ĠV ec", + "ĠBe ir", + "Ġmillisec onds", + "we ak", + "per se", + "Ġster ile", + "ĠCont emporary", + "ĠPh ant", + "ĠCl o", + "Ġout p", + "Ġex iled", + "Ġ27 7", + "Ġself ie", + "Ġman ic", + "Ġn ano", + "ter ms", + "Alex ander", + "Ġres olves", + "Ġmillenn ia", + "Ġexpl odes", + "Ġconst ellation", + "Ġadul tery", + "m otion", + "D OC", + "Ġbroad casters", + "Ġkinderg arten", + "ĠMay weather", + "ĠE co", + "ich o", + "Ġ28 7", + "l aun", + "Ġm ute", + "Ġdisc reet", + "Ġpres chool", + "Ġpre empt", + "De lete", + "ĠFre ed", + "P i", + "H K", + "Ġblock er", + "ĠC umber", + "Ġw rought", + "d ating", + "Ġins urer", + "Ġquot as", + "Ġpre ached", + "Ġev iction", + "ĠReg ina", + "ĠP ens", + "Ġsevent een", + "ĠN ass", + "D ick", + "Ġfold s", + "Ġd otted", + "ĠA ad", + "Un iversal", + "Ġp izz", + "ĠG uru", + "Ġso ils", + "Ġno vice", + "ĠNe ander", + "Ġst ool", + "Ġdeton ated", + "ĠPik achu", + "ĠMass ive", + "IV ER", + "ĠAb del", + "Ġsubdu ed", + "Ġtall est", + "Ġprec arious", + "Ġa y", + "r ification", + "ĠOb j", + "c ale", + "Ġun question", + "cul osis", + "ad as", + "igr ated", + "D ays", + "Ġque ens", + "ĠGaz ette", + "ĠCol our", + "ĠBow man", + "ĠJ J", + "ï ve", + "Ġdomin ates", + "Stud ent", + "Ġm u", + "Ġback log", + "ĠElect ro", + "Tr uth", + "48 3", + "Ġcond ensed", + "r ules", + "ĠCons piracy", + "Ġacron ym", + "hand led", + "ĠMat te", + "j ri", + "ĠImp ossible", + "l ude", + "cre ation", + "Ġwar med", + "ĠSl ave", + "Ġmis led", + "Ġfer ment", + "ĠK ah", + "ink i", + "ke leton", + "cy l", + "ĠKar in", + "Hun ter", + "Reg ister", + "ĠSur rey", + "Ġst ares", + "ĠW idth", + "ĠN ay", + "ĠSk i", + "Ġblack list", + "uck et", + "Ġexp ulsion", + "im et", + "Ġret weet", + "vant age", + "Fe ature", + "Ġtro opers", + "Ġhom ers", + "9 69", + "Ġconting ency", + "ĠW TC", + "ĠBrew er", + "fore ign", + "W are", + "S olar", + "Ġund ue", + "RE C", + "ulner able", + "path ic", + "ĠBo ise", + "Ġ3 22", + "Ġarous ed", + "ĠY ing", + "ä¸ į", + "uel ess", + "Ġp as", + "Ġmor p", + "Ġfl oral", + "Ex press", + "ud ging", + "k B", + "ĠGr anted", + "Ø ¯", + "ĠMich a", + "ĠGoth ic", + "ĠSPEC IAL", + "ĠRic ardo", + "F ran", + "Ġadminister ing", + "6 20", + "por a", + "Ġ ®", + "Ġcomprom ises", + "Ġb itten", + "Ac cept", + "Th irty", + "Ð ²", + "Ġmater ially", + "ĠTer r", + "ig matic", + "ch ains", + "Ġdo ve", + "stad t", + "Mar vel", + "FA ULT", + "Ġwind shield", + "Ġ3 36", + "ad ier", + "Ġsw apping", + "Ġflaw less", + "ĠPred ator", + "ĠMiche le", + "Ġprop ulsion", + "ĠPsych ic", + "Ġassign ing", + "Ġfabric ation", + "Ġbar ley", + "l ust", + "Ġtow ering", + "Ġalter cation", + "ĠBent ley", + "Sp here", + "Ġtun a", + "ĠClass es", + "Fre 
edom", + "un er", + "L ady", + "v oice", + "Ġcool est", + "or r", + "Ġpal p", + "$ {", + "Ġhyster ia", + "ĠMet atron", + "p ants", + "Ġspawn ing", + "Exper ts", + "ĠInvest ors", + "ĠAn archy", + "Ġshr unk", + "ĠVict im", + "Ġ28 9", + "Ġec stasy", + "ĠB inding", + "58 5", + "ĠMel ody", + "57 8", + "ot ally", + "ĠE tsy", + "lig a", + "Ġapplaud ed", + "Ġswe ating", + "Ġredist ributed", + "Ġpop corn", + "Ġsem inal", + "f ur", + "ĠNeuro science", + "R and", + "ĠO st", + "ĠMadd en", + "ĠIncre asing", + "ĠDaw kins", + "ĠSub way", + "Ġar sen", + "cons erv", + "B UR", + "Ġsp iked", + "ĠLy ft", + "ĠImper ium", + "ĠDrop box", + "Ġfav oured", + "Ġencomp asses", + "gh ost", + "Ġins pires", + "Ġbur geoning", + "ĠY oshi", + "ĠVert ical", + "ĠAud itor", + "Ġint ending", + "Ġfilib uster", + "Bl oom", + "f ac", + "ĠCav s", + "ign ing", + "Ġcowork ers", + "ĠBarb arian", + "rem ember", + "FL AG", + "Ġaudit ory", + "ason ry", + "Col lege", + "Ġmut ed", + "gem ony", + "ob in", + "ĠPsych o", + "9 68", + "Ġlav ish", + "Ġhierarch ical", + "ĠDr one", + "ou k", + "Ġcripp led", + "ĠMax im", + "Sl ot", + "Ġqu iz", + "ĠV id", + "if ling", + "Ġarchae ologists", + "Ġabandon ment", + "d ial", + "le on", + "ĠF as", + "T ed", + "Ġr aspberry", + "Ġmaneu vers", + "Ġbehavi ours", + "Ġins ure", + "Ġrem od", + "Sw itch", + "h oe", + "Ġsp aced", + "Ġafford ability", + "ĠF ern", + "not ation", + "ĠBal anced", + "Ġoccup ies", + "en vironment", + "Ġneck lace", + "Ġsed an", + "F U", + "ĠBrav o", + "Ġab users", + "ĠAn ita", + "met adata", + "ĠG ithub", + "ait o", + "ĠF aster", + "ĠWass erman", + "ĠF lesh", + "Ġth orn", + "r arily", + "ĠMer ry", + "w ine", + "Ġpopul ace", + "ĠL ann", + "Ġrepair ing", + "Ġpsy che", + "Ġmod ulation", + "aw aru", + "âĢĭ âĢĭ", + "ari j", + "Ġdecor ations", + "Ġapolog ise", + "ĠG arg", + "app ly", + "Ġgive away", + "ĠFl an", + "ĠWy att", + "U ber", + "Ġauthor ised", + "ĠMor al", + "HAHA HAHA", + "activ ate", + "Ġtorped o", + "ĠF AR", + "Ġam assed", + "ĠA ram", + "ark in", + "ĠVict ims", + "st ab", + "Ġo m", + "ĠE CO", + "Ġopio ids", + "Ġpurpose ly", + "ĠV est", + "Ġer g", + "at an", + "ĠSur gery", + "Ġcorrect ing", + "ĠOrt iz", + "ĠBe et", + "Ġrev oke", + "Ġfre eway", + "ĠH iggins", + "F ail", + "ĠFar ms", + "ĠAT P", + "h ound", + "Ġp oking", + "ĠCommun ists", + "mon ster", + "iment ary", + "Ġunlock ing", + "Ġunf it", + "we ed", + "en ario", + "at ical", + "ĠEnlight enment", + "ĠN G", + "ĠComp ensation", + "de en", + "ĠWid ow", + "ĠCind y", + "ĠAfter wards", + "Ġ6 000", + "ikh ail", + "ag ically", + "Ġrat ified", + "Ġcasual ty", + "H OME", + "p sey", + "f ee", + "Ġspark ling", + "Ġd é", + "Ġconcert ed", + "C atal", + "Ġcomp lying", + "ĠA res", + "ĠD ent", + "Sh ut", + "Ġsk im", + "ad minist", + "Ġhost ilities", + "ĠG ins", + "Ġ6 08", + "Ġm uddy", + "ĠMc Int", + "ĠDec ay", + "5 25", + "Ġconspic uous", + "ĠEx posure", + "Ġresc ind", + "Ġwear able", + "Ġ3 28", + "our met", + "ah s", + "ĠRob ots", + "Ġe clips", + "inst ance", + "ĠRE PORT", + "ĠApp l", + "0 30", + "ĠSk ies", + "01 00", + "Ġfall acy", + "S ocket", + "ĠRece iver", + "Ġsol ves", + "ĠButter fly", + "ĠSho pping", + "ĠFI RE", + "65 4", + "Med ic", + "Ġsing ers", + "ĠNeed less", + "'' ''", + "isher s", + "ĠD ive", + "58 8", + "Ġselect ively", + "Ġcl umsy", + "88 9", + "Ġpurch aser", + "ear ned", + "ard y", + "Ġbenef iting", + "eng lish", + "Ġyield ing", + "ĠP our", + "Ġspin ach", + "Ġdel ve", + "ĠC rom", + "6 10", + "Ġexport ing", + "ĠMA KE", + "Ġ26 3", + "Ġg rop", + "Ġenv oy", + "ĠInqu iry", + "ĠLu igi", + "d ry", + "ĠT uring", + "Thumbnail Image", + 
"ĠVar iety", + "Ġfac et", + "Ġfl uffy", + "Ġexcerpt s", + "Ġsh orth", + "ĠOl sen", + "CL UD", + "Ġrel iant", + "ĠUN C", + "T our", + "Ġbat hing", + "Comp any", + "Ġglobal ization", + "P red", + "ĠMalf oy", + "Ġh oc", + "j am", + "craft ed", + "ĠBond s", + "ĠKiss inger", + "Eng land", + "Ġorder ly", + "cat entry", + "Ġ26 1", + "Ġexch anging", + "ĠInt ent", + "ĠAmend ments", + "D OM", + "Ġst out", + "³³³³³³³³ ³³³³³³³³", + "ĠAir bus", + "Ġ27 8", + "hy de", + "P oll", + "Item ThumbnailImage", + "Ġlooph oles", + "ĠPill ar", + "Ġexpl or", + "St retch", + "A part", + "Ġun married", + "Lim it", + "ĠTransform ers", + "Ġintellect ually", + "unct ure", + "18 00", + "Ġd arn", + "B razil", + "Ġleft over", + "ber us", + "f red", + "Mine craft", + "3 26", + "ĠForm s", + "Ġproof s", + "ĠDes igned", + "Ġindex es", + "ĠSupp ose", + "EM S", + "ĠL oving", + "ĠBon nie", + "im ating", + "OT US", + "Ġconduct or", + "Ġbehav ed", + "ĠF ren", + "Ġsy nerg", + "Ġmillenn ium", + "Ġcater ing", + "ĠL auder", + "W r", + "ĠY iannopoulos", + "ĠAT F", + "Ġensl aved", + "Ġawaken ed", + "D VD", + "ĠED ITION", + "ĠConc ert", + "ĠChall enger", + "ĠH aku", + "umer ic", + "Ġdep recated", + "ĠSH AR", + "4 12", + "Ġdy stop", + "Ġtremb ling", + "Ġdread ed", + "ĠSp ac", + "p adding", + "Re pl", + "ĠG arrison", + "M ini", + "Ġun paralleled", + "am ar", + "URR ENT", + "w reck", + "c ertain", + "t al", + "ĠC LS", + "app ings", + "Ġsens ed", + "Ġf encing", + "ĠPas o", + "ĠDes k", + "Ġsc off", + "Ġcontem plate", + "ĠL iga", + "l iquid", + "75 7", + "Ġapp rentice", + "ĠUCH IJ", + "5 70", + "ĠTh ousand", + "ĠIll um", + "Ġchampion ed", + "ãĤ Į", + "Ġelect ors", + "Ġ3 98", + "ĠH ancock", + "round ed", + "ĠJ OHN", + "Ġuns atisf", + "Ġqual ifier", + "ĠGad get", + "EN E", + "Ġdead liest", + "ĠPl ants", + "Ġ ions", + "Ġacc ents", + "Ġtwe aking", + "Ġsh aved", + "F REE", + "ĠCh aser", + "Again st", + "9 60", + "Ġmeth amphetamine", + "Ġnormal ized", + "Ġ$ \\", + "ĠPre cision", + "ĠGu am", + "Ġch oked", + "ĠX II", + "ĠCast ing", + "Tor rent", + "Ġscal p", + "ĠJagu ar", + "w it", + "Ġsem ic", + "ix ie", + "ĠG ould", + "Ġconf ines", + "N usra", + "ĠL on", + "ĠJ ugg", + "y cle", + "ĠCod ec", + "E gypt", + "Ġrest rain", + "ĠAl iens", + "Ġch oking", + "ĠD unk", + "ĠBell a", + "ab c", + "Ġsl ang", + "Ġneuro trans", + "s av", + "Ġempower ment", + "â ĨĴ", + "Ġclim bers", + "ĠM im", + "ĠF ra", + "ros se", + "Cap ital", + "ĠCth ulhu", + "Inter face", + "Ġprof icient", + "ĠIN TO", + "Ġ3 18", + "ront al", + "5 80", + "ĠDes pair", + "K enn", + "Ġscrim mage", + "ĠCo at", + "as ions", + "Ġwall paper", + "ĠJ ol", + "Ġresurg ence", + "Ġant iv", + "ĠB alls", + "² ¾", + "Ġbuff ers", + "Ġsub system", + "ĠSt ellar", + "ĠL ung", + "A IDS", + "Ġerad icate", + "Ġblat antly", + "Ġbehav es", + "ĠN un", + "Ġant ics", + "ex port", + "DE V", + "w b", + "Ġph p", + "ĠInteg rity", + "Ġexplore r", + "Ġrev olving", + "auth ored", + "g ans", + "Ġbas k", + "Ġas ynchronous", + "å į", + "TH ING", + "69 8", + "G ene", + "ĠR acer", + "ĠN ico", + "iss ued", + "Ġser mon", + "p ossibly", + "Ġsize of", + "Ġentrepreneur ial", + "ox in", + "ĠMin erva", + "Ġpl atoon", + "n os", + "ri ks", + "A UT", + "ĠAval anche", + "ĠDes c", + "ij 士", + "ĠP oc", + "Ġconf erred", + "Î »", + "Ġpat ched", + "F BI", + "66 2", + "Ġfract ures", + "Ġdetect s", + "Ġded icate", + "Ġconstitu ent", + "Ġcos mos", + "W T", + "Ġswe ats", + "Ġspr ung", + "b ara", + "s olid", + "Ġuns us", + "Ġbul ky", + "ĠPhilipp e", + "ĠFen rir", + "Ġtherap ists", + "ore al", + "^^ ^^", + "Ġtotal ed", + "Ġboo ze", + "ĠR PC", + "Prosecut 
ors", + "Ġdis eng", + "ĠSh ared", + "Ġmotor cycles", + "Ġinvent ions", + "Ġlett uce", + "ĠMer ge", + "ĠJ C", + "Ġspiritual ity", + "ĠWAR NING", + "Ġunl ucky", + "ĠT ess", + "Ġtong ues", + "ĠD UI", + "T umblr", + "Ġle ans", + "Ġinv aders", + "Ġcan opy", + "ĠHur ricanes", + "ĠB ret", + "ĠAP PLIC", + "id ine", + "ick le", + "Reg arding", + "Ġve ggies", + "Ġe jac", + "ju ven", + "F ish", + "D EM", + "ĠD ino", + "Th row", + "ĠCheck ing", + "be ard", + "( &", + "Ġj ails", + "Ġh r", + "trans fer", + "iv ating", + "Ġfle ets", + "ĠIm ag", + "ĠMc Donnell", + "Ġsnipp et", + "Is a", + "ĠCh att", + "ĠSt ain", + "ĠSet FontSize", + "ĠO y", + "ĠMathemat ics", + "49 4", + "Ġelectro ly", + "ĠG ott", + "ĠBr as", + "B OOK", + "ĠF inger", + "d ump", + "Ġmut ants", + "Ġrent als", + "Ġinter tw", + "Ġc reek", + "ail a", + "Bro ther", + "ĠDisc ord", + "pe e", + "raw ler", + "Ġcar p", + "Ġ27 9", + "ãĤ· ãĥ£", + "rel ations", + "Ġcontr asts", + "Col umn", + "Ġrec onnaissance", + "Ġun know", + "Ġl ooting", + "Ġregul ates", + "Ġopt imum", + "ĠChero kee", + "ĠA ry", + "Lat est", + "Ġroad side", + "Ġd anced", + "ĠUnic orn", + "A cknowled", + "Ġuncont roll", + "ĠM US", + "at io", + "ch ance", + "ha ven", + "VAL UE", + "Ġfavour ites", + "Ġceremon ial", + "b inary", + "pe ed", + "wood s", + "EM P", + "Ġv ascular", + "Ġcontempl ated", + "Ġbar ren", + "ĠL IST", + "Y ellow", + "ospons ors", + "Ġwhisk y", + "ĠM amm", + "ĠDeV os", + "min imum", + "H ung", + "44 2", + "P ic", + "ĠSnap dragon", + "77 6", + "Ġcar ving", + "Ġund ecided", + "Ġadvantage ous", + "Ġpal ms", + "ĠA Q", + "Ġst arch", + "L oop", + "Ġpadd le", + "Ġfl aming", + "ĠHor izons", + "An imation", + "bo ost", + "Ġprob abilities", + "ĠM ish", + "Ġex odus", + "ĠEditor ial", + "Ġfung us", + "Ġdissent ing", + "ĠDel icious", + "rog ram", + "ĠD yn", + "d isk", + "t om", + "Ġfab rics", + "ĠC ove", + "ĠB ans", + "Ġsoft en", + "ĠCON S", + "Ġin eligible", + "Ġestim ating", + "ĠLex ington", + "pract ice", + "of i", + "Ġshe dding", + "ĠN ope", + "Ġbreat hed", + "ĠCorinth ians", + "y ne", + "ek i", + "B ull", + "Ġatt aching", + "reens hots", + "Ġanaly se", + "ĠK appa", + "Ġuns ustainable", + "Ġinter pol", + "ank y", + "he mer", + "Ġprot agonists", + "Ġform atted", + "ĠBry ce", + "ĠAch illes", + "ĠAb edin", + "sh ock", + "Ġb um", + "b os", + "qu a", + "ĠW arn", + "q t", + "ĠDi abetes", + "8 64", + "ĠIn visible", + "Ġvan ish", + "Ġtrans mitting", + "Ġmur ky", + "ĠFe i", + "Ġawa ited", + "ĠJur assic", + "umm ies", + "Ġmen acing", + "g all", + "C ath", + "B uilt", + "ild o", + "ĠV otes", + "Ġon t", + "Ġmun itions", + "ĠFre em", + "ÃŃ n", + "Ġdec ency", + "lo pp", + "ie ved", + "ĠG ord", + "Ġun thinkable", + "ĠNews week", + "Ġ3 21", + "He at", + "Ġpresent er", + "ji ang", + "Ġpl ank", + "ĠAval on", + "Ġben z", + "ĠR out", + "Ġslam ming", + "ĠD ai", + "ou ter", + "ĠCook ie", + "ĠAlic ia", + "ge y", + "Ġvan ity", + "Ġow l", + "á µ", + "t ested", + "ĠAw akens", + "Ġcan v", + "Ġblind ly", + "ĠRid ley", + "ĠEm ails", + "Requ ires", + "ĠSer bian", + "ograp hed", + "if rame", + "eter ia", + "Ġaltern ating", + "qu iet", + "Ġsoc iology", + "ĠUn lock", + "ĠCommun ism", + "Ġo ps", + "Ġatt ribution", + "Ġab duction", + "ĠAb ram", + "Ġsidel ined", + "ĠB OOK", + "Ġref ining", + "ĠFe eling", + "ĠOs lo", + "ĠPru itt", + "r ack", + "ang ible", + "Ġcaut iously", + "ĠM ARK", + "eed s", + "M ouse", + "ĠStep h", + "ĠP air", + "S ab", + "99 7", + "ĠBa al", + "B ec", + "Ġcomm a", + "ĠP all", + "ĠG ael", + "Ġmisunder stand", + "ĠP esh", + "Order able", + "Ġdis mal", + "ĠSh iny", + "% \"", + "Ġreal 
istically", + "Ġpat io", + "ĠG w", + "ĠVirt ue", + "Ġexhaust ing", + "wh atever", + "oph ys", + "y ip", + "4 18", + "Ad just", + "ĠWa iting", + "ess on", + "ĠMaz da", + "ĠDo zens", + "Ġstream lined", + "Ġincompet ence", + "ĠM eth", + "Ġeth os", + "ON ES", + "Ġincent iv", + "Ġgr itty", + "ĠBut cher", + "Head er", + "Ġexp onential", + "à Ł", + "Ġcorrel ate", + "Ġcons ensual", + "s ounding", + "R ing", + "Orig in", + "Ġcon clusive", + "fe et", + "ac ly", + "ĠF ernandez", + "Buy able", + "Ġd ucks", + "aunt lets", + "Ġel ong", + "Ġ28 6", + "Ġsim ul", + "G as", + "ĠK irst", + "Ġprot r", + "ĠRob o", + "ĠAo E", + "op ol", + "Ġpsych ologically", + "sp in", + "ilater ally", + "ĠCon rad", + "W ave", + "44 1", + "ĠAd vertisement", + "ĠHarm on", + "ĠOri ental", + "is Special", + "Ġpresum ptive", + "Ġw il", + "ĠK ier", + "ne a", + "Ġp pm", + "Ġhar bour", + "ĠW ired", + "comp any", + "Ġcor oner", + "atur days", + "ĠP roud", + "ĠN EXT", + "ĠFl ake", + "val ued", + "ce iver", + "Ġfra ught", + "Ġc asing", + "Ġrun away", + "Ġg in", + "ĠLaure nt", + "ĠHar lem", + "ĠCur iosity", + "qu ished", + "Ġneuro science", + "ĠH ulu", + "Ġborrow er", + "Ġpetition er", + "ĠCo oldown", + "W ARD", + "Ġinv oking", + "conf idence", + "For ward", + "Ġst s", + "pop ulation", + "Delivery Date", + "Fil m", + "ĠC ov", + "quick Ship", + "quickShip Available", + "prim ary", + "isSpecial Orderable", + "inventory Quantity", + "channel Availability", + "BO X", + "ĠMulti player", + "ĠJen ner", + "77 8", + "ĠM d", + "Ġ~ /.", + "M N", + "Ġchild ish", + "Ġantioxid ant", + "ĠChrom ebook", + "Ġ27 4", + "Ġscreen play", + "Ġadvent urous", + "ĠRelations hip", + "respons ive", + "ming ton", + "Ġcorner stone", + "ĠF ey", + "F IR", + "Ġrook ies", + "ĠF eaturing", + "Ġorig inate", + "Ġelectro des", + "ant es", + "Ġscript ures", + "Ġgl ued", + "Ġdiscont ent", + "Ġaff licted", + "lay out", + "B rave", + "Ġm osa", + "ĠQuant ity", + "ĠH ik", + "w inner", + "H ours", + "Ġent ail", + "ĠCell s", + "olog ue", + "Ġv il", + "Ġpre acher", + "Ġdecor ative", + "d ifferent", + "Ġprejud ices", + "ĠSm oking", + "ĠNotting ham", + "so Type", + "Ġrhyth ms", + "ĠAl ph", + "bl ast", + "Ste el", + "ĠDaniel le", + "Ġstr ife", + "Ġrem atch", + "so DeliveryDate", + "ĠF ork", + "t rip", + "ol ulu", + "hes es", + "C G", + "ĠPOLIT ICO", + "ost a", + "ĠDr ift", + "é¾įå ¥", + "é¾įå¥ ij士", + "Ġvet ting", + "ĠJin ping", + "ĠRec ession", + "Min or", + "ĠF raud", + "enf ranch", + "Ġconven ed", + "ĠNA ACP", + "ĠMill ions", + "ĠFarm ing", + "ĠW oo", + "ĠFl are", + "rit o", + "imm igrant", + "Ġvac ancy", + "ĠHE AD", + "ĠV aj", + "eg al", + "ĠV igil", + "Stud y", + "Ġru ining", + "Ġr acks", + "Ġhe ater", + "ĠRand olph", + "ĠBr ush", + "ĠT ir", + "Ø ¨", + "Ġc ov", + "% ]", + "Ġrecount s", + "ĠO PT", + "ĠM elt", + "Ġtr uce", + "Ġcas inos", + "Ġcrus ade", + "Ġcarn age", + "Ġstri pe", + "ĠK yl", + "Text ures", + "Ġ6 98", + "Ġpro clamation", + "Ġgood ies", + "Ġ........ 
..", + "pro claimed", + "P olit", + "Ġtop ical", + "Ġspecial ize", + "ĠA min", + "g m", + "Ġanch ored", + "Ġbear ings", + "s ample", + "ĠHigh land", + "ĠAut ism", + "Ġmerc enary", + "Ġinterview er", + "L ER", + "ĠSom ers", + "Ġembry o", + "ĠAss y", + "Ġ28 1", + "ĠEd iting", + "ĠCh osen", + "6 60", + "Ġp ci", + "ĠThunder bolt", + "BI LL", + "Ġchuck led", + "jri wal", + "h of", + "Ġearth ly", + "() {", + "ind ependence", + "Ġdisp ers", + "ĠV endor", + "ĠG areth", + "Ġp als", + "P enn", + "ĠSub mit", + "ic um", + "Th u", + "Ġcl andestine", + "Ġcann ibal", + "ĠCl erk", + "E Stream", + "gal itarian", + "âĻ ¥", + "g ew", + "Ġhor rend", + "ĠL ov", + "ĠRe action", + "ocr in", + "Class ic", + "Ġecho ing", + "Ġdiscl osing", + "ĠIns ight", + "og un", + "ĠInc arn", + "upload s", + "pp erc", + "guy en", + "Ġ19 01", + "ĠB ars", + "68 7", + "Ġb ribes", + "ĠFres no", + "ur at", + "ĠRe ese", + "Ġintr usive", + "Ġgri pping", + "ĠBlue print", + "ĠR asm", + "un ia", + "man aged", + "ĠHeb do", + "Ġ3 45", + "Ġdec oding", + "Ġpo ets", + "Ġj aws", + "ĠF IGHT", + "am eless", + "ĠMead ows", + "ĠHar baugh", + "Inter view", + "ĠH osp", + "ĠB RA", + "Ġdelet ion", + "m ob", + "W alker", + "ĠMoon light", + "ĠJ ed", + "ĠSoph ia", + "Ġus ur", + "Ġfortun ately", + "ĠPut ting", + "ĠF old", + "Ġsan itation", + "Ġpart isans", + "IS ON", + "B ow", + "ĠCON C", + "ĠRed uced", + "ĠS utton", + "Ġtouch screen", + "Ġembry os", + "âĢ¢âĢ¢ âĢ¢âĢ¢", + "ĠK rug", + "com bat", + "ĠPet roleum", + "Ġam d", + "ĠCos mos", + "Ġpresc ribing", + "Ġconform ity", + "ours es", + "Ġplent iful", + "Ġdis illusion", + "ĠEc ology", + "itt al", + "Ġf anc", + "Ġassass inated", + "regn ancy", + "Ġperenn ial", + "ĠBul lets", + "Ġst ale", + "Ġc ached", + "ĠJud ith", + "ĠDise ases", + "All en", + "Ġl as", + "Ġsh ards", + "ĠSu arez", + "ĠFriend ship", + "inter face", + "ĠSupp orters", + "add ons", + "46 2", + "ĠIm ran", + "ĠW im", + "Ġnew found", + "ĠM b", + "An imal", + "Ġd arling", + "and e", + "Ġrh y", + "ĠTw isted", + "pos al", + "yn ski", + "Var ious", + "× ľ", + "ĠK iw", + "uy omi", + "Ġwell being", + "ĠL au", + "an os", + "Ġunm ist", + "Ġmac OS", + "Ġrest room", + "ĠOl iv", + "ĠAir ways", + "Ġtimet able", + "9 80", + "Ġrad ios", + "v oy", + "ias co", + "Ġcloud y", + "ĠDraw ing", + "Any thing", + "Sy ria", + "ĠH ert", + "st aking", + "Ġun checked", + "Ġb razen", + "ĠN RS", + "69 7", + "onom ic", + "est ablish", + "Ġl eng", + "Ġdi agonal", + "ĠF ior", + "L air", + "ĠSt ard", + "Ġdef icient", + "jo ining", + "be am", + "Ġomn ip", + "Ġbl ender", + "Ġsun rise", + "Mo ore", + "ĠF ault", + "ĠCost ume", + "ĠM ub", + "Fl ags", + "an se", + "Ġpay out", + "ĠGovern ors", + "ĠD illon", + "ĠBan ana", + "N ar", + "Ġtra iled", + "Ġimperial ist", + "um ann", + "ats uki", + "4 35", + "ĠRoad s", + "Ġsl ur", + "ĠIde ally", + "Ġt renches", + "C trl", + "Ġmir rored", + "ĠZ el", + "ĠC rest", + "Comp at", + "ĠRoll s", + "sc rib", + "ĠTra ils", + "omet ers", + "w inter", + "Ġimm ortality", + "il ated", + "Ġcontrad icts", + "un iversal", + "ill ions", + "ĠM ama", + "opt im", + "AT URE", + "Ġge o", + "et ter", + "ĠCar lo", + "4 24", + "Ġcanon ical", + "ĠStrongh old", + "n ear", + "Ġperf ume", + "Ġorche stra", + "od iac", + "Ġup he", + "Ġreign ing", + "vers ive", + "Ġc aucuses", + "ĠD EM", + "Ġinsult ed", + "Ġ---- --", + "ĠCr ush", + "Ġroot ing", + "ĠWra ith", + "Ġwh ore", + "Ġto fu", + "C md", + "ĠB ree", + "Ġ$ _", + "Ġr ive", + "ĠAd vertising", + "Ġw att", + "ĠH O", + "Ġpersu asive", + "ĠParam eters", + "Ġobserv ational", + "ĠN CT", + "ĠMo j", + "ĠSal on", + "Ġtr unc", + "Ġexqu 
isite", + "ĠMar a", + "Ġpo op", + "ĠAN N", + "Ex c", + "ĠWonder ful", + "ĠT aco", + "Ġhome owner", + "ĠSmith sonian", + "orpor ated", + "mm mm", + "Ġlo af", + "ĠYam ato", + "ĠInd o", + "Ġcl inging", + "á s", + "Ġimm utable", + "h ub", + "Or ange", + "Ġfingert ips", + "ĠWood en", + "ĠK idd", + "ĠJ PM", + "ĠDam n", + "C ow", + "c odes", + "48 2", + "Ġiniti ating", + "ĠEl k", + "ĠCut ting", + "Ġabsent ee", + "ĠV ance", + "ĠLil ith", + "G UI", + "Ġobsc ured", + "Ġdwar ves", + "ĠCh op", + "ĠB oko", + "Val ues", + "Ġmult imedia", + "Ġbrew ed", + "Reg ular", + "CRIP TION", + "ĠMort al", + "Ġa pex", + "Ġtravel er", + "Ġbo ils", + "Ġspray ing", + "Rep resent", + "ĠStars hip", + "4 28", + "Ġdisappro val", + "Ġshadow y", + "Ġlament ed", + "ĠRe place", + "ĠFran ç", + "67 7", + "d or", + "Ġunst oppable", + "Ġcoh orts", + "gy n", + "ĠClass ics", + "ĠAm ph", + "Ġsl uggish", + "ĠAdd iction", + "ĠPad res", + "Ġins cription", + "Ġin human", + "min us", + "ĠJere miah", + "at ars", + "Ter ror", + "ĠT os", + "ĠSh arma", + "ast a", + "c atch", + "Ġpl umbing", + "ĠTim bers", + "Sh ar", + "H al", + "ĠO sc", + "Ġcou pling", + "hum ans", + "Ġsp onge", + "Ġid ols", + "ĠSp a", + "ĠAdv ocate", + "ĠBe ats", + "lu a", + "Ġtick ing", + "Ġload er", + "ĠG ron", + "8 10", + "Ġstim ulated", + "Ġside bar", + "ĠManufact urer", + "ore And", + "19 73", + "Ġpra ises", + "ĠFl ores", + "dis able", + "ĠElect rical", + "ra ise", + "E th", + "Ġmigr ated", + "Ġlect urer", + "K ids", + "ĠCa vern", + "Ġk ettle", + "Ġgly c", + "ĠMand ela", + "ĠF ully", + "å§ «", + "FIN EST", + "Ġsquee zing", + "ĠRy der", + "amp oo", + "oreAnd Online", + "Inst oreAndOnline", + "Buyable InstoreAndOnline", + "Ġcommem orate", + "ĠRamp age", + "Aust in", + "ĠSh roud", + "ĠRu ins", + "9 15", + "ĠK H", + "Ġwater front", + "ĠE SC", + "b aby", + "ĠC out", + "ĠEm blem", + "Ġequival ents", + "49 2", + "Un ique", + "ĠNiet zsche", + "brow ser", + "Ġim itation", + "ĠWere wolf", + "ĠKir in", + "ac as", + "' ,\"", + "Ġà ¾", + "Review ed", + "Ġc unt", + "Ġvo ic", + "ĠLen ovo", + "Ġbond ed", + "48 1", + "Ġinhib itors", + "Ġendeav ors", + "ĠHav ana", + "ĠSt out", + "ĠJ olly", + "A ctor", + "*/ (", + "Ġoccur rences", + "ĠT ens", + "Incre ased", + "ĠACT ION", + "Ġ ãĢĮ", + "ĠRank ings", + "ĠB reat", + "Ġ30 9", + "D ou", + "Ġimpact ing", + "ĠDuc hess", + "pre fix", + "Q B", + "Ġsummon ing", + "Ġbest owed", + "ĠKe pler", + "ĠPOW ER", + "c ube", + "ĠK its", + "ĠG rip", + "Ġop ium", + "Ġrep utable", + "t oc", + "ich ael", + "ĠR ipple", + "Ġcaf é", + "ĠZ oom", + "ĠBur ma", + "Ġwa ive", + "Ġst alls", + "Ġdem eanor", + "inc erity", + "Ġfluor ide", + "ĠSH OULD", + "Par is", + "Ġlong ing", + "Ġpl at", + "Ġgross ly", + "Ġbull s", + "Ġshowc asing", + "ex pected", + "ĠG addafi", + "engine ering", + "Re peat", + "ĠK ut", + "Ġconce ivable", + "Ġtrim med", + "osc ope", + "ĠCand idate", + "ĠT ears", + "rol og", + "Lew is", + "S UP", + "Ġroad map", + "Ġsal iva", + "Ġtrump et", + "Jim my", + "Ġmirac ulous", + "Ġcolon ization", + "Ġam put", + "ĠGN OME", + "ate ch", + "D ifferent", + "ĠE LE", + "ĠGovern ments", + "ĠA head", + "ãħĭ ãħĭ", + "word press", + "L IB", + "ĠIn clude", + "ĠDor othy", + "0 45", + "ĠColomb ian", + "Ġle ased", + "88 4", + "Ġde grading", + "ĠDa isy", + "i ations", + "Ġbapt ized", + "Ġsurn ame", + "co x", + "Ġblink ed", + "ãĥ ¢", + "Ġpoll en", + "Ġder mat", + "Ġre gex", + "ĠNich olson", + "ĠE ater", + "ç ľ", + "rad or", + "Ġnarrow er", + "Ġhur ricanes", + "Ġhalluc inations", + "r idden", + "ISS ION", + "ĠFire fly", + "Ġattain ment", + "Ġnom inate", + "Ġav ocado", + "ĠM 
eredith", + "Ġt s", + "Ġreve rence", + "Ġe uph", + "Ġcr ates", + "ĠT EXT", + "Ġ4 43", + "Ġ3 19", + "J SON", + "iqu ette", + "Ġshort stop", + "ic key", + "Ġpro pelled", + "Ġap i", + "ĠTh ieves", + "77 9", + "Ġovers aw", + "Ġcol i", + "ĠNic ola", + "Ġover cl", + "ik awa", + "ĠC yr", + "Ġ38 4", + "78 9", + "ĠAll ows", + "10 27", + "Det roit", + "TR Y", + "set up", + "ĠSocial ism", + "Sov iet", + "s usp", + "ĠAP R", + "ĠShut down", + "Ġal uminium", + "zb ek", + "ĠL over", + "GGGG GGGG", + "Ġdemocr acies", + "Ġ19 08", + "ĠMer rill", + "ĠFranco is", + "gd ala", + "Ġtraff ickers", + "ĠT il", + "ĠGo at", + "Ġsp ed", + "ĠRes erv", + "Ġpro d", + "55 2", + "Ġc ac", + "ĠUn iv", + "ĠSch we", + "Ġsw irling", + "ĠWild erness", + "ĠEgg s", + "Ġsadd ened", + "Ġarch aic", + "H yd", + "Ġexcess ively", + "B RE", + "Ġaer ospace", + "ĠVo ices", + "Cra ig", + "Ġign ited", + "In itially", + "ĠMc A", + "Ġhand set", + "Ġreform ing", + "Ġfrust rations", + "ĠDead pool", + "ĠBel ichick", + "ract or", + "ĠRagnar ok", + "ĠD rupal", + "ĠApp roximately", + "19 20", + "ĠHub ble", + "arm or", + "ĠSar as", + "ĠJon as", + "Ġnostalg ic", + "Ġfeas ibility", + "Sah aran", + "Ġorb iting", + "Ġ9 70", + "R u", + "Ġsh in", + "ĠInvestig ators", + "Ġinconsist encies", + "ĠP AN", + "B G", + "Ġgraz ing", + "Ġdetect ors", + "ĠStart up", + "ĠFun ny", + "ĠNa omi", + "Consider ing", + "Ġh og", + "ut f", + "ce mic", + "Ġfort ified", + "ĠFun ctions", + "Ġcod ec", + "nut rition", + "H at", + "\" !", + "micro soft", + "55 8", + "ĠTh in", + "ĠA CE", + "Al ias", + "ĠO PS", + "p apers", + "P K", + "ãĢ İ", + "Ġimpro bable", + "N orthern", + "equ al", + "Ġlook out", + "Ġty res", + "ĠMod ified", + "ĠK op", + "Abs olutely", + "Ġbuild up", + "sil ver", + "Ġaud i", + "Ġgro tesque", + "ĠSab er", + "ĠPres byter", + "ON Y", + "Ġglac iers", + "ĠSho als", + "ĠK ass", + "ĠH RC", + "ĠNic ol", + "ĠL unch", + "ĠF oss", + "âĸ Ĵ", + "AD RA", + "ĠOne Plus", + "o ing", + "ground s", + "Ġincident al", + "Ġdatas ets", + "68 9", + "ĠClarks on", + "Ġassemb ling", + "ĠCorrect ions", + "Ġdrink ers", + "Ġqual ifiers", + "Ġle ash", + "Ġunf ounded", + "ĠH undred", + "Ġkick off", + "T i", + "Ġrecon cil", + "ĠGr ants", + "ĠCompl iance", + "ĠDexter ity", + "Ġ19 06", + "w arn", + "D allas", + "Max imum", + "n ard", + "av ia", + "be aut", + "ens itivity", + "tr ace", + "Ġpione ers", + "ĠF ract", + "ãĢ ı", + "Ġpre cept", + "Ġgloss y", + "ĠI EEE", + "Ac ross", + "Ġ6 80", + "S leep", + "che on", + "Ġsatir ical", + "ĠMin otaur", + "ĠCla ude", + "Ġr é", + "ape go", + "Ġcar rot", + "ĠSem in", + "ino a", + "Ġz o", + "Ind ependent", + "Ġdiagn oses", + "ĠC ue", + "M AR", + "Ġrend ition", + "ĠK ik", + "Ġpath ology", + "Ġselect s", + "Link edIn", + "Ġass ay", + "ĠD res", + "Ġtext ual", + "post ed", + "IT AL", + "ĠM aul", + "N eal", + "Ġinter connected", + "Ġerr atic", + "ĠVir us", + "Ġ5 30", + "Ġenvironmental ists", + "ĠP helps", + "Ġeng agements", + "ĠIN ST", + "Ġeconom ical", + "nox ious", + "Ġg earing", + "izz y", + "Ġfavor ably", + "ĠMcG ill", + "T erm", + "Ġh anged", + "Ġball park", + "ĠRe yes", + "Ġbe ware", + "ĠP sal", + "ĠMass acre", + "q i", + "Ġin accessible", + "acly sm", + "Ġfr ay", + "ill ac", + "Ġbitter ly", + "ĠCert ification", + "Mich igan", + "Ġir respective", + "al ore", + "Em pty", + "Ġendorse ments", + "Ġund et", + "f g", + "equ ipped", + "Ġmerc iless", + "ĠC ust", + "Ġimm ature", + "Ġvou cher", + "ĠBlack well", + "Ñ ı", + "h awk", + "dis ciplinary", + "ile e", + "ĠMak oto", + "ĠD ude", + "ãĥĩ ãĤ£", + "Y ears", + "Ġin ver", + "Ġsh aman", + "ĠY ong", + "ip el", + "ell 
en", + "ĠCath y", + "br ids", + "Ġs arc", + "65 1", + "N ear", + "Ġground work", + "Ġam az", + "Ġ4 15", + "ĠHunting ton", + "hew s", + "ĠB ung", + "Ġarbit rarily", + "ĠW it", + "ĠAl berto", + "Ġdis qualified", + "best os", + "46 1", + "Ġp c", + "Ġ28 4", + "ro bat", + "Rob in", + "Ġh ugs", + "ĠTrans ition", + "ĠOcc asionally", + "Ġ3 26", + "ĠWh ilst", + "ĠLe y", + "Ġspaces hip", + "cs v", + "Ġun successfully", + "ĠA u", + "le ck", + "ĠWing ed", + "ĠGrizz lies", + ". �", + "Ġne arer", + "ĠSorce ress", + "ĠInd igo", + "El se", + "8 40", + "let es", + "Co ach", + "Ġup bringing", + "ĠK es", + "Ġseparat ist", + "Ġrac ists", + "Ġch ained", + "Ġabst inence", + "lear ning", + "Ġrein stated", + "Ġsymm etry", + "Ġremind ers", + "ĠChe vy", + "Ġm ont", + "Ġexempl ary", + "ĠT OR", + "Z X", + "Ġqual itative", + "ĠSt amp", + "ĠSav annah", + "ĠRoss i", + "Ġp aed", + "Ġdispens aries", + "ĠWall s", + "ĠCh ronic", + "Ġcompliment ary", + "ĠBeir ut", + "Ġ+ ---", + "igs list", + "Ġcrypt ographic", + "mas ters", + "ĠCap itals", + "Ġmax imal", + "Ġent ropy", + "Point s", + "Ġcombat ants", + "l ip", + "ĠGl ob", + "ĠB MC", + "ph ase", + "th ank", + "HT TP", + "Ġcomm uter", + "Ġ\\( \\", + ".. /", + "ĠReg ener", + "ĠDO I", + "ĠActiv ision", + "Ġsl it", + "os al", + "RE M", + "Ġch ants", + "Y u", + "Ke ys", + "Bre xit", + "ĠFor ced", + "Ari zona", + "Ġsquad ron", + "IS O", + "ĠMal one", + "Ġ3 38", + "Ġcontrast ing", + "Ġt idal", + "Ġlib el", + "Ġimpl anted", + "Ġupro ar", + "ĠC ater", + "Ġpropos itions", + "M anchester", + "ĠEuro s", + "it amin", + "G il", + "ĠEl ven", + "ĠSe ek", + "ĠB ai", + "Ġredevelop ment", + "ĠTown s", + "ĠL ub", + "! \",", + "al on", + "K rist", + "Ġmeas urable", + "Ġimagin able", + "Ġapost les", + "Y N", + "7 60", + "Ġster oid", + "Ġspecific ity", + "ĠL ocated", + "ĠBeck er", + "ĠE du", + "ĠDiet ary", + "uts ch", + "ĠMar ilyn", + "Ġbl ister", + "ĠM EP", + "ĠK oz", + "ĠC MS", + "y ahoo", + "ĠCar ney", + "Ġbo asting", + "ĠC aleb", + "By te", + "read s", + "ad en", + "Pro blem", + "ĠWood ward", + "S we", + "S up", + "ĠK GB", + "Set up", + "Ġtac it", + "Ġret ribution", + "Ġd ues", + "ĠM ü", + ". 
?", + "ä¸ Ń", + "p ots", + "Ġcame o", + "ĠP AL", + "educ ation", + "A my", + "like ly", + "g ling", + "Ġconstitution ally", + "ĠHam m", + "ĠSpe ak", + "Ġwid gets", + "br ate", + "Ġcra ppy", + "ĠI ter", + "Ġanticip ating", + "ĠB out", + "P ixel", + "ĠY ep", + "ĠLaur ie", + "Ġh ut", + "Ġbullet in", + "ĠSal vation", + "Ġch ats", + "ear able", + "Honest ly", + "AL TH", + "onse qu", + "c ult", + "isco very", + "ovy ch", + "Ġse lves", + "ĠSat oshi", + "S ounds", + "Ġconver gence", + "ĠRosen berg", + "19 74", + "Ġnas al", + "Ġfull est", + "Ġfer ocious", + "x us", + "ist e", + "AM S", + "Ġlobb ied", + "Ġso othing", + "ĠGun n", + "t oday", + "0 24", + "Ġinspir ational", + "ĠN BN", + "p b", + "g ewater", + "or ah", + "all owed", + "ĠCol iseum", + "Ġspecial izing", + "Ġinsane ly", + "ĠT ape", + "del ay", + "Ġt arn", + "ĠP ound", + "Ġmel anch", + "Ġdeploy ments", + "il and", + "Ġless en", + "Ġfur ry", + "ĠUE FA", + "Ġblood shed", + "ĠMe ier", + "ither ing", + "Ġhe irs", + "ĠJ aw", + "ax ter", + "ĠPublic ations", + "Ġal ters", + "int ention", + "ĠWinc hester", + "d etermination", + "ĠLif etime", + "th in", + "Mon ster", + "7 80", + "Ġapprox imation", + "Ġsuper markets", + "ĠSecond s", + "or os", + "h uge", + "Ġb ribe", + "ĠLIM ITED", + "un ed", + "Ġmis interpret", + "ĠIn jury", + "Ġ3 67", + "Ġthreshold s", + "ĠCarn ival", + "Ġgastro intestinal", + "Ġguid eline", + "Ġde ceived", + "f eatures", + "Ġpurported ly", + "ĠRon nie", + "ĠNew t", + "Ġsp acious", + "as us", + "Ġsuperhero es", + "ĠCyn thia", + "le gged", + "k amp", + "ch io", + "Ġth umbnail", + "ĠShir ley", + "ill ation", + "Ġshe ds", + "ĠZ y", + "E PA", + "Ġdam s", + "Ġy awn", + "n ah", + "ĠPe ggy", + "ĠE rie", + "ĠJu ventus", + "ĠF ountain", + "r x", + "don ald", + "al bum", + "ĠComp rehensive", + "Ġc aching", + "ĠU z", + "ulner ability", + "ĠPrinc iple", + "ĠJ ian", + "ing ers", + "cast s", + "ĠOs iris", + "ch art", + "t ile", + "ĠTiff any", + "ĠPatt on", + "ĠWh ip", + "Ġovers ized", + "J e", + "ĠCind erella", + "ĠB orders", + "ĠDa esh", + "M ah", + "Ġdog ma", + "Ġcommun ists", + "v u", + "Coun cil", + "Ġfresh water", + "Ġw ounding", + "Ġdeb acle", + "Ġyoung ster", + "Ġthread ed", + "ĠB ots", + "ĠSav ings", + "ãģ Ĥ", + "ol ing", + "oh o", + "Ġillum ination", + "M RI", + "Ġlo osen", + "tr ump", + "ag ency", + "ur ion", + "Ġmoment arily", + "ĠCh un", + "ĠBud apest", + "ĠAl ley", + "D isk", + "Ġaston ished", + "ĠCon quer", + "ĠAccount ing", + "h aving", + "ĠWe in", + "ĠAl right", + "Ġrev olver", + "Ġdel usion", + "Ġrelic s", + "Ġad herent", + "qu ant", + "Ġhand made", + "or io", + "Ġcomb ating", + "c oded", + "Ġquad ru", + "re th", + "N ik", + "ĠTrib al", + "ĠMyster ious", + "Ġin hal", + "ĠWin ning", + "ĠClass ification", + "ch anged", + "Ġun ab", + "Ġsc orn", + "icip ated", + "w l", + "ond uctor", + "Ġrein forcing", + "ĠChild hood", + "an ova", + "Ġadventure r", + "Ġdoctor al", + "ĠStrateg ies", + "Ġengulf ed", + "ĠEnc ounter", + "Ġl ashes", + "Crit ical", + "ric ular", + "ĠU TF", + "oci ation", + "check ing", + "ĠConsult ing", + "Run time", + "per iod", + "ĠAs gard", + "Ġdist illed", + "ĠPas adena", + "ĠD ying", + "ĠCOUN TY", + "Ġgran ite", + "Ġsm ack", + "Ġparach ute", + "ĠS UR", + "Virgin ia", + "ĠF urious", + "78 7", + "ĠO kin", + "Ġcam el", + "ĠM bps", + "19 72", + "ĠCh ao", + "ĠC yan", + "j oice", + "ef er", + "ĠW rap", + "ĠDeb ate", + "S eg", + "Ġfore arm", + "ĠIgn ore", + "Ġtim estamp", + "Ġprob ing", + "ĠNo on", + "ĠGra il", + "f en", + "Ġdorm ant", + "ĠFirst ly", + "ĠE ighth", + "ĠH UN", + "ĠDes ire", + "or as", + "Girl s", + "ĠDes 
mond", + "z ar", + "am ines", + "O AD", + "exec ute", + "Ġbo obs", + "ĠAT L", + "_ (", + "Chel sea", + "Ġmasturb ation", + "ĠCo C", + "Ġdestroy er", + "ĠCh omsky", + "Ġsc atter", + "ĠAss ets", + "79 6", + "ĠC argo", + "Ġrecept ive", + "ĠSc ope", + "Ġmarket ers", + "Ġlaun chers", + "Ġax le", + "ĠSE A", + "se q", + "ĠM off", + "f inding", + "ĠGib bs", + "Georg ia", + "extreme ly", + "N J", + "Ġlab orers", + "st als", + "Ġmed iation", + "ĠH edge", + "at own", + "Ġi od", + "des pite", + "v ill", + "J ane", + "ex istence", + "Ġcoinc ided", + "ĠUt ilities", + "ĠChe ap", + "Ġlog istical", + "Ġcul mination", + "ĠNic otine", + "p ak", + "F older", + "Ġrod ents", + "st uff", + "Ġlaw fully", + "Ġreper to", + "io ch", + "j j", + "Dial ogue", + "HH HH", + "lic tion", + "Look s", + "Ġ29 7", + "Ġtur rets", + "ĠAb andon", + "Ġinc ess", + "ĠTraff ord", + "Ġcur led", + "Ġprefer ring", + "Ġprivat ization", + "Ġir resist", + "ĠP anda", + "ĠSh ake", + "ĠMc Gr", + "ãĥ Ħ", + "und ers", + "Ġdiscrim inated", + "Ġbart ender", + "I LE", + "Atl antic", + "Ġprop ensity", + "ĠW iz", + "ĠG im", + "con ference", + "Ġrein forces", + "G h", + "w agon", + "Ġe erie", + "F al", + "Ġhug ged", + "rac ist", + "R IC", + "F u", + "Ġf iller", + "ĠSt ub", + "Ġeng raved", + "ĠWrest le", + "Ġimagin ative", + "ĠPe er", + "ĠFact ors", + "an us", + "ĠDrac ula", + "mon itor", + "Ġrou ters", + "ib ia", + "ĠBoo lean", + "end ale", + "ĠSl aughter", + "ĠSh ack", + "R FC", + "ĠSpiel berg", + "S ax", + "ĠPH OTO", + "ĠCl over", + "ĠR ae", + "Dep ending", + "ĠMem or", + "ar am", + "Ġpier ced", + "Ġcur tains", + "v ale", + "ĠInqu isition", + "ĠP oke", + "Ġforecast ing", + "Ġcompl ains", + "S ense", + "ĠHer mes", + "isc overed", + "Ġb ible", + "ĠMor ph", + "Ġg erm", + "78 5", + "D ON", + "Ġcon gen", + "Ġcr ane", + "ĠD PR", + "Ġrespect fully", + "R oom", + "ĠN aw", + "ĠDal ai", + "re ason", + "ĠAng us", + "Educ ation", + "ĠTitan ic", + "Ë ľ", + "Ġo val", + "un ited", + "Ġthird s", + "Ġmoist ur", + "ĠC PC", + "M iami", + "Ġtent acles", + "ĠPol aris", + "ex c", + "ex clusive", + "ĠPra irie", + "Ġcol ossal", + "ĠBl end", + "sur prisingly", + "ÃŃ s", + "Ġindo ctr", + "Ġbas al", + "ĠMP EG", + "und o", + "Spl it", + "Develop ment", + "Ġlan tern", + "19 71", + "Ġprov ocation", + "Ġang uish", + "ĠB ind", + "ĠLe ia", + "duc ers", + "ipp y", + "conserv ancy", + "Ġinitial ize", + "ĠTw ice", + "ĠSu k", + "Ġpred ic", + "Ġdi ploma", + "Ġsoc iop", + "Ing redients", + "Ġhamm ered", + "ĠIr ma", + "Q aida", + "Ġglim ps", + "ĠB ian", + "Ġst acking", + "Ġf end", + "gov track", + "Ġun n", + "dem ocratic", + "ig ree", + "Ġ5 80", + "Ġ29 4", + "Ġstraw berry", + "ID ER", + "Ġcher ished", + "ĠH ots", + "Ġinfer red", + "Ġ8 08", + "ĠS ocrates", + "O regon", + "ĠR oses", + "ĠFO IA", + "Ġins ensitive", + "Ġ40 8", + "Recomm end", + "ĠSh ine", + "Ġpain staking", + "UG E", + "ĠHell er", + "ĠEnter prises", + "I OR", + "ad j", + "N RS", + "L G", + "Ġalien ated", + "Ġacknowled gement", + "ĠA UD", + "ĠRen eg", + "Ġvou chers", + "Ġ9 60", + "Ġm oot", + "ĠDim ensions", + "Ġc abbage", + "B right", + "g at", + "ĠK lu", + "Ġlat ent", + "Ġz e", + "ĠM eng", + "Ġdis perse", + "Ġpand emonium", + "H Q", + "Ġvirt uous", + "ĠLoc ations", + "ee per", + "prov ided", + "Ġse ams", + "ĠW T", + "iz o", + "PR OV", + "Ġtit anium", + "Ġrecol lection", + "Ġcr an", + "Ġ7 80", + "ĠN F", + "49 1", + "64 2", + "p acking", + "59 8", + "text ure", + "Sp ider", + "fre edom", + "cipl ed", + "ĠTAM ADRA", + "âĻ ¦", + "aut hent", + "ĠW ANT", + "r ified", + "Ġr ites", + "Ġuter us", + "k iss", + "Ġâī ¤", + "Ġsk illet", + 
"Ġdis enfranch", + "ĠGa al", + "Comp an", + "Ġage ing", + "gu ide", + "B alt", + "Ġiter ator", + "Ġdiscretion ary", + "t ips", + "Ġprim ates", + "ĠTechn ique", + "ĠPay ments", + "az el", + "ĠR OCK", + "stant ial", + "0 60", + "Ġd mg", + "ĠJack ets", + "ĠPlay off", + "Ġnurs ery", + "ĠSy mb", + "art on", + "Ġannex ation", + "Color ado", + "Ġco ils", + "ĠSh oes", + "âĦ¢ :", + "ĠRo z", + "COM PLE", + "ĠEve rest", + "ĠTri umph", + "J oy", + "G rid", + "à ¼", + "process or", + "ĠPros per", + "ĠSever us", + "ĠSelect ed", + "r g", + "ĠTay yip", + "St ra", + "Ġski ing", + "Ġ? )", + "Ġpe g", + "Tes la", + "Ġtime frame", + "Ġmaster mind", + "ĠN B", + "scient ific", + "ĠSh it", + "gener ic", + "IN TER", + "N UM", + "Ġst roll", + "ĠEn ix", + "ĠM MR", + "ĠE MS", + "m ovie", + "Ĥ ª", + "Ġminim izing", + "idd ling", + "Ġilleg itimate", + "Ġprot otyp", + "Ġpremature ly", + "Ġmanual s", + "obb ies", + "ĠCass idy", + "D EC", + "des ktop", + "Ġaer os", + "Ġscreen ings", + "Ġdeb ilitating", + "ĠGr ind", + "nature conservancy", + "Ġf ades", + "ter mination", + "assets adobe", + "F actor", + "Ġdefinitive ly", + "P oké", + "ap ult", + "ĠLaf ayette", + "C orn", + "ĠCor al", + "Ġstagn ant", + "T ue", + "Ġdissatisf action", + "G ender", + "Ġkid neys", + "ĠG ow", + "ĠDef eat", + "ĠAsh ton", + "Ġcart els", + "Ġfore closure", + "ĠExpl ore", + "stre ngth", + "ot in", + "Ġveterin arian", + "Ġf umble", + "Ġpar ap", + "ĠSt rait", + "r ils", + "Ġpr ick", + "ĠBerm uda", + "ĠAm munition", + "skin ned", + "Ġab ound", + "ĠB raz", + "Ġshar per", + "ĠAsc ension", + "Ġ9 78", + "Ġpreview s", + "Ġcommun ion", + "ĠX Y", + "Ġph ony", + "Ġnewcom er", + "Ġ3 32", + ".\" ,\"", + "Ġredist ribution", + "Prot ect", + "ĠSo f", + "K al", + "Ġlip stick", + "w orst", + "Ġtang led", + "Ġretrospect ive", + "int eger", + "Ġvolunte ering", + "Ġ19 07", + "Ġ --------------------", + "ic hen", + "Ġunve iling", + "Ġsen seless", + "Ġfisher ies", + "\\ -", + "Ġh inges", + "Ġcalcul us", + "My th", + "Ġund efeated", + "Ġoptim izations", + "Ġdep ress", + "Ġbill board", + "ĠY ad", + "ĠPy ramid", + "Is n", + "I de", + "Ġleg ion", + "ĠK ramer", + "ent anyl", + "Ġpenet rating", + "ĠHaw th", + "ĠPR ODUCT", + "ĠGer ard", + "ĠP act", + "ĠIn cluding", + "ĠEl ias", + "ĠEl aine", + "vis ual", + "Ġhum ming", + "Ġcond esc", + "ĠF asc", + "ä¸ Ĭ", + "Ġe galitarian", + "Ġdev s", + "ĠD ahl", + "O ps", + "D H", + "ĠB ounce", + "id ated", + "ald o", + "Ġrepublic an", + "Ġh amb", + "ĠS ett", + "ograph ies", + "CH APTER", + "Ġtrans sexual", + "Ġsky rocket", + "ans wer", + "Ġmark up", + "Ø ª", + "Ġhero ine", + "Comp are", + "ĠT av", + "Be ast", + "Ġsuccess ors", + "Ġna ïve", + "ĠBuck ley", + "st ress", + "me at", + "Ġdownload able", + "Ġindex ed", + "Ġsc aff", + "ĠL ump", + "ĠHom o", + "Stud io", + "In sp", + "Ġr acked", + "far ious", + "ĠPet ty", + "Ex ternal", + "Ġ19 09", + "W ars", + "com mit", + "put ers", + "Ġun ob", + "ĠEr r", + "ĠE G", + "ĠAl am", + "ĠSiber ia", + "ĠAtmosp heric", + "IS TER", + "ĠSatan ic", + "trans lation", + "ĠL oud", + "tra umatic", + "l ique", + "Ġreson ate", + "ĠWel ch", + "Ġspark ing", + "ĠT OM", + "t one", + "Ġout l", + "Ġhandc uffed", + "ĠSer ie", + "8 01", + "Ġland marks", + "ĠRee ves", + "Ġsoft ened", + "Ġdazz ling", + "ĠW anted", + "month s", + "Mag ikarp", + "Ġunt reated", + "ĠBed ford", + "M i", + "ĠDynam o", + "O re", + "79 5", + "Ġwrong ful", + "Ġl ured", + "Ġcort isol", + "Ġve x", + "d rawn", + "ile t", + "Download ha", + "ĠF action", + "Ġlab yrinth", + "Ġhij acked", + "w aters", + "er ick", + "Ġsuper iors", + "ĠRow ling", + "ĠGu inness", 
+ "Ġt d", + "99 2", + "Ġune arthed", + "Ġcentr if", + "Ġsham eless", + "P od", + "ĠF ib", + "Ġ icing", + "Ġpredict or", + "Ġ29 2", + "fore station", + "con struct", + "C and", + "@ #", + "Ġag itated", + "Ġre pr", + "OV A", + "Ġkn itting", + "ĠLim a", + "Ġf odder", + "68 4", + "ĠPerson a", + "k l", + "7 01", + "Ġbreak up", + "á ¸", + "Ġapp alled", + "Ġantidepress ants", + "ĠSus sex", + "Har ris", + "ĠTher mal", + "ee ee", + "U pload", + "Ġg ulf", + "Ġdoor step", + "ĠSh ank", + "L U", + "ĠM EN", + "ĠP ond", + "s orry", + "Ġmis fortune", + "n ance", + "Ġb ona", + "M ut", + "Ġde graded", + "ĠL OG", + "ĠN ess", + "an imal", + "Ġa version", + "und own", + "Ġsupplement ed", + "ĠC ups", + "Ġ50 4", + "Ġdep rive", + "ĠSpark le", + "Å Ĥ", + "ĠMed itation", + "auth ors", + "ĠSab an", + "ĠN aked", + "air d", + "ĠMand arin", + "ĠScript ures", + "ĠPerson nel", + "ĠMahar ashtra", + "Ġ19 03", + "ĠP ai", + "ĠMir age", + "omb at", + "Access ory", + "Ġfrag mented", + "T ogether", + "Ġbelie vable", + "ĠGl adiator", + "al igned", + "ĠSl ug", + "M AT", + "Ġconvert ible", + "ĠBour bon", + "amer on", + "ĠRe hab", + "nt ax", + "Ġpowd ered", + "pill ar", + "Ġsm oker", + "ĠMans on", + "ĠB F", + "5 11", + "ĠGood ell", + "ĠD AR", + "m ud", + "g art", + "Ġob edient", + "ĠTrans mission", + "ĠDon ation", + "8 80", + "Ġbother ing", + "Material s", + "ãĤ ±", + "dest roy", + "Ġfore going", + "Ġanarch ism", + "ĠK ry", + "ice ps", + "Ġl ittered", + "ĠSch iff", + "Ġanecd otal", + "un its", + "Ġf ian", + "ĠSt im", + "ĠS OME", + "ĠInv aders", + "Ġbehaviour al", + "ĠVent ures", + "Ġsub lime", + "Ġfru ition", + "ĠPen alty", + "Ġcorros ion", + "¶ ħ", + "Ġlik ened", + "Ġbesie ged", + "ween ey", + "ĠCre ep", + "Ġlinem en", + "mult i", + "ic ably", + "ud der", + "Ġvital ity", + "Ġshort fall", + "ĠP ants", + "ap ist", + "H idden", + "ĠDro ps", + "med ical", + "Ġpron unciation", + "ĠN RL", + "Ġinsight ful", + "J V", + "ĠBe ard", + "ĠCh ou", + "Ġchar ms", + "Ġb ins", + "Ġamb assadors", + "ĠS aturdays", + "Ġinhib itor", + "ĠFr anch", + "6 01", + "', '", + "ĠCon or", + "art ney", + "ĠX peria", + "g rave", + "be es", + "ĠProtest ants", + "Ġso aking", + "ĠM andal", + "Ġph ased", + "Ġ6 60", + "Ġsc ams", + "Ġbuzz ing", + "ĠItal ians", + "ĠLoren zo", + "ĠJ A", + "Ġhes itated", + "Ġcl iffs", + "ĠG OT", + "ingu ishable", + "Ġk o", + "Ġinter ruption", + "Z ip", + "Lear ning", + "Ġundersc ores", + "ĠBl ink", + "K u", + "57 9", + "ĠAut ob", + "I RE", + "Ġwater ing", + "Ġpast ry", + "8 20", + "Ġvision ary", + "ĠTempl ar", + "awa ited", + "Ġpist on", + "Ġant id", + "current ly", + "Ġp ard", + "Ġw aging", + "Ġnob ility", + "ĠY us", + "Ġinject ing", + "f aith", + "ĠP ASS", + "å º", + "Ġret ake", + "ĠPR OC", + "Ġcat hedral", + "b ash", + "Ġwrest lers", + "Ġpartner ing", + "Ġn oses", + "Ġ3 58", + "Trans form", + "am en", + "Ġb outs", + "ĠId eal", + "ĠConstant in", + "Ġse p", + "ĠMon arch", + "att en", + "ĠPe oples", + "mod ified", + "Ġmor atorium", + "Ġpen chant", + "Ġoffensive ly", + "Ġprox ies", + "ok ane", + "ĠTaiwan ese", + "ĠP oo", + "ĠH OME", + "us ional", + "Ġver bs", + "ĠO man", + "vis ory", + "Ġpersu asion", + "Ġmult it", + "Ġsc issors", + "G ay", + "ow ay", + "oph ysical", + "l us", + "gn u", + "Ġap ocalyptic", + "Ġabsurd ity", + "Ġplay book", + "Ġautobi ography", + "I UM", + "Ġsne aking", + "ĠSim ulation", + "pp s", + "ell ery", + "Plan et", + "Ġright fully", + "Ġn iece", + "ĠN EC", + "ĠIP O", + "ĠDis closure", + "lean or", + "ous y", + "ST ER", + "Ġ28 2", + "Cru z", + "Ch all", + "64 3", + "ĠSurv ive", + "ĠF atal", + "ĠAm id", + "ap o", + "We 
apons", + "D EN", + "7 70", + "ĠGreen wald", + "Ġlin en", + "al os", + "Ġpollut ants", + "ĠPCI e", + "k at", + "Ġp aw", + "ĠK raft", + "C hem", + "ĠTermin ator", + "Ġre incarn", + "Ġ] [", + "ĠSe eds", + "Ġsilhou ette", + "ĠSt ores", + "Ġgro oming", + "ĠD irection", + "ĠIs abel", + "ĠBr idges", + "ðŁ ij", + "E ED", + "ĠM orsi", + "Ġval ves", + "ĠRank ed", + "ĠPh arma", + "ĠOrgan izations", + "Ġpenet rated", + "ĠRod ham", + "ĠProt oss", + "Ġove rest", + "Ġex asper", + "ĠT J", + "Ġ 000000", + "Ġtrick le", + "Ġbour bon", + "WH O", + "Ġw retched", + "Ġmicrosc opic", + "Ġcheck list", + "Ġad orned", + "R oyal", + "Ad minist", + "ĠRet irement", + "ĠHig hest", + "We ather", + "ile ge", + "Ġincre ments", + "ĠC osponsors", + "Ġmas se", + "ĠS inn", + "r f", + "Ġh ordes", + "as sembly", + "75 4", + "ĠNat asha", + "ĠTY PE", + "ĠGEN ERAL", + "Ġarr anging", + "Ġ40 7", + "l ator", + "Ġg lean", + "Ġdisc redited", + "Ġclin icians", + "UN E", + "Ġachie ves", + "ĠEm erson", + "com plex", + "= [", + "Ġprincip ally", + "Ġfra il", + "p icked", + "Ġthan king", + "Ġre cl", + "ĠL AST", + "Ġsupp ressing", + "il ic", + "Ġantidepress ant", + "ĠLis bon", + "Ġth or", + "Ġsp a", + "Ġking doms", + "ĠPear ce", + "em o", + "Ġpl ung", + "Ġdiv est", + "Ġ ********************************", + "b is", + "osp els", + "ad r", + "Sp irit", + "hall a", + "P ink", + "end ez", + "Ġresurrect ed", + "esc ape", + "ĠRosen stein", + "Ġge ological", + "Ġnecess ities", + "Ġcarn iv", + "ĠE lys", + "ĠBar ney", + "Ġ29 6", + "dig y", + "ST ON", + "D OWN", + "Ġmil estones", + "Ġk er", + "Ġdismant ling", + "Ġre prim", + "Ġcross ings", + "19 45", + "Ġpatri archy", + "Ġblasp hemy", + "Ġ3 59", + "met ry", + "ĠOb esity", + "ĠDiff erences", + "bl ocking", + "ãĥķ ãĤ¡", + "ich ita", + "ĠSab ha", + "ph alt", + "ĠCol o", + "ual a", + "effic ients", + "ĠMed ina", + "con sole", + "55 7", + "ĠHann ibal", + "ĠHab it", + "ĠF ever", + "Ġthen ce", + "Ġsyn agogue", + "Ġessential s", + "Ġw ink", + "ĠTr ader", + "ID A", + "ĠSp oiler", + "ĠIceland ic", + "ĠHay ward", + "Ġpe ac", + "Ġmal ice", + "Ġflash back", + "Ġth w", + "Ġlay offs", + "L iquid", + "Ġtro oper", + "Ġh inge", + "ĠRead ers", + "Ph ill", + "ĠB auer", + "Cre ated", + "Ġaud its", + "ac compan", + "Ġunsus pecting", + "ier a", + "6666 6666", + "Ġbro ch", + "Ġapprehend ed", + "ĠM alk", + "cer ning", + "ĠCod ex", + "O VER", + "M arsh", + "ĠD eng", + "ĠExp ression", + "Ġdisrespect ful", + "Ġasc ending", + "t ests", + "ĠPlaint iff", + "ster y", + "ĠAl ibaba", + "din and", + "ĠDem psey", + "Applic ations", + "mor al", + "Ġthrough put", + "Ġquar rel", + "Ġm ills", + "Ġhe mor", + "ĠC ASE", + "terror ist", + "st im", + "ifest yle", + "ro zen", + "CE PT", + "Ar k", + "u ci", + "lect ic", + "Ġirrit ating", + "she ets", + "A y", + "Ġrede emed", + "Ġhorn y", + "ĠTe ach", + "ĠS ear", + "dem ocracy", + "4 65", + "ĠRest ore", + "Ġstand by", + "ĠP is", + "iff in", + "Ġsleep y", + "Ġextr ater", + "Ġcompl iments", + "Fram eworks", + "Ġinstall s", + "Ġb anging", + "sur face", + "found land", + "Ġmetaph ysical", + "Ġ28 3", + "oul s", + "dev ices", + "Ar gs", + "ĠSac rifice", + "ĠMcC orm", + "es on", + "Cons ervative", + "ĠM ikhail", + "see ing", + "is ively", + "ĠRo oms", + "ĠGener ic", + "Ġenthusi astically", + "Ġgri pped", + "Ġcomed ic", + "ĠElectric ity", + "Ġgu errilla", + "Ġdec oration", + "ĠPerspect ive", + "Ġconsult ations", + "Ġun amb", + "Ġplag iar", + "Ġmagic ian", + "Ġe rection", + "ĠTour ism", + "or ied", + "ro xy", + "11 00", + "T am", + "Ī è", + "Î ³", + "× ª", + "ĠPred ators", + "Nit rome", + "Ġtelesc opes", + 
"project s", + "Ġun protected", + "Ġst ocked", + "ĠEnt reprene", + "nex pected", + "Ġwast ewater", + "V ill", + "Ġint imately", + "Ġi Cloud", + "ĠConst able", + "Ġspo of", + "Ġne farious", + "Ġfin s", + "Ġcens or", + "ĠMod es", + "ĠEs per", + "ar bon", + "Ġinter sections", + "Ġlaud ed", + "Ġphys i", + "Ġgener ously", + "ĠThe Nitrome", + "ĠTheNitrome Fan", + "Ġar isen", + "ĠÙ Ī", + "Ġg lands", + "ĠPav ilion", + "ĠGu pta", + "Ġuniform ly", + "Ġr amps", + "ri et", + "ĠWH EN", + "ĠVan essa", + "Ġrout ed", + "Ġlim p", + "ĠC PI", + "p ter", + "int uitive", + "Ġv aping", + "Ġexperiment ed", + "ĠOlymp us", + "ĠAm on", + "Ġsight ing", + "Ġinfiltr ate", + "ĠGentle man", + "Ġsign ings", + "ĠMe ow", + "ĠNav igation", + "che cks", + "4 33", + "Ġel apsed", + "ĠBulg arian", + "esp ie", + "ĠS OM", + "d uring", + "Ġsp ills", + "anc a", + "ĠPly mouth", + "M AL", + "Ġdomest ically", + "ĠWater gate", + "ĠF AM", + "k illed", + "ed ited", + "ĠYour self", + "Ġsynchron ization", + "ĠPract ices", + "ST EP", + "Ġgen omes", + "ĠQ R", + "not ice", + "Ġloc ating", + "z in", + "Ġ3 29", + "al cohol", + "Ġk itten", + "V o", + "Ġr inse", + "Ġgrapp le", + "ĠSc rew", + "ĠD ul", + "A IR", + "Ġle asing", + "ĠCaf é", + "Ġro ses", + "ĠRes pect", + "Ġmis lead", + "Ġperfect ed", + "Ġnud ity", + "Ġnon partisan", + "ĠCons umption", + "Report ing", + "Ġnu ances", + "Ġdeduct ible", + "ĠSh ots", + "Ġ3 77", + "Ġæ ľ", + "ano oga", + "Ben ef", + "ĠB am", + "ĠS amp", + "if ix", + "Ġgal van", + "ĠMed als", + "rad ius", + "Ġno bles", + "Ġe aves", + "igr ate", + "K T", + "ĠHar bour", + "u ers", + "Ġrisk ed", + "re q", + "Ġneuro t", + "get table", + "ain a", + "Rom ney", + "Ġunder pin", + "Ġlo ft", + "ĠSub committee", + "ĠMong ol", + "b iz", + "Ġmanif ests", + "ass isted", + "ĠG aga", + "Ġsy nergy", + "Ġreligious ly", + "ĠPre f", + "ĠG erry", + "T AG", + "ĠCho i", + "4 66", + "beh ind", + "ĠO u", + "Gold Magikarp", + "Ġhemor rh", + "R iver", + "Ġtend on", + "Ġinj ure", + "ĠF iona", + "Ġp ag", + "Ġag itation", + "|| ||", + "ur an", + "ĠE SA", + "Ġest eem", + "Ġdod ging", + "Ġ4 12", + "r ss", + "Ġce ases", + "ex cluding", + "Ġint akes", + "Ġinsert s", + "Ġemb old", + "ĠO ral", + "up uncture", + "4 11", + "ĠUn ified", + "ĠDe le", + "Ġfurn ace", + "ĠCoy otes", + "ĠBr ach", + "L abor", + "Ġhand shake", + "Ġbru ises", + "Gr ade", + "éĹ ĺ", + "ĠGram my", + "ile en", + "St ates", + "ĠScandinav ian", + "ĠKard ash", + "8 66", + "Ġeffort lessly", + "ĠDI RECT", + "ĠTH EN", + "ĠMe i", + "ert ation", + "19 68", + "Ġgro in", + "w itch", + "Requ irements", + "98 5", + "Ġroof s", + "Ġest ates", + "ĠH F", + "Ġha ha", + "Ġdense ly", + "ĠO CT", + "Ġpl astics", + "Ġincident ally", + "ĠTr acks", + "ĠTax es", + "Ġch anted", + "Ġforce ful", + "ĠBie ber", + "ĠK ahn", + "K ent", + "ĠC ot", + "lic ts", + "F ed", + "Ġhide ous", + "ĠVer d", + "ĠSynd icate", + "ĠIl legal", + "J et", + "ĠD AV", + "re asonable", + "c rew", + "Ġfundamental ist", + "Ġtruth ful", + "ĠJ ing", + "Ġl il", + "Ġdown ed", + "Ġen chanted", + "ĠPolic ies", + "ĠMcM aster", + "ĠH are", + "ides how", + "Ġpar ams", + "en cers", + "gorith m", + "Ġallow ances", + "Ġturb ulent", + "Ġcomplex ities", + "ĠK T", + "Ġ3 37", + "ĠGen etic", + "F UN", + "D oug", + "t ick", + "Ġg igs", + "ument hal", + "Ġpatriarch al", + "Ġcal c", + ", ...", + "Ġc out", + "ĠGu an", + "Ġpath ological", + "ĠR ivals", + "Ġunder rated", + "Ġflu orescent", + "ĠJ iu", + "arna ev", + "ĠQu an", + "Ġ4 29", + "Ġ à¨", + "M ario", + "Con struct", + "ĠC itation", + "ĠR acial", + "ĠR SA", + "ĠF idel", + "Ġ3 95", + "Person ally", + "C ause", + "à 
»", + "rad ical", + "in en", + "Ġvehement ly", + "ĠPap a", + "Ġintern ship", + "Ġfl akes", + "ĠRe ck", + "Luck ily", + "B ra", + "20 20", + "rav ings", + "R N", + "W onder", + "Ser iously", + "Ġre usable", + "Ġpoll uted", + "ĠP eng", + "le igh", + "ind le", + "Ġcircuit ry", + "ĠMad onna", + "ĠB ART", + "Res idents", + "att ribute", + "Phil adelphia", + "Cl ub", + "Ġplan ner", + "Ġfr antically", + "Ġfaith fully", + "ĠTerrit ories", + "ĠL AT", + "ĠAnders en", + "an u", + "ĠP ARK", + "ĠS ora", + "i age", + "ĠPlay offs", + "ĠG CC", + "4 27", + "Ġab norm", + "ĠL ever", + "Ġdisob edience", + "As ync", + "ĠShe a", + "V ert", + "Ġsk irts", + "ĠSaw yer", + "x p", + "Ġwors ening", + "Ġsc apego", + "ĠAng le", + "oth al", + "Ġtro ve", + "ĠSt y", + "ĠN guyen", + "mar ine", + "ide on", + "Dep ths", + "Bl og", + "ĠIll uminati", + "Ġtract s", + "Ġorgan ise", + "Ġo str", + "F s", + "Ġlever aging", + "ĠD aredevil", + "as ar", + "Ġl ang", + "Ġex termin", + "urs ions", + "ĠRom o", + "ãĤ¤ ãĥĪ", + "Ġcont ended", + "Ġencounter ing", + "ĠTable t", + "ĠAltern ate", + "sk ill", + "Ġswe ets", + "Ġco hesive", + "cap acity", + "Ġrep ud", + "Ġl izard", + "ro o", + "Ġpilgr ims", + "ĠR uff", + "ĠInstr ument", + "ĠLog o", + "uit ous", + "E H", + "Ġsales man", + "Ġank les", + "L ed", + "ĠPat ty", + "ud os", + "Own er", + "Ġdiscrep ancies", + "k j", + "M U", + "Ġuncond itional", + "Dragon Magazine", + "i ard", + "O ak", + "ĠConvers ation", + "be er", + "ĠOs aka", + "D elta", + "us ky", + "Ġsecret ion", + "Ġpl aza", + "Ġm ing", + "Ġde pletion", + "ĠM ous", + "ĠI TS", + "ĠH imal", + "ĠFle ming", + "Ġcyt ok", + "ĠH ick", + "Ġbat ters", + "ĠInt ellectual", + "6 75", + "é r", + "IS ION", + "ĠQu entin", + "ĠCh apters", + "ih adi", + "Ġco aster", + "WAY S", + "ĠL izard", + "ĠY or", + "and ering", + "S kin", + "ha ust", + "ab by", + "Ġportray ing", + "Ġwield ed", + "d ash", + "Ġprop onent", + "Ġr ipple", + "Ġgrap hene", + "Ġfly er", + "Ġrec urrent", + "Ġdev ils", + "Ġwater fall", + "æĺ ¯", + "go o", + "Text Color", + "Ġtam pering", + "IV ES", + "TR UMP", + "ĠAb el", + "ĠS AL", + "ĠHend ricks", + "ĠLu cius", + "b ots", + "Ġ40 96", + "IST ORY", + "Gu est", + "ĠN X", + "in ant", + "Ben z", + "ĠLoad ed", + "ĠCle ver", + "t reatment", + "Ġta vern", + "Ġ3 39", + "ĠT NT", + "ific antly", + "Tem perature", + "F el", + "Ġunder world", + "ĠJud ges", + "Ġ< +", + "Ġst ump", + "Ġoccup ancy", + "Ġab er", + "ĠF inder", + ") \",", + "ĠN unes", + "res et", + "in et", + "ect omy", + "Ġwell ness", + "ĠP eb", + "quart ered", + "and an", + "Ġneg atives", + "ĠTh iel", + "ĠCl ip", + "ĠL TD", + "Ġbl ight", + "Ġreperto ire", + "K yle", + "Ġqu er", + "ĠC es", + "Ġha pl", + "98 9", + "ĠTh ames", + "isc opal", + "Des k", + "ivari ate", + "ĠEx cellence", + "found ation", + "Ġâ ĩ", + "X i", + "Ġmyster iously", + "esty les", + "Ġper ish", + "ĠEng els", + "ĠDE AD", + "09 0", + "}} }", + "ĠUn real", + "Ġrest less", + "ID ES", + "orth odox", + "ĠInter mediate", + "Ġdin ners", + "ĠTr out", + "ĠSe ym", + "ĠHall s", + "og ged", + "Ġtraged ies", + "Ġdid nt", + "67 6", + "Ġail ments", + "Ġobserv able", + "ĠV ide", + "ad apt", + "ĠD usk", + "Ġprofessional ism", + "ĠPres cott", + "ĠInd ies", + "p ox", + "ĠMe hran", + "W ide", + "Ġend emic", + "ĠPar an", + "B ird", + "Ġped als", + "ĠI U", + "ĠAdam ant", + "ĠH urt", + "Ġcorrel ates", + "urd en", + "Ġspons oring", + "cl imate", + "ĠUnivers ities", + "ĠK not", + "enn es", + "ĠDam ian", + "ĠAx el", + "S port", + "Ġbar b", + "ĠS no", + "sh own", + "ste en", + "ud ence", + "Ġnon violent", + "Ġhom ophobia", + "Ġbiom ass", + "ĠDet 
ail", + "Ġsrf N", + "ĠT une", + "accompan ied", + "I ENCE", + "Al bert", + "ĠMong o", + "z x", + "ĠCer berus", + "or bit", + "c ens", + "Ġsl ay", + "SH ARE", + "H Y", + "Ġb rawl", + "ĠPro be", + "Ġnonex istent", + "ĠClare nce", + "ĠBlack burn", + "Ġport als", + "ĠR ita", + "ĠRem ain", + "ĠLe vant", + "Ġtrick ed", + "ĠF erry", + "aver ing", + "ĠStraw berry", + "ĠAn swers", + "Ġhorrend ous", + "ĠA man", + "Supp lement", + "ĠT oad", + "Ġpe eled", + "Ġman oeuv", + "ĠU zbek", + "mond s", + "ĠH ector", + "Ġ40 2", + "pe es", + "fix es", + "Ġd j", + "Ġres umes", + "Ġaccount ant", + "Ġadvers ity", + "Ġham pered", + "ĠL arson", + "Ġd oping", + "part s", + "H ur", + "Ġbe arded", + "Ġy r", + "ĠPlug in", + "å¥ ³", + "Ġ/ **", + "rol ley", + "Ġwaters hed", + "ĠSub mission", + "if lower", + "AS C", + "Ġcho ir", + "Ġsculpt ures", + "m A", + "incre asing", + "ai i", + "Ġsne akers", + "Ġconfront s", + "ĠEle phant", + "ĠEl ixir", + "Ġrec al", + "ĠT TL", + "w idget", + "ĠW ax", + "ĠGr ayson", + "Ġha irst", + "Ġhumili ated", + "ĠWAR N", + "app iness", + "ĠT TC", + "F uel", + "Ġpol io", + "Ġcomplex es", + "Ġbab e", + "ĠX IV", + "P F", + "). [", + "P arts", + "Ġ4 35", + "M eg", + "ĠY ards", + "ĠAL P", + "Ġy ells", + "Ġprin ces", + "Ġbull ies", + "ĠCapital ism", + "ex empt", + "FA Q", + "ĠSp onge", + "ĠAl a", + "Ġpleas antly", + "Ġbu f", + "Ġden ote", + "Ġunp ublished", + "Ġkne eling", + "asc a", + "Ġl apse", + "al ien", + "99 4", + "Ġrefere es", + "ĠLaw yers", + "S anta", + "Ġpuzz ling", + "ĠProm etheus", + "ĠPh araoh", + "ĠDel ay", + "Ġfacilit ates", + "ĠC ES", + "Ġjew els", + "Ġbook let", + "ond ing", + "Ġpolar ization", + "ĠMor an", + "ĠSal ad", + "ĠS OS", + "ĠAdv ice", + "PH OTOS", + "IC AN", + "iat ures", + "ex press", + "ĠWonder land", + "ĠC ODE", + "ĠCL ASS", + "9 75", + "Ġg rep", + "ĠD iesel", + "ĠGl ac", + "! 
?\"", + "Ġr m", + "o ine", + "disc rimination", + "ĠN urse", + "m allow", + "Ġv ortex", + "ĠCons ortium", + "Ġlarge Download", + "stra ight", + "augh lin", + "G rad", + "Ġpublic ized", + "ĠW aves", + "ĠRed d", + "Ġfest ivities", + "ĠM ane", + "ar ov", + "Ġfleet ing", + "ĠDr unk", + "ug en", + "C ele", + "Ġchromos omes", + "ĠD OT", + "-+-+ -+-+", + "Ġbus iest", + "ĠBe aver", + "Sy rian", + "ĠK yr", + "k as", + "ĠCross Ref", + "19 50", + "76 01", + "Ġrepe aling", + "ĠWin ners", + "ĠMac ro", + "ĠD OD", + "bl ance", + "S ort", + "64 1", + "Ġmet re", + "ĠD irk", + "Ġgo ggles", + "Ġdraw backs", + "Ġcomplain ant", + "Ġauthor izing", + "Ġantit rust", + "oper ated", + "Ġm ah", + "Ġexagger ation", + "Am azing", + "ĠSer aph", + "Ġha ze", + "w ow", + "Ġextingu ished", + "Ġcan yon", + "ĠB osh", + "Ġv ents", + "Ġsc rape", + "Cor rect", + "4 26", + "Ġav g", + "Dem and", + "ĠâĪ ¼", + "Ġmicrobi ota", + "\"} ],\"", + "ĠSt ev", + "B io", + "ĠPlan es", + "Ġsuggest ive", + "Ġdec ipher", + "ĠRefuge e", + "ĠKe jriwal", + "ĠGreen peace", + "Ġdecl ass", + "ĠSound ers", + "Ġth o", + "Ġdec rypt", + "Ġbr ushing", + "ĠJane iro", + "ip op", + "S i", + "8 77", + "ĠGeoff rey", + "Ġc pu", + "ĠHaz el", + "Ġview points", + "Ġcris py", + "ĠNot ification", + "Ġsold er", + "ĠMod est", + "ĠHem isphere", + "Ġcass ette", + "in cludes", + "Ġident ifiers", + "ĠC ALL", + "in cent", + "T odd", + "ĠSwe ep", + "Ġ3 34", + "b oss", + "Ġsm ir", + "gin x", + "Ġtown ship", + "Ġg rieving", + "ĠMos que", + "Net flix", + "AS ED", + "ĠMillenn ials", + "oc om", + "19 67", + "Ġbold ly", + "s leep", + "Ġes che", + "arij uana", + "Ġsw irl", + "ĠPen al", + "Ġneglig ent", + "ĠStephen son", + "K ER", + "ĠZ oro", + "ris is", + "Ġlocal ization", + "ĠSeym our", + "ĠAng lic", + "red itation", + "prot ection", + "ĠPa ige", + "Ġo mit", + "ĠR ousse", + "ĠT ub", + "Ġinv itations", + "t ty", + "Ġm oss", + "ph ysical", + "C redits", + "Ġan archy", + "Ġchild care", + "Ġl ull", + "ĠM ek", + "ĠL anguages", + "lat est", + "ĠSan ford", + "Ġus ability", + "Ġdiff use", + "ĠD ATA", + "Ġsp rites", + "ĠVeget a", + "ĠProm otion", + "ãĥ¼ ãĤ¯", + "rict ing", + "z ee", + "Tur kish", + "ĠTD s", + "pro ven", + "57 1", + "Ġsmug glers", + "707 10", + "Ġreform ed", + "ĠLo is", + "Ġun fl", + "ĠWITH OUT", + "ĠReturn ing", + "ann ie", + "ĠTom as", + "Fr anc", + "ĠProf it", + "ĠSER V", + "ĠR umble", + "ik uman", + "es an", + "Ġt esters", + "Ġgad get", + "Ġbrace let", + "ĠF SA", + "comp onent", + "Ġparamed ics", + "Ġj an", + "ĠRem em", + "ĠSk inner", + "Ġl ov", + "ĠQu ake", + "rom a", + "Ġfl ask", + "Pr inc", + "Ġover power", + "Ġlod ging", + "ĠK KK", + "ret te", + "Ġabsor bs", + "w rote", + "Ġ ,\"", + "K ings", + "ĠH ail", + "ĠFall ing", + "xt ap", + "ĠHel ena", + "ire ns", + "L arry", + "Ġpamph let", + "ĠC PR", + "G ro", + "ĠHirosh ima", + "Ġhol istic", + "\". [", + "Ġdet achment", + "Ġas pire", + "Ġcompl icit", + "ĠGreen wood", + "Ġresp awn", + "ĠSt upid", + "ĠFin ished", + "f al", + "b ass", + "Ġab hor", + "Ġmock ery", + "ĠFe ast", + "VID EO", + "Ġcon sec", + "ĠHung ry", + "P ull", + "ĠH ust", + "it ance", + "? 
ãĢį", + ") --", + "ĠPar allel", + "con v", + "4 69", + "ha ar", + "w ant", + "P aper", + "m ins", + "ĠTor o", + "ĠTR UMP", + "ĠR ai", + "D W", + "ĠW icked", + "ĠL ep", + "Ġfun ky", + "Ġdetrim ent", + "ios is", + "ache v", + "Ġde grade", + "im ilation", + "Ġret ard", + "Ġfrag mentation", + "Ġcow boy", + "ĠY PG", + "ĠH AL", + "Parent s", + "ĠS ieg", + "ĠStra uss", + "ĠRub ber", + "× IJ", + "Fr ag", + "Ġp t", + "Ġoption ally", + "ĠZ IP", + "ĠTrans cript", + "ĠD well", + "88 2", + "M erc", + "ĠM OT", + "ãĥ¯ ãĥ³", + "Ġhun ts", + "Ġexec utes", + "In cludes", + "Ġacid ic", + "ĠRespons ibility", + "ĠD umb", + "we i", + "And erson", + "ĠJas per", + "ight on", + "abs olutely", + "Ad ult", + "Ġpl under", + "Mor ning", + "ĠT ours", + "ĠD ane", + "Î º", + "ĠT EST", + "ĠG ina", + "Ġcan ine", + "aw an", + "Ġsocial ists", + "ĠS oda", + "Ġimp etus", + "ĠSupplement ary", + "oli ath", + "ĠKinn ikuman", + "mitted ly", + "second s", + "Ġorganis ers", + "Ġdocument aries", + "Vari able", + "GRE EN", + "Ġres orts", + "Ġbr agging", + "Ġ3 68", + "Art ist", + "w k", + "bl ers", + "Un common", + "ĠRet rieved", + "Ġhect ares", + "Ġtox in", + "r ank", + "Ġfaith s", + "ĠG raphic", + "Ġve c", + "ĠL IA", + "Af rican", + "Ġard ent", + "end iary", + "L ake", + "ĠD OS", + "cient ious", + "ĠOk awaru", + "ĠAll y", + "ĠTim eline", + "D ash", + "ĠI c", + "contin ue", + "Ġt idy", + "Ġinstinct ively", + "ĠP ossibly", + "ĠOut door", + "ĠWould n", + "Ġl ich", + "ĠBr ay", + "ĠA X", + "Ġà ī", + "Ġ+ #", + "\\ '", + "Direct ory", + "ab iding", + "Ġf eral", + "ic ative", + "but t", + "Ġper verse", + "S alt", + "Ġwar ped", + "Ġnin eteen", + "Ġcabin ets", + "Ġsrf Attach", + "ĠSl oan", + "Ġpower ing", + "reg ation", + "F light", + "se vere", + "Ġst ren", + "Ġc og", + "ap ache", + "Ġâ Ŀ", + "Ġcaf eteria", + "p aces", + "ĠGrim oire", + "uton ium", + "Ġr aining", + "Ġcir cling", + "Ġlineback ers", + "c redit", + "Ġrep atri", + "ĠCam den", + "lic ense", + "Ġly ric", + "Ġdescript or", + "Ġval leys", + "Ġre q", + "Ġback stage", + "ĠPro hibition", + "ĠK et", + "Op ening", + "S ym", + "æĸ ¹", + "Ġserv ings", + "Ġoverse en", + "Ġaster oids", + "ĠMod s", + "ĠSpr inger", + "ĠCont ainer", + "è »", + "ĠM ens", + "Ġmult im", + "Ġfire fighter", + "pe c", + "Ġchlor ine", + "Ð ¼", + "end i", + "Ġsp aring", + "Ġpolyg amy", + "ĠR N", + "ĠP ell", + "Ġt igers", + "Ġflash y", + "ĠMad ame", + "S word", + "Ġpref rontal", + "Ġpre requisite", + "uc a", + "Ġw ifi", + "Ġmiscon ception", + "Ġharsh ly", + "ĠStream ing", + "ot om", + "ĠGiul iani", + "foot ed", + "Ġtub ing", + "ind ividual", + "z ek", + "n uclear", + "m ol", + "Ġright ful", + "49 3", + "Ġspecial ization", + "Ġpassion ately", + "ĠVel ocity", + "ĠAv ailability", + "T enn", + "Ġl atch", + "ĠSome body", + "Ġhel ium", + "cl aw", + "Ġdi pping", + "XX X", + "Ġinter personal", + "7 10", + "Ġsub ter", + "Ġbi ologists", + "ĠLight ing", + "Ġopt ic", + "Ġden im", + "end on", + "ĠC orm", + "Ġ3 41", + "ĠC oup", + "Ġfear less", + "Ġal ot", + "ĠCliff ord", + "ĠRun time", + "ĠProv ision", + "up dated", + "lene ck", + "Ġneur on", + "Ġgrad ing", + "ĠC t", + "sequ ence", + "in ia", + "con cept", + "Ġro aring", + "ri val", + "ĠCaucas ian", + "Ġmon og", + "key es", + "Ġappell ate", + "Ġlia ison", + "EStream Frame", + "ĠPl um", + "! 
.", + "Ġsp herical", + "Ġper ished", + "Ġbl ot", + "Ġben ches", + "Ġ4 11", + "Ġpione ered", + "Ġhur led", + "Jenn ifer", + "ĠYose mite", + "Ch air", + "Ġreef s", + "Ġelect or", + "ĠAnt hem", + "65 2", + "Ġun install", + "Ġimp ede", + "Ġbl inking", + "Ġgot o", + "Dec re", + "A ren", + "Ġstabil ization", + "ĠDis abled", + "ĠYanuk ovych", + "Ġoutlaw ed", + "ĠVent ura", + "ten ess", + "Ġplant ation", + "Ġy acht", + "ĠHu awei", + "Ġsol vent", + "Ġgr acious", + "Ġcur iously", + "Ġcapac itor", + "Ġc x", + "ĠRef lex", + "Ph ys", + "ĠC f", + "pt in", + "cons ervative", + "Ġinv ocation", + "c our", + "F N", + "ĠNew ly", + "H our", + "As ian", + "ĠLe ading", + "ĠAer ospace", + "An ne", + "Ġpre natal", + "Ġdeterior ating", + "H CR", + "ĠNorm andy", + "ol ini", + "ĠAm bro", + "9 10", + "Ġset backs", + "ĠT RE", + "Ġs ig", + "ĠSc ourge", + "59 7", + "79 8", + "Game play", + "Ġm sec", + "M X", + "Ġprice y", + "ĠL LP", + "aker u", + "Ġover arching", + "ĠB ale", + "Ġworld ly", + "Cl ark", + "Ġscen ic", + "Ġdisl iked", + "ĠCont rolled", + "T ickets", + "ĠE W", + "ab ies", + "ĠPl enty", + "Non etheless", + "Ġart isan", + "Trans fer", + "ĠF amous", + "Ġinf ield", + "ble y", + "Ġunres olved", + "ĠML A", + "ãĤ Ĥ", + "Cor rection", + "Ġdemocr at", + "ĠMore no", + "ro cal", + "il ings", + "Ġsail or", + "Ġr ife", + "h ung", + "Ġtrop es", + "Ġsn atched", + "ĠL IN", + "ĠB ib", + "ES A", + "ĠPre v", + "ĠCam el", + "run time", + "Ġob noxious", + "4 37", + "Ġsum mers", + "Ġunexpl ained", + "ĠWal ters", + "cal iber", + "Ġg ull", + "ĠEnd urance", + "ä½ ľ", + "Ġ3 47", + "Ir ish", + "Ġaer obic", + "Ġcr amped", + "ĠHon olulu", + "à ©", + "us erc", + "ec ast", + "AC Y", + "ĠQu ery", + "ãĤ¹ ãĥĪ", + "Bet a", + "Ġsuscept ibility", + "ĠSh iv", + "ĠLim baugh", + "Ġà ĸ", + "ĠN XT", + "ĠM uss", + "ĠBrit ons", + "ES CO", + "EG IN", + "Ġ% %", + "Ġsec ession", + "ĠPat ron", + "ĠLu a", + "n aires", + "ĠJPM organ", + "us b", + "ocy te", + "Ġcouncill ors", + "ĠLi ang", + "f arm", + "Ġnerv ously", + "Ġattract iveness", + "ĠK ov", + "j ump", + "Pl ot", + "Ġst ains", + "ĠStat ue", + "ĠApost les", + "he ter", + "ĠSUP PORT", + "Ġoverwhel m", + "Y ES", + "Ġ29 1", + "d ensity", + "Ġtra pping", + "M it", + "Ġf ide", + "ĠPam ela", + "atl antic", + "Dam n", + "Ġp ts", + "OP A", + "Ġserv icing", + "Ġoverfl owing", + "ul o", + "ĠE rit", + "t icket", + "light ing", + "ĠH mm", + "ãĥ¼ ãĥ«", + "im oto", + "Ġchuck le", + "4 23", + "ãģ ķ", + "sh ape", + "Ġque ues", + "Ġanch ors", + "ãĤ¼ ãĤ¦ãĤ¹", + "F er", + "Ġaw oke", + "Ġ6 66", + "h ands", + "Ġdiver gence", + "Ġ50 5", + "T ips", + "Ġdep ot", + "Ġske w", + "ĠDel iver", + "op ot", + "Ġdiv ul", + "ĠE B", + "uns igned", + "ĠUn i", + "X box", + "Ġfor ks", + "Ġ7 02", + "å ¯", + "Ġpromot ers", + "ĠV apor", + "Ġlev ied", + "sl ot", + "Ġpig ment", + "Ġcyl inders", + "C RE", + "Ġsn atch", + "Ġperpet ually", + "Ġl icking", + "ĠFe et", + "ĠKra ken", + "ĠHold en", + "ĠCLS ID", + "m r", + "Ġproject or", + "Ġden otes", + "Ġchap el", + "ĠTor rent", + "b ler", + "R oute", + "ĠDef endant", + "ĠPublisher s", + "ĠM ales", + "ĠInn ov", + "ĠAg ility", + "rit er", + "ty mology", + "st ores", + "L ind", + "Ġf olly", + "ĠZur ich", + "B le", + "Ġnurt ure", + "Ġcoast line", + "uch in", + "D omin", + "Ġfri vol", + "ĠCons olid", + "res ults", + "M J", + "Ġphyl ogen", + "Ġha uled", + "ĠW iley", + "ĠJess ie", + "ĠPrep are", + "ĠE ps", + "Ġtreasure r", + "I AS", + "Ġcolon ists", + "Ġin und", + "ĠWW F", + "ĠCon verted", + "6 000", + "out side", + "ĠApp earance", + "ĠRel ic", + "ĠM ister", + "s aw", + "Ġresult ant", + "Ġadject ive", + 
"ĠLaure l", + "ĠHind i", + "b da", + "Pe ace", + "Ġreb irth", + "Ġmembr anes", + "Ġforward ing", + "Ġcoll ided", + "ĠCar olyn", + "K ansas", + "5 99", + "ĠSolid GoldMagikarp", + "Be ck", + "Ġstress ing", + "ĠGo o", + "ĠCooper ative", + "Ġf s", + "ĠAr chie", + "L iter", + "ĠK lopp", + "J erry", + "Ġfoot wear", + "War ren", + "Ġsc ree", + "h are", + "Under standing", + "P ed", + "Ġanth ology", + "ĠAnn ounce", + "M ega", + "Ġflu ent", + "Ġbond age", + "ĠDisc ount", + "il ial", + "C art", + "ĠNight mares", + "Sh am", + "ĠB oll", + "uss ie", + "H ttp", + "Atl anta", + "Ġun recogn", + "ĠB id", + "Ġunder grad", + "Ġforg iving", + "ĠGl over", + "AAAA AAAA", + "4 45", + "V G", + "pa io", + "kill ers", + "Ġrespons ibly", + "Ġmobil ize", + "Ġeffect ed", + "ĠL umin", + "Ġk ale", + "Ġinfring ing", + "ann ounced", + "Ġf itt", + "b atch", + "ĠT ackle", + "ĠL ime", + "ĠAP P", + "uke mia", + "Ġrub y", + "Ġex oner", + "ĠCas ual", + "0 70", + "Ġpel vic", + "Ġautom ate", + "ĠK ear", + "ĠCoast al", + "Ġcre ed", + "Ġbored om", + "ĠSt un", + "ri ott", + "Ĥ İ", + "Ġregener ate", + "Ġcomed ians", + "ĠOP ER", + "Sp ons", + "id ium", + "on is", + "L ocated", + "05 7", + "Ġsusp ense", + "ĠD ating", + "C ass", + "Ġneoc ons", + "ĠShin zo", + "Ġaw oken", + "ch rist", + "ĠMess ages", + "att led", + "ĠSpr ay", + "ĠSp ice", + "C W", + "Ġshield ing", + "ĠG aul", + "Am id", + "Ġparam ilitary", + "Ġmult if", + "ĠTan ner", + "il k", + "Ġgodd amn", + "g ements", + "Ġbe friend", + "m obi", + "Ġ3 88", + "fold er", + "acc a", + "Ġins in", + "g ap", + "N ev", + "fif th", + "Ġpsychiat ry", + "b anks", + "TH IS", + "Ġhar b", + "ac qu", + "Ġfac ade", + "ĠPower Point", + "80 3", + "Ġbl uff", + "Sh ares", + "Ġfavor ing", + "El izabeth", + "Ãį Ãį", + "Ġr anger", + "77 2", + "ĠAr che", + "h ak", + "ĠGen etics", + "ĠF EMA", + "Ġev olves", + "Ġest e", + "ĠP ets", + "ĠM é", + "ĠInterest ing", + "ĠCanter bury", + "ch apter", + "ĠStar fleet", + "Sp anish", + "Ġdraw back", + "ĠNor wich", + "9 70", + "n orth", + "ag anda", + "Ġtransform ative", + "ram ids", + "bi ology", + "ad ay", + "Ġpropag ation", + "ĠGam ma", + "ĠDen ise", + "ĠCalcul ator", + "ent imes", + "ĠB ett", + "Ġapp endix", + "ĠHD D", + "AK ING", + "Ġst igmat", + "Ġhol ster", + "Ġord inarily", + "Ch ance", + "ĠCont rary", + "Ġad hesive", + "Ġgather s", + "6 12", + "re au", + "ony ms", + "ew ays", + "Ġindu ces", + "Ġinterchange able", + "se m", + "Wh it", + "Ġtr ance", + "Ġincorpor ation", + "ĠExt ras", + "Fin ancial", + "Ġawkward ly", + "ĠStur geon", + "ĠH Y", + "Norm ally", + "ĠEnd ing", + "ĠAss ist", + "enc rypted", + "Ġsub jug", + "Ġn os", + "Ġfan atic", + "C ub", + "C U", + "?\" .", + "Ġirre versible", + "å Ĥ", + "03 1", + "ĠH AR", + "sp read", + "ul ia", + "= $", + "Sc ope", + "L ots", + "Ġlif estyles", + "ol on", + "Ġf eds", + "Ġcongrat ulate", + "web kit", + "Ġindist inguishable", + "ĠSw ing", + "Ġcommand ments", + "qu ila", + "ab ella", + "m ethyl", + "ann abin", + "Ġo vere", + "Ġlob ster", + "ĠQU EST", + "ĠCONT IN", + "bern atorial", + ":::: ::::", + "ĠTra ve", + "ĠSam oa", + "AN I", + "75 2", + "Ð ´", + "userc ontent", + "ĠMod erate", + "y eah", + "ĠK itt", + "Ġwe e", + "Ġstuff ing", + "ĠInter vention", + "ĠD ign", + "Ġware houses", + "ĠF iji", + "Ġpel lets", + "Ġtake away", + "ĠT ABLE", + "ĠClass ical", + "col lection", + "Ġland fall", + "ĠMus cle", + "Ġsett les", + "ĠAD V", + "Ġ3 44", + "L aura", + "Ġf ared", + "ĠPart ial", + "4 36", + "oss ibility", + "ĠD aly", + "ĠT arant", + "ĠFu ji", + "am l", + "c ence", + "55 1", + "ĠProced ures", + "ĠO CD", + "ĠU D", + "t in", + "Q 
UI", + "ach o", + "4 38", + "Ġgl itches", + "Ġenchant ment", + "Ġcalcul ates", + "IR O", + "ĠH ua", + "alys es", + "ĠL ift", + "um o", + "Ġle apt", + "Ġhypothes ized", + "ĠGust av", + "it ans", + "VERS ION", + "æ ł", + "Rog er", + "Ġr and", + "ĠAd apter", + "Ġ3 31", + "ĠPet ition", + "k ies", + "M ars", + "Ġunder cut", + "ze es", + "ĠLy ons", + "ĠDH CP", + "Miss ing", + "Ġretire es", + "Ġins idious", + "el i", + "> )", + ". ãĢį", + "Ġfinal ists", + "ĠA ure", + "Ġacc user", + "Ġwas tes", + "ĠY s", + "ĠL ori", + "Ġconstitu encies", + "Ġsupp er", + "Ġmay hem", + "or ange", + "Ġmis placed", + "Ġmanager ial", + "Ġex ce", + "ĠCL I", + "Ġprim al", + "ĠL ent", + "Cry stal", + "h over", + "ĠN TS", + "end um", + "Ġd w", + "ĠAl c", + "n ostic", + "Ġpres erves", + "ĠTs arnaev", + "Ġtri pled", + "rel ative", + "Arc ade", + "k illing", + "ĠW EEK", + "ĠH anna", + "D ust", + "Com pleted", + "ģ «", + "Ġappro ves", + "ĠSur f", + "ĠLuther an", + "ven ants", + "Ġrobber ies", + "we ights", + "soft ware", + "at ana", + "ug al", + "Ġgrav y", + "ĠC ance", + "OLOG Y", + "ly ak", + "Ton ight", + "Ġunve il", + "Ġ19 04", + "ĠMin ion", + "ent ious", + "st ice", + "pack ages", + "ĠG EAR", + "Ġg ol", + "ĠHutch inson", + "ĠProf ession", + "ĠG UN", + "ĠDiff erence", + "ĠTsuk uyomi", + "ĠLes bian", + "6 70", + "Ġfug itive", + "ĠPlan etary", + "-------------------------------- ------------------------", + "Ġacc rued", + "Ġch icks", + "Ġsto pp", + "Ġblock ers", + "C od", + "Ġcomment ers", + "ĠSomew here", + "ĠPhot ographer", + "the me", + "Ġmay oral", + "w u", + "Ġanten nas", + "Ġrev amped", + "ĠSubject s", + "it é", + "im ura", + "Ġentr ances", + "liter ally", + "Ġten ets", + "ĠO MG", + "ĠMP H", + "ĠDon key", + "ĠOff ense", + "Ġ\" +", + "Sn ap", + "ĠAF B", + "Ġan imate", + "ĠS od", + "His panic", + "Ġinconsist ency", + "D b", + "F Y", + "Ex port", + "Ġa pe", + "Ġpear l", + "ib el", + "ĠPAC s", + "Ġ{ \\", + "Ġact u", + "ĠHS BC", + "camp us", + "Ġpay off", + "Ġde ities", + "ĠN ato", + "ou ple", + "Ġcens ored", + "ĠCl ojure", + "Ġconf ounding", + "en i", + "Ġreck on", + "op he", + "Ġspot ting", + "Ġsign ifies", + "Ġprop el", + "Ġfest ive", + "S uggest", + "Ġpled ging", + "ĠB erman", + "Ġrebell ious", + "Ġovershadow ed", + "Ġinfiltr ated", + "j obs", + "67 2", + "Ġscal able", + "Ġdomin ion", + "ĠNew foundland", + "ĠMead ow", + "Ġpart itions", + "AM I", + "Ġsupplement ary", + "str ument", + "Ġhair y", + "Ġperpet uate", + "Ġnuts hell", + "ĠPot ato", + "ĠHob bit", + "Ġcur ses", + "Flo at", + "Ġquiet er", + "Ġfuel ing", + "Ġcaps ules", + "ĠL ust", + "ĠH aunted", + "Exec utive", + "Ġchild birth", + "G re", + "Ġrad iant", + "å İ", + "Ġm alls", + "Ġin ept", + "ĠWarrant y", + "Ġspect ator", + "E h", + "t hens", + "Ġculmin ating", + "æ ©", + "ary a", + "ãĤ ®", + "ilit arian", + "ĠOR IG", + "ĠSp ending", + "pt ives", + "ĠS iren", + "ĠRec ording", + "ay ne", + "Ġv im", + "Ġspr ang", + "T ang", + "ĠM FT", + "mor ning", + "ĠWe ed", + "m peg", + "cess ion", + "ĠCh ung", + "7 30", + "w arning", + "56 2", + "handed ly", + "P oor", + "P olitics", + ": #", + "Ġp ian", + "Ġfec es", + "ĠDocument ation", + "Ġban ished", + "Ġ3 99", + "ĠAR C", + "Ġhe inous", + "J ake", + "ĠAm ir", + "way ne", + "v re", + "os henko", + "Ġnotebook s", + "Ġfound ational", + "Ġmarvel ous", + "ixt ape", + "Ġwithdraw als", + "Ġh orde", + "ĠD habi", + "is able", + "ĠK D", + "Ġcontag ious", + "ĠD ip", + "ĠAr rows", + "Ġpronoun s", + "Ġmorph ine", + "ĠB US", + "68 2", + "Ġk osher", + "fin ished", + "ĠInstr uments", + "Ġf used", + "yd en", + "ĠSal mon", + "F ab", + "aff 
ected", + "K EN", + "C ENT", + "Dom ain", + "Ġpoke mon", + "ĠDr inking", + "G rowing", + "ĠInvestig ative", + "ĠA ether", + "em i", + "Ġtabl oid", + "Ġrep ro", + "ĠNot withstanding", + "ĠBers erker", + "Ġdram as", + "Ġclich é", + "Ġb ung", + "ĠU RI", + "ĠD os", + "0 44", + "Ġpast ors", + "Ġl s", + "Ġac rylic", + "aun ts", + "Ed ward", + "Ġmajor ities", + "B ang", + "Ġfield ing", + "ĠRepl acement", + "ĠAl chemy", + "pp ard", + "ĠRome o", + "ĠSan ct", + "ĠLav rov", + "ib ble", + "Inst ruct", + "Ġimp ractical", + "ĠPlay boy", + "ce phal", + "Ġsw aps", + "Ġk an", + "ĠThe o", + "Ġillust rating", + "Ġdismant led", + "ĠTrans gender", + "ĠG uth", + "UG H", + "Ġtriumph ant", + "Ġencomp ass", + "Ġbook mark", + "udd in", + "j er", + "Ġpred icate", + "ES H", + "Ġwhen ce", + "ĠAB E", + "Ġnon profits", + "Se qu", + "Ġdi abetic", + "Ġp end", + "Ġheart felt", + "sh i", + "Ġinter acts", + "ĠTele com", + "Ġbombard ment", + "dep ending", + "ĠLow ry", + "ĠAd mission", + "ĠBl ooming", + "ust ration", + "ene gger", + "B rew", + "Ġmol ten", + "ĠNer d", + "P IN", + "âĸ Ģ", + "ave ment", + "Ġtou red", + "Ġco efficients", + "ĠTray von", + "ans son", + "Ġsand y", + "t old", + "fl ows", + "Ġpop ulous", + "ĠT inder", + "ĠBl iss", + "R achel", + "Min imum", + "Ġcontest ant", + "ĠRed uce", + "ĠMor se", + "ĠGrass ley", + "ĠClick er", + "Ġexp r", + "Ġs incerity", + "Ġmar qu", + "Ġelic it", + "ĠPro position", + "ĠDemon ic", + "Ġtac os", + "G reek", + "Ġpost war", + "Ġin sofar", + "ĠP ork", + "Ġ35 2", + "doctor al", + "walk ing", + "Ġmid term", + "ĠSam my", + "sight ed", + "ĠTR ANS", + "ic i", + "AL D", + "ĠUS L", + "ĠF ISA", + "ĠAm pl", + "ĠAlex andra", + "ine lli", + "Tr ain", + "Ġsign ify", + "ĠVers us", + "Ġob fusc", + "Ġk h", + "Ġagg ro", + "ĠRen ault", + "Ġ3 48", + "5 18", + "ox icity", + "0 22", + "ĠTw ist", + "Ġgoof y", + "D ynamic", + "Ġbrief ings", + "m ight", + "8 99", + "Ġderog atory", + "T ro", + "Ġfor ging", + "ĠKor an", + "ĠMar ried", + "ĠBuc s", + "Ġpal ate", + "ĠCon version", + "m able", + "4 13", + "Ġ( _", + "Ġs iph", + "ĠN EO", + "col lege", + "Ġmarg inally", + "Ġfl irt", + "ĠTra ps", + "ĠP ace", + "é »Ĵ", + "Ġgoalt ender", + "Ġforb ids", + "Ġcler ks", + "ĠT ant", + "ĠRobb ins", + "ĠPrint ing", + "Ġpremie red", + "Ġmagn ification", + "ĠT G", + "ĠR ouse", + "ĠM ock", + "odynam ics", + "Ġpre clude", + "ism o", + "ĠPul itzer", + "Ġaval anche", + "ĠK odi", + "rib une", + "ĠL ena", + "Elect ric", + "Ġref inery", + "Ġend owed", + "Ġcounsel ors", + "Ġd olphin", + "ĠM ith", + "Ġarm oured", + "hib ited", + "Beg in", + "ĠP W", + "O il", + "ĠV or", + "ĠShar if", + "ĠFraz ier", + "est ate", + "Ġj ams", + "Pro xy", + "Ġband its", + "ĠPresbyter ian", + "ĠPrem iere", + "t iny", + "ĠCru el", + "Test ing", + "Ġhom er", + "ĠV ERS", + "ĠPro l", + "ĠDep osit", + "ĠCoff in", + "Ġsemin ars", + "Ġs ql", + "ĠDef endants", + "Altern atively", + "ĠR ats", + "ç «", + "ethy st", + "' >", + "Ġiss uer", + "58 9", + "Ġch aired", + "ĠAccess ories", + "man ent", + "Ġmar row", + "ĠPrim ordial", + "C N", + "Ġlimit less", + "ĠCarn age", + "Ġund rafted", + "q v", + "IN ESS", + "on ew", + "Ġco hesion", + "98 7", + "Ġne cks", + "Ġfootball er", + "ĠG ER", + "Ġdetect able", + "ĠSupport ing", + "ĠCS V", + "oc ally", + "k Hz", + "Ġund e", + "Ġsh one", + "Ġbud ding", + "tra k", + "Stand ing", + "ĠStar craft", + "ĠKem p", + "Ben ch", + "Ġthw arted", + "ĠGround s", + "ath i", + "L isa", + "Dial og", + "ĠS X", + "V ision", + "Ġingen ious", + "Ù IJ", + "Ġfost ering", + "ĠZ a", + "ĠIn gram", + "Ġ\" @", + "N aturally", + "6 16", + "0 35", + "ĠF AC", + "H 
mm", + "55 4", + "Ġacceler ator", + "ĠV end", + "Ġsun screen", + "Ġtuber culosis", + "rav iolet", + "ĠFunction al", + "ĠEr rors", + "ed ar", + "19 66", + "ĠSpect re", + "ĠRec ipes", + "88 5", + "ĠM ankind", + "L iverpool", + "Ġ| --", + "Ġsubst itutes", + "ĠX T", + "w ired", + "Ġinc o", + "ĠAf gh", + "E va", + "ic c", + "S ong", + "K night", + "Ġdilig ently", + "ĠBroad cast", + "A id", + "Ġaf ar", + "ĠH MS", + "aton in", + "ĠGr ateful", + "Ġfire place", + "ĠOm ni", + "e uro", + "ĠF RE", + "ĠSh ib", + "ĠDig est", + "t oggle", + "Ġheads ets", + "Ġdiff usion", + "ĠSqu irrel", + "ĠF N", + "Ġdark ened", + "out her", + "Ġsleep s", + "ĠX er", + "gun s", + "Ġset ups", + "Ġpars ed", + "Ġmamm oth", + "ĠCur ious", + "g ob", + "ĠFitz patrick", + "ĠEm il", + "im ov", + "........ .....", + "ĠB enny", + "Second ly", + "Ġheart y", + "Ġcons on", + "st ained", + "Ġgal actic", + "cl ave", + "Ġplummet ed", + "Ġp ests", + "Ġsw at", + "Ġrefer rals", + "ĠLion el", + "h oly", + "Ġunder dog", + "ĠSl ater", + "ĠProv ide", + "ĠAm ar", + "ress or", + "å Į", + "ong a", + "Ġtim id", + "Ġp iety", + "ĠD ek", + "Ġsur ging", + "az o", + "Ġ6 10", + "Ġdes ks", + "ĠSp okane", + "ĠAn field", + "Ġwars hips", + "ĠCob ra", + "Ġar ming", + "clus ively", + "ĠBad ge", + "ag ascar", + "ĠPR ESS", + "ĠMcK enzie", + "ĠFer dinand", + "burn ing", + "Af ee", + "Ġtyr ann", + "ĠI w", + "ĠBo one", + "100 7", + "ĠRe pt", + "Ċ Âł", + "Ġcar avan", + "ĠD ill", + "ĠBundes liga", + "Ch uck", + "Ġheal er", + "ãĥ¼ãĥ Ĩ", + "ĠH obby", + "Ġneg ate", + "Ġcrit iques", + "section al", + "mop olitan", + "Ġd x", + "Ġouts ourcing", + "ĠC ipher", + "t ap", + "Sh arp", + "Ġup beat", + "Ġhang ar", + "Ġcru ising", + "ĠNi agara", + "Ġ3 42", + "ill us", + "ĠS v", + "Ġsubt itles", + "Ġsqu ared", + "Ġbook store", + "Ġrevolution aries", + "ĠCarl ton", + "ab al", + "Ut ah", + "Ġdesp ise", + "ĠU M", + "cons ider", + "aid o", + "Ġc arts", + "ĠT urtles", + "Tr aining", + "Ġhonor ary", + " ¢", + "Ġtri angles", + "4 22", + "Ġreprint ed", + "Ġgrace ful", + "ĠMong olia", + "Ġdisrupt ions", + "ĠB oh", + "Ġ3 49", + "Ġdr ains", + "Ġcons ulate", + "Ġb ends", + "Ġm afia", + "ur on", + "ĠF ulton", + "m isc", + "Ġren al", + "Ġin action", + "ck ing", + "Ġphot ons", + "Ġbru ised", + "ĠC odes", + "og i", + "Ġn ests", + "ĠLove ly", + "ĠLib re", + "ĠD aryl", + "Ġ# ##", + "S ys", + ". 
,\"", + "Ġfree zes", + "est ablishment", + "and owski", + "Ġcum bers", + "ĠSt arg", + "ĠBom bs", + "Ġleg ions", + "Ġhand writing", + "Ġgr un", + "ĠC ah", + "sequ ent", + "Ġm oth", + "ĠMS M", + "Ins ert", + "F if", + "Ġmot el", + "Ġdex ter", + "ĠB ild", + "hearted ly", + "Ġpro pe", + "ĠText ure", + "ĠJ unction", + "ynt hesis", + "oc ard", + "ĠVer a", + "ĠBar th", + "Ġμ g", + "Ġl ashed", + "Ġ35 1", + "ĠZ amb", + "ĠSt aples", + "ĠCort ex", + "ĠCork er", + "Ġcontinu um", + "ĠWR ITE", + "unt a", + "rid or", + "Ġde ems", + "0 33", + "ĠG OLD", + "p as", + "Ġrep ressive", + "ãĥĨ ãĤ£", + "Ġbaff led", + "Sc ar", + "Ġc rave", + "Ġ ______", + "Ġentrepreneurs hip", + "ĠDirector ate", + "Ġ' [", + "Ġv ines", + "Ġasc ended", + "ĠGR OUP", + "ĠGood bye", + "Ġdo gged", + "ãĥ´ ãĤ¡", + "Man ufact", + "Ġunimagin able", + "ri ots", + "ier rez", + "Ġrel ativity", + "ĠCraft ing", + "ra ught", + "ud en", + "c ookie", + "Ġassass ins", + "Ġdissatisf ied", + "ac ci", + "Ġcondu it", + "Sp read", + "ĠR ican", + "n ice", + "izz le", + "Ġsc ares", + "ĠWH Y", + "ph ans", + "5 35", + "Ġprot racted", + "ĠKrist en", + "5 36", + "ĠSc rib", + "ĠNe h", + "Ġtwent ies", + "Ġpredic ament", + "Ġhandc uffs", + "Ġfruit ful", + "ĠU L", + "ĠLud wig", + "Ġatt est", + "ĠBre aker", + "Ġbi ologically", + "ĠDeal er", + "Ġrenov ations", + "f w", + "ess en", + "Al ice", + "ĠHen ri", + "Ġun ilaterally", + "ĠS idd", + "h ai", + "ĠSt retch", + "S ales", + "Ġcumbers ome", + "ĠJ avier", + "Ġtrend y", + "Ġrot ting", + "ĠChall enges", + "Ġscra ps", + "Ġfac ets", + "ĠVer onica", + "ĠVer ge", + "ĠS ana", + "Al ien", + "ĠR ih", + "Ġrad ial", + "ect ar", + "Ġ6 30", + "cl i", + "Mar ie", + "Ġwild fire", + "ĠCat o", + "h ander", + "Ġwait ress", + "Ġch ops", + "ĠS ECTION", + "Ġblunt ly", + "ĠCat alog", + "n ian", + "stud y", + "Ġpat rolling", + "ĠT enth", + "nex us", + "ĠN ON", + "op sy", + "Ġsc athing", + "s ie", + "Ġdeterior ated", + "V B", + "Naz is", + "Ġdep ictions", + "Ġauthent icated", + "ĠCon ce", + "k rit", + "Ġpromul g", + "ĠL ONG", + "U FC", + "ĠVis itors", + "ĠRec all", + "Ġrehab ilit", + "ĠSL I", + "Ġglac ier", + "ĠB ite", + "Ġ50 3", + "Ġvom it", + "Ġfer mented", + "ĠKh alid", + "Ġgrad ed", + "ĠMag icka", + "ĠIch igo", + "power ful", + "ic ators", + "75 3", + "Ġsh rew", + "Ġ35 6", + "Ġlegal izing", + "Ġall otted", + "ĠArch demon", + "ith ing", + "igg urat", + "V OL", + "Le od", + "Ġo ily", + "Ġindu cing", + "Ġamy gdala", + "Ġadm ins", + "ĠAcqu isition", + "C AN", + "Ġsche matic", + "Ġmo an", + "ĠCamer oon", + "Ġt ink", + "Ġmer ry", + "Ġbutter flies", + "ĠGo ff", + "Ġworks pace", + "ĠCor ona", + "Ġj avascript", + "ĠD olphin", + "ĠCant or", + "4 64", + "to e", + "AP S", + "ĠAg ing", + "Ġpadd ed", + "ĠZ heng", + "ĠHe ld", + "Ġest ranged", + "Ġ7 70", + ". 
}", + "ĠDun ham", + "Ġsm okes", + "Ġcap itals", + "und ai", + "Sh in", + "ĠFound ing", + "Ġent itle", + "Ġcenter piece", + "D iscover", + "Ġthere to", + "al ert", + "ĠN ou", + "ĠAnaly st", + "l c", + "F H", + "FI ELD", + "ĠP OV", + "gr ay", + "Ġar cs", + "ĠH OT", + "Ġr s", + "Ġoblig atory", + "ĠArchitect s", + "ĠS ven", + "ĠF EC", + "0 200", + "Christ mas", + "ĠAlban ia", + "rat om", + "58 7", + "Ġhard ships", + "Ġaut os", + "ĠCharg es", + "Ġap es", + "Ġ3 76", + "wal let", + "Ġintox ication", + "Ġgobl in", + "Ġ5 70", + "++++++++ ++++++++", + "ĠYel p", + "ĠMag netic", + "ĠBr iggs", + "R ail", + "Ġspawn s", + "ĠW iggins", + "Ġshowc ased", + "Ġres orted", + "ub en", + "Ġwh ipping", + "Ġim itate", + "Ġdigest ion", + "ĠUS PS", + "ĠG est", + "Ġye a", + "ĠT ight", + "ind al", + "ic as", + "` .", + "C AST", + "'' ;", + "ĠF et", + "opath ic", + "In valid", + "Ġregrett ed", + "Ġbro ccoli", + "ĠSc ores", + "e ve", + "Ġpost ings", + "Ġaccum ulating", + "Ġneed less", + "elf th", + "Ġmay ors", + "Ġsc rib", + "Ġanecd otes", + "Ġbot ched", + "ĠRib bon", + "ĠConstant ine", + "i uses", + "ess es", + "Ġdev ise", + "Comp ared", + "Ġp udding", + "Ġg arg", + "Ġev oke", + "79 7", + "Ġdet ox", + "9 09", + "ĠPie ces", + "ĠMcC artney", + "Ġmet ast", + "ĠK rypt", + "P OR", + "Ġt ending", + "ĠMerch ants", + "Pro of", + "ĠV arg", + "ĠPort able", + "ãĥ¼ãĥĨ ãĤ£", + "B rain", + "25 00", + "Ġfol iage", + "Ø ¹", + "Ġment ors", + "ĠA ires", + "Ġminimal ist", + "Ġing ested", + "ĠTro jan", + "ĠQ ian", + "inv olved", + "0 27", + "Ġer oded", + "RA FT", + "Ġbl urry", + "M ob", + "Ġbuff et", + "ĠFn atic", + "ae a", + "KN OWN", + "ĠIn it", + "s afety", + "en um", + "ACT ION", + "ĠCrus her", + "ĠD ates", + "Ġ ................", + "c alling", + "ak ov", + "Ġvent ured", + "Ġ5 55", + "au ga", + "H art", + "ĠA ero", + "M AC", + "Ġthin ly", + "Ġar ra", + "ST ATE", + "ild e", + "ĠJac qu", + "ĠFem ales", + "Ġthe orem", + "Ġ3 46", + "Ġsmart est", + "ĠPU BLIC", + "ĠK ron", + "ĠB its", + "ĠV essel", + "ĠTele phone", + "Ġdec ap", + "Ġadj unct", + "ĠS EN", + "mer ga", + "Ġred acted", + "Ġpre historic", + "Ġexplan atory", + "ĠRun s", + "ĠUtt ar", + "ĠM anny", + "ĠAUTH OR", + "ĠUnle ashed", + "ĠBow ling", + "be ans", + "79 3", + "Ġunivers es", + "Ġsens it", + "ĠK ung", + "re peat", + "ctr l", + "Ġp aced", + "Ġfull er", + "Cl ock", + "Ġrec omb", + "ĠF aul", + "ĠB unker", + "Ġpool ed", + "Ġan a", + "ĠM outh", + "LL OW", + "hum ane", + "Ġbull do", + "ĠMicha els", + "f am", + "Ġwreck ed", + "Ġport rays", + "ĠWh ale", + "ĠH es", + "Ġguess es", + "ĠBrow se", + "ĠL APD", + "Ġconsequ ential", + "ĠInn ocent", + "ĠD RAG", + "Ġtrans gress", + "ĠO aks", + "Ġtri via", + "ĠRes on", + "ĠA DS", + "-- +", + "ĠT oll", + "Ġgrasp ing", + "ĠTHE M", + "ĠT ags", + "ĠCon clusion", + "Ġpract icable", + "Ġho op", + "Ġunintention ally", + "Ġign ite", + "ĠM ov", + "ur ized", + "le hem", + "Ter min", + "Ġcolour ful", + "ĠLin ear", + "ĠEll ie", + "G y", + "Ġman power", + "Ġj s", + "Ġem oji", + "ĠSHAR ES", + "_ .", + "0000 7", + "Ġsophistic ation", + "Ġunders core", + "Ġpract ise", + "Ġbl ob", + "op ens", + "Uk raine", + "Ke eping", + "Y C", + "J R", + "ult imate", + "Cl aim", + "Ġautom obiles", + "99 3", + "ste el", + "Ġpart ing", + "ĠL ank", + "... 
?", + "Ġ38 5", + "Ġremem brance", + "Ġe ased", + "Ġcov ari", + "ĠS ind", + "Effect ive", + "Ġdisse mination", + "ĠMo ose", + "ĠCl apper", + "br ates", + "App ly", + "Ġinv is", + "Ġwors ened", + "âĢĶ -", + "Ġlegisl ator", + "ĠL ol", + "ĠRow e", + "Ġdealers hip", + "um ar", + "id ences", + "Ġinvestig ates", + "Ġc ascade", + "Ġbid der", + "ĠB EN", + "Iron ically", + "Ġpres iding", + "Ġd ing", + "Ġcontrad icted", + "Ġshut s", + "ĠF IX", + "Ġ3 66", + "Dist rict", + "Ġsin ful", + "ĠChar isma", + "o ops", + "Ġtot ality", + "Ġrest itution", + "ĠOpt imus", + "ĠD ah", + "Ġcl ueless", + "urn ed", + "Ġnut rit", + "Ġland owners", + "Ġfl ushed", + "Ġbroad en", + "m ie", + "Ġprint ln", + "Ġn ig", + "ĠCorp us", + "J en", + "Ġprot o", + "ĠWik imedia", + "ĠPal o", + "C OR", + "Ġstory lines", + "Ġevangel icals", + "ĠDar rell", + "Ġrot or", + "ĠH W", + "sk illed", + "ery l", + "Ġbe gg", + "ĠBl umenthal", + "Ġwe aving", + "Ġdown wards", + "ĠJack et", + "ĠANG EL", + "Te chnology", + "Ġes oteric", + "alde hyde", + "Ġfur iously", + "Ġforeign er", + "We ak", + "CH O", + "ĠH ound", + "Exper ience", + "ĠPlay station", + "ĠM IA", + "ĠU ng", + "cl oth", + "ag all", + "Ġcal ming", + "iz ens", + "St ruct", + "ĠW itches", + "ĠCeleb ration", + "Ġ........ ......", + "pt roller", + "ĠTC U", + "Ġb unny", + "ãĥ į", + "ut orial", + "Ġup scale", + "ĠSt a", + "ĠCol ossus", + "Ġchlor ide", + "ĠZ ac", + "ĠRe asons", + "ĠBrook ings", + "ĠWH ITE", + "][ /", + "ĠL ose", + "9 05", + "Ġunders ide", + "ern els", + "Ġv ape", + "do zen", + "upp et", + "ĠST OP", + "mat ical", + "ĠStat ements", + "hed dar", + "P AC", + "Custom er", + "Ġmem os", + "ĠP J", + "end ars", + "ĠLim its", + "l augh", + "Ġstabil ized", + "ĠALE C", + "Y A", + "Up grade", + "al am", + "Ġtechn o", + "Ġan ew", + "fore seen", + "Ġcolleg iate", + "ĠPy ro", + "ĠD ism", + "Ġfront line", + "Ġammon ia", + "I U", + "Qu ite", + "John ny", + "ass in", + "G OP", + "ĠSt yles", + "ĠSovere ign", + "acter ial", + "5 49", + "ĠR IP", + "ĠL ists", + "Ġ3 64", + "ĠRece p", + "s ocket", + "ĠByr d", + "ĠCand le", + "An cient", + "Ġappell ant", + "en forcement", + "ace a", + "ans ki", + "Ġold s", + "88 6", + "Ġsl urs", + "Ġem pires", + "Ġbuck le", + "Ġalien ation", + "ĠAber deen", + "Ġunic orn", + "Ġoverr iding", + "ĠL X", + "pp a", + "Ġdesp ised", + "ĠB ugs", + "ĠB ST", + "S outhern", + "5 33", + "Ġhall mark", + "ĠPost er", + "Ġstem med", + "Ġprincip als", + "ĠT ECH", + "ĠSand wich", + "It aly", + "Ġche esy", + "ĠSet TextColor", + "ĠProt ective", + "ĠC ohn", + "J O", + "apt op", + "Re ason", + "Lead er", + "ĠUnder stand", + "ĠFr idays", + "ĠContin uous", + "Ġcl ipping", + "ĠR ye", + "Ġber th", + "tim er", + "ann is", + "re act", + "Ġbuff alo", + "ĠPar as", + "Ġ6 55", + "Ġpres ided", + "ĠSun rise", + "Ġve ts", + "Ġcl oves", + "ĠMcC ull", + "Stre ngth", + "G AN", + "Ġill iter", + "ĠPric ing", + "l é", + "Ġresist or", + "Ġbr un", + "ĠSuff olk", + "Ñ ĭ", + "ĠL iver", + "Re leased", + "Ġwhat s", + "8 60", + "ĠMe asures", + "Ġden ouncing", + "ĠRy zen", + "Ġsou ven", + "Ġcareg ivers", + "ch ini", + "ĠScar lett", + "Ġt rough", + "Cong ratulations", + "Ġtax is", + "ĠTrad ition", + "j it", + "Ġtable top", + "Ġhither to", + "Ġdis information", + "off ensive", + "h ra", + "ĠDISTR ICT", + "Ġcompl icate", + "chen ko", + "ĠRecon struction", + "Ġpalp able", + "Ġa usp", + "Ġ4 28", + "Ġshowc ases", + "ĠPublic ation", + "know ledge", + "inn on", + "4 19", + "Ġretri eval", + "and ers", + "Ġref ute", + "Ġinqu ired", + "g ur", + "Ġneg ativity", + "Ġcons erve", + "Ġafter life", + "Ġpres upp", + "ĠGill espie", + 
"Ġm t", + "ĠD N", + "T ap", + "Ġper pend", + "ĠS my", + "does n", + "Ġsp illing", + "Ġhyp ers", + "K ate", + "® ,", + "ke pt", + "ĠP owered", + "Ġj a", + "ĠK lux", + "ard e", + "ab an", + "Ġ4 44", + "Ġflatt ened", + "ĠImprove ments", + "urg a", + "ĠK und", + "Ġins cribed", + "Ġfac ult", + "Ġunpre pared", + "ĠCons umers", + "Ġsatisf ies", + "Ġpul monary", + "Ġinf iltration", + "Ġex ternally", + "Ġcongrat ulations", + "ag han", + "Ġair liner", + "Ġfl ung", + "Ġfly ers", + "G D", + "Ġsnipp ets", + "Ġrec ursive", + "Ġmaster ing", + "L ex", + "Ġovert ly", + "v g", + "Ġluck ily", + "Ġenc ro", + "ĠLanc et", + "ĠAbyss al", + "function al", + "Ġs ow", + "Ġsqu id", + "Ġnar ration", + "Ġn aughty", + "ĠHon our", + "ĠSpart ans", + "Ġsh atter", + "ĠTac oma", + "ĠCal ories", + "ĠR aces", + "Sub mit", + "Ġpurpose fully", + "w av", + "ĠY ok", + "F est", + "ĠG err", + "Met ro", + "Ġit iner", + "f amous", + "Ġ\" {", + "in line", + "was her", + "Iss ue", + "ĠCL IENT", + "oz o", + "Vers ions", + "7 25", + "ĠGl ock", + "Ġshield ed", + "ĠPC R", + "ENC Y", + "ĠWe ld", + "ĠSim pl", + "Ġredirect ed", + "ĠK ham", + "Ġ( >", + "Ġlab ou", + "Ġdi apers", + "ss l", + "Ġcell ar", + "organ isms", + "ore sc", + "ĠBer ks", + "did n", + "Sh ipping", + "C hest", + "Ġund one", + "Ġmillion aire", + "Ġc ords", + "ĠYoung er", + "appropri ately", + "Ġsequ els", + "u ve", + "ant icipated", + "Ġle wd", + "ĠSh irt", + "ĠDmit ry", + "V eter", + "Ġsl aying", + "ĠY ar", + "Ġcompl ication", + "I owa", + "ĠEric a", + "ĠBL M", + "g irlfriend", + "b odied", + "6 26", + "19 63", + "Ġintermedi ary", + "Ġcons olation", + "M ask", + "ĠSi em", + "ow an", + "Beg inning", + "Ġfix me", + "Ġculmin ated", + "Ġcon duc", + "ĠVolunte er", + "Ġpos itional", + "Ġgre ets", + "ĠDefin itions", + "Ġthink er", + "Ġingen uity", + "Ġfresh men", + "ĠMom ents", + "Ġ35 7", + "ate urs", + "ĠFed Ex", + "s g", + "69 4", + "Ġdwind ling", + "ĠBO X", + "sel age", + "Ġt mp", + "Ġst en", + "ĠS ut", + "Ġneighbourhood s", + "Ġclass mate", + "f ledged", + "Ġleft ists", + "Ġclim ates", + "ATH ER", + "ĠScy the", + "ul iffe", + "Ġs ag", + "Ġho pped", + "ĠF t", + "ĠE ck", + "ĠC K", + "ĠDo omsday", + "k ids", + "Ġgas ped", + "Ġmon iker", + "ĠL od", + "ĠC FL", + "t ions", + "r ums", + "fol ios", + "Ġm d", + "Ġunc anny", + "Ġtrans ports", + "ĠLab rador", + "Ġrail ways", + "Ġappl iance", + "ĠCTR L", + "æ Ģ", + "Pop ulation", + "ĠConfeder acy", + "Ġunb earable", + "Ġdors al", + "ĠIn form", + "op ted", + "ĠK ILL", + "Mar x", + "Ġhypoc ritical", + "q us", + "ĠN umerous", + "ĠGeorg ian", + "ĠAmbro se", + "ĠL och", + "Ġgu bernatorial", + "ĠX eon", + "ĠSupp orts", + "ens er", + "ee ly", + "ĠAven ger", + "19 65", + "Ar my", + "Ġju xtap", + "Ġcho pping", + "ĠSpl ash", + "ĠS ustainable", + "ĠFin ch", + "Ġ18 61", + "ict ive", + "at meal", + "ĠG ohan", + "Ġlights aber", + "ĠG PA", + "ug u", + "ĠRE PL", + "vari able", + "Ġher pes", + "Ġdesert s", + "ac iously", + "Ġsitu ational", + "week ly", + "ob l", + "Ġtext ile", + "ĠCorn wall", + "Ġcontrace ptives", + "ĠA ke", + "] -", + "ä¹ ĭ", + ": ,", + "ĠW em", + "ĠB ihar", + "Ġ' .", + "Ġbe re", + "Ġanal ogue", + "ĠCook ies", + "Ġtake off", + "Whe el", + "Ġmaj estic", + "Ġcomm uting", + "0 23", + "ĠCor pse", + "ass ment", + "min i", + "Ġgor illa", + "ĠAl as", + "ere e", + "Ġacquaint ances", + "ĠAd vantage", + "Ġspirit ually", + "Ġey ed", + "pm wiki", + "ĠE nder", + "Ġtrans lucent", + "Ġnight time", + "ĠIM AGES", + "5 45", + "ĠK amp", + "ĠFre ak", + "Ġ ig", + "Port land", + "4 32", + "ĠM ata", + "Ġmar ines", + "Ġh ors", + "ater asu", + "ĠAtt ribution", + 
"Ġ-------- -", + "Ġk ins", + "ĠBEL OW", + "++ +", + "Ġre eling", + "ol ed", + "Ġcl utter", + "ĠRel ative", + "Ġ4 27", + "B US", + "Ġa vert", + "ĠChe ong", + "ĠA ble", + "ĠPry or", + "Develop er", + "Ġen cyclopedia", + "ĠUSA F", + "ĠG arry", + "Sp ain", + "Bl ocks", + "Ġexp osition", + "ĠGamer Gate", + "W OR", + "Ġstockp ile", + "Ġclot hed", + "ĠT one", + "ĠR ue", + "t umblr", + "Ġtreacher ous", + "Ġf rying", + "Ñ Į", + "ĠS ph", + "Ġrest raints", + "Ġemb odies", + "ĠG es", + "S afety", + "Ġnegoti ators", + "min ing", + "ĠAppalach ian", + "L OS", + "ĠJenn a", + "Ġpass ers", + "ç ĭ", + "sn ap", + "Ġshort en", + "creat or", + "Ġinn umerable", + "uther land", + "67 4", + "ĠW OM", + "ĠAs cend", + "ĠArm ory", + "ĠTrans action", + "K ick", + "Ġsuit case", + "day Name", + "Ġwaste ful", + "mar riage", + "ĠMcC abe", + "ite ch", + "ĠO ss", + "Cl osure", + "ĠTreasure r", + "Ġindec ent", + "ĠD ull", + "Ġresid ences", + "19 59", + "ĠS ettlement", + "Ham ilton", + "Ġself ies", + "ĠRank ing", + "ĠBark ley", + "ĠB ore", + "ĠW CS", + "ĠMar itime", + "ĠH uh", + "ĠForest ry", + "Ġcultiv ating", + "ĠBall ard", + "Ġg arrison", + "ĠSD L", + "9 30", + "Ġnas cent", + "Ġirresist ible", + "Ġaw fully", + "\\/ \\/", + "Ġequ ate", + "Ġanthrop ology", + "ĠSylv ia", + "Ġintest ine", + "Ġinnoc uous", + "cess ive", + "ag ra", + "ĠMet roid", + "G rant", + "8 55", + "ģ ĸ", + "Ġ\" _", + "ãĥĥ ãĥī", + "Ġappra isal", + "ĠFred dy", + "04 6", + "Ġ40 6", + "Ġ18 30", + "Ġd ocking", + "St atic", + "Ġp ont", + "ĠVolt age", + "ĠSt ead", + "ĠMort gage", + "ĠJon ah", + "Y L", + "CLASS IFIED", + "Ġas bestos", + "nik ov", + "Ġcoll agen", + "ĠOrb ital", + "P ocket", + "7 99", + "Ġhy brids", + "inc hes", + "Ġinv oice", + "und y", + "Ġinequ alities", + "T rend", + "w ashed", + "B ALL", + "Ġluc id", + "ĠComment ary", + "Ġw itty", + "Br andon", + "Ġbru ising", + "Ġ6 20", + "es cent", + "box ing", + "P OL", + "Ġ3 78", + "R ect", + "Ġlic ences", + "ĠMcG ee", + "p ressed", + "D anny", + "Ġj ammed", + "ord inate", + "Ġle th", + "Ġdistingu ishes", + "ĠYam aha", + "IL S", + "ĠH ume", + "ĠC ategories", + "Rober ts", + "Ch art", + "Ġbeet le", + "ĠGra veyard", + "Ġ($ )", + "o ÄŁ", + "Ġtw ilight", + "are lla", + "á ½", + "Ġbooth s", + "ĠH HS", + "ĠFeld man", + "Ġexcav ation", + "Ġphilosoph ies", + "at ography", + "ĠGar age", + "te chnology", + "Ġunfor gettable", + "Ġver ifying", + "Ġsubord inates", + "E ls", + "Ġne b", + "G aming", + "EN A", + "ĠAchieve ment", + "it ters", + "ĠG abe", + "Ġd umps", + "for cer", + "Ġpo ignant", + "ĠM BA", + "ĠHe idi", + "ime i", + "Ġm ages", + "Ġliber ate", + "Ġcircum cised", + "ĠMer maid", + "ĠMat th", + "t ogether", + "ĠW ichita", + "Ġstore front", + "ĠAd in", + "V II", + "Four th", + "Ġexplore rs", + "W ER", + "Not able", + "Bro ok", + "m ens", + "F aith", + "-------- -", + "ĠJ ou", + "¬ ¼", + "Ġpine apple", + "Ġam alg", + "el n", + "ark able", + "ĠãĤµ ãĥ¼ãĥĨãĤ£", + "ĠãĤµãĥ¼ãĥĨãĤ£ ãĥ¯ãĥ³", + "Ġov arian", + "ĠE choes", + "Ġhairc ut", + "Ġp av", + "Ġch illed", + "anas ia", + "Ġsty led", + "Ġd ab", + "ni per", + "Ġminister ial", + "ĠD UP", + "T an", + "Ġsul ph", + "ĠD eter", + "ĠBo hem", + "od an", + "Ġeduc ator", + "â ĵĺ", + "sp ir", + "Ch icken", + "ĠE leanor", + "Ġqu i", + "Ġheav iest", + "Ġgrasp ed", + "U RA", + "Ġcro oked", + "Jess ica", + "pro blem", + "Ġpred etermined", + "Ġman iac", + "Ġbreath s", + "ĠLauder dale", + "Ġh obbies", + "y z", + "Cr ime", + "Ġcharism a", + "d L", + "Ġle aping", + "Ġk ittens", + "Ang elo", + "ĠJ ACK", + "ĠSu zanne", + "Ġhal ting", + "ENT ION", + "Ġswall owing", + "ĠEarthqu ake", + "Ġeight 
eenth", + "ĠN IC", + "ĠIN F", + "ĠCons cious", + "Ġparticular s", + "circ le", + "7 40", + "Ġbene volent", + "Ġ7 47", + "Ġ4 90", + "Ġr undown", + "ĠVal erie", + "ĠB UR", + "Ġcivil isation", + "ĠS chn", + "W B", + "ot ide", + "intern ational", + "Ġj ohn", + "Ġ19 02", + "Ġpe anuts", + "Ġflav ored", + "k us", + "Ġro ared", + "Ġcut off", + "é £", + "Ġorn ament", + "Ġarchitect ures", + "Ġ3 69", + "ol or", + "ĠWild e", + "ĠC RC", + "ĠAdjust ed", + "Ġprov oking", + "land ish", + "Ġrational ity", + "Ġjust ifies", + "Ġdisp el", + "Ġa meric", + "ĠPol es", + "Ø ©", + "Ġen vis", + "ĠD oodle", + "ä½ ¿", + "igs aw", + "auld ron", + "Techn ical", + "T een", + "up hem", + "ĠX iang", + "Ġdetract ors", + "ĠZ i", + "ĠJournal ists", + "Ġconduc ive", + "ĠVolunte ers", + "Ġs d", + "Know ing", + "Ġtrans missions", + "ĠPL AN", + "ĠL IB", + "Ġall uded", + "Ġob e", + "Ġd ope", + "ĠGold stein", + "Ġwavelength s", + "ĠDest ination", + "nd a", + "ug i", + "Ġattent ive", + "ĠLe an", + "ral tar", + "Ġman g", + "mb uds", + "ak ings", + "b ender", + "Ġacc ol", + "Ġcraw led", + "N OW", + "Min nesota", + "Ġflour ished", + "ĠZ up", + "ĠSuper visor", + "ĠOliv ier", + "Ex cellent", + "Ġwid en", + "D one", + "Ġw ig", + "Ġmiscon ceptions", + "Cor p", + "W an", + "Ġvener able", + "ĠNot ably", + "ĠKling on", + "an imate", + "Bo ost", + "ĠS AY", + "miss ing", + "ibli ography", + "mel on", + "Ġpay day", + "Ø ³", + "bo le", + "Ġve iled", + "ĠAl phabet", + "It alian", + "Ġever lasting", + "ĠR IS", + "ĠC ree", + "rom pt", + "Ġh ating", + "Ġgrin ning", + "Ġge ographically", + "OS H", + "Ġwe eping", + "ĠÂłĠÂłĠÂłĠÂł ĠÂłĠÂłĠÂłĠÂł", + "Ġimpe cc", + "Let ter", + "Ġblo ated", + "PL A", + "ĠFe in", + "Ġper sever", + "Th under", + "Ġa ur", + "ĠR L", + "Ġpit falls", + "âĸ º", + "Ġpredomin ant", + "Ġ5 25", + "7 18", + "AP E", + "7 14", + "Ġfarm land", + "ĠQ iao", + "Ġv iolet", + "ĠBah amas", + "Ġinflic ting", + "ĠE fficiency", + "Ġhome brew", + "Ġundert ook", + "Ġcur ly", + "ĠHard ing", + "man ia", + "59 6", + "Ġtem pered", + "Ġhar rowing", + "ĠP ledge", + "ĠFranken stein", + "è ª", + "M otion", + "Ġpredict ably", + "ĠExpl osion", + "oc using", + "er d", + "col o", + "FF ER", + "Ġback field", + "ĠV IDE", + "ue bl", + "N arr", + "ĠArg ument", + "Ġgen omic", + "Ġbout ique", + "Ġbatt ed", + "ĠB inary", + "Ġg amb", + "ĠRh ythm", + "67 3", + "Ġa float", + "ĠOlymp ia", + "Y ING", + "Ġend if", + "is in", + "Ġwin ters", + "Ġsc attering", + "I v", + "D istance", + "Ġtr u", + "ĠCom fort", + "Ġne xus", + "Ġair flow", + "ĠByz antine", + "p ayers", + "con i", + "ĠB etsy", + "D eal", + "ĠN ug", + "ĠContin ent", + "red ibly", + "Ġoptim izing", + "al beit", + "Ġec static", + "ĠPro to", + "ç ·", + "iv ot", + "âĸ Ħ", + "em p", + "rou nder", + "Ġcl out", + "ĠI ST", + "66 3", + "ĠDoll ars", + "ĠD AC", + "Ġsubsc ribed", + "Ġrehears al", + "Ġam ps", + "ĠSh ang", + "es m", + "Ġspr inkle", + "Ġassail ant", + "ĠO o", + "ĠCoin base", + "T act", + "Ġret ina", + "Ġn uns", + "R ON", + "att o", + "Ġj ug", + "ĠSV G", + "Ġb ikini", + "ĠFI LE", + "ĠFound ers", + "ep ort", + "ĠK P", + "Ġrest ores", + "ĠTh ick", + "Ġash ore", + "Ġappro vals", + "R ender", + "M AG", + "G raham", + "ĠCort ana", + "ãĥ³ ãĤ¸", + "ss h", + "or ians", + "ars ity", + "ĠInsp ired", + "u pper", + "Ġsign alling", + "Ġreb uke", + "Ġfl ares", + "Ġdownt ime", + "Stud ies", + "Ġstagn ation", + "ĠSequ ence", + "Ġgr unt", + "Ġass ures", + "ĠPL A", + "59 2", + "Ġintra ven", + "d epend", + "Sus an", + "ĠManz iel", + "Man ia", + "Cont ract", + "Ġsl ams", + "Ġcult ured", + "Ġcred itor", + "L IST", + "ĠH UM", + "ĠChatt 
anooga", + "serv ed", + "Ġclo aked", + "ĠF TP", + "p owder", + "ĠSt ella", + "uct ive", + "Ġcheap ly", + "ĠMU CH", + "ĠGalile o", + "Ġsu ites", + "spe ech", + "Ġdeliber ations", + "ĠCh ips", + "« ĺ", + "Bal ance", + "ĠWyn ne", + "ĠAk ron", + "Ass et", + "Ġhon oured", + "Ġed ged", + "Like wise", + "anim ous", + "ĠW age", + "ĠEz ek", + "ad vertisement", + "ĠRT X", + "ĠM AD", + "Ġmigr ating", + "ĠS QU", + "Ġ4 75", + "Ed ited", + "Ġshorth and", + "ĠBas ics", + "Ġcro tch", + "ĠEV EN", + "Ġv m", + "effic iency", + "Ġcal ves", + "ĠF rie", + "ĠBrill iant", + "Ġstri kers", + "Ġrepent ance", + "Ġarter ies", + "r l", + "B ed", + "h ap", + "Ġcrypt ography", + "ĠSab res", + "Ġ4 14", + "vi ks", + "ih ara", + "aps es", + "T alking", + "Ġintertw ined", + "Ġdoc ks", + "Ġalle le", + "ĠArt ifact", + "ĠH IM", + "t orn", + "ç ķ", + "Ġop acity", + "ĠE ly", + "os uke", + "Ġn ipple", + "Ġhand written", + "ĠV K", + "ĠChamber lain", + "ĠLa os", + "ig raph", + "g row", + "Ġtr illions", + "Ġdescend ant", + "ĠSail or", + "as uring", + "Ġce ilings", + "ĠWare house", + "f lying", + "ĠGl ow", + "Ġn ont", + "Ġmiscar riage", + "Ġrig s", + "Ġmin istries", + "Ġelabor ated", + "Ġdel usional", + "ĠHum ane", + "Ġ3 79", + "n ets", + "Ġblack out", + "add ers", + "Ġn p", + "ĠT ire", + "ro sc", + "Ġsub div", + "Ġlink age", + "Ġchron ological", + "ĠHER O", + "Ġres ettlement", + "ĠVin yl", + "Ġpast oral", + "ĠMob il", + "ĠBar bar", + "Co oldown", + "ĠF ritz", + "c riminal", + "re pe", + "Ġbell ig", + "ĠBre ed", + "Ġ4 18", + "Ġsem blance", + "ij k", + "Ġcur tail", + "Ġclin ch", + "cont ained", + "ĠProm pt", + "ast on", + "Ġw i", + "Ġpursu its", + "5 15", + "ĠGl oss", + "Ġfl ips", + "Ġcoup ons", + "Ġcl oning", + "ĠLike ly", + "Rem oved", + "ĠQu artz", + "r ices", + "ĠSpe ars", + "Ġp ious", + "Ġdep reciation", + "ĠD are", + "oun ces", + "am az", + "O nt", + "Ġp innacle", + "d ocker", + "0 26", + "ĠW yr", + "ĠPro per", + "Ë Ī", + "n il", + "By tes", + "Ġseek er", + "t rial", + "Ġunf olds", + "ĠMar se", + "Ġextravag ant", + "ĠSurviv ors", + "RED ACTED", + "ĠSpeed way", + "ĠCra igslist", + "sub mit", + "ĠGener ations", + "Ġup holding", + "Ġblood stream", + "ĠMiss ions", + "ĠL awn", + "Ġlim bo", + "ene i", + "H uh", + "ĠWild cats", + "pre p", + "ĠMark us", + "ĠFor bidden", + "rit ic", + "IN O", + "Ġexhib iting", + "requ ent", + "ch uk", + "Ġhabit ual", + "ĠComp atibility", + "Dr ag", + "RIP T", + "uj ah", + "GR OUND", + "Ġdelinqu ent", + "Ġburn er", + "Ġcontempor aries", + "Ġgimm ick", + "load s", + "Ġno zzle", + "p odcast", + "ĠW ak", + "ĠStat en", + "ĠK uh", + "ãģ ĵ", + "inter rupted", + "Ġinv incible", + "ĠBurn ett", + "cig arette", + "ĠPeb ble", + "ĠTem porary", + "ĠMar ino", + "58 2", + "Ġwast eland", + "ident ly", + "T x", + "Ġr ite", + "ĠPan asonic", + "ĠM iddles", + "ĠHort on", + "ae us", + "Ġc uring", + "Ġm ats", + "Ġadj ourn", + "Ġfears ome", + "pe z", + "bo ats", + "Ġpro pell", + "Ġconflic ted", + "ĠAng er", + "Ġinsurg ent", + "K arl", + "Ġco ales", + "Ġsouth western", + "Ġdis su", + "ĠO vert", + "******** ****", + "Ġbox ed", + "ĠBr une", + "aa a", + "Ġgard ening", + "ĠEng el", + "tr acks", + "Ġpur ified", + "Ġplace holder", + "ĠL ikes", + "Ġd an", + "G ab", + "Ġe ct", + "ĠF aw", + "ĠEl iot", + "Ġ' ,", + "otrop ic", + "ĠRu in", + "hed on", + "Ġca ul", + "Ġa ft", + "ĠCad illac", + "gh a", + "ass ian", + "ud eb", + "ĠT ick", + "Ġadjust s", + "AR GET", + "5 37", + "isc he", + "ant y", + "ĠFried rich", + "ĠBl izz", + "ĠA OL", + "Camp aign", + "Ġmamm al", + "ĠVe il", + "ĠK ev", + "ĠMaur it", + "ĠDam ien", + "N ation", + "E astern", + 
"Ġ{ :", + "Ġ= ================================", + "Ġstereotyp ical", + "Ġatt ic", + "ĠCy borg", + "requ ire", + "Ġaward ing", + "ĠPap ua", + "bt n", + "b ent", + "B oo", + "Ġ( =", + "ĠX ander", + "ĠSomers et", + "Ġcatch y", + "Ġcert ify", + "STR UCT", + "Ġit al", + "Ġt ides", + "ĠBr ands", + "G ray", + "comp etitive", + "Ġcur ator", + "ĠD G", + "omin ium", + "ĠGM Os", + "ci ating", + "ĠCarm en", + "ow ard", + "Balt imore", + "Ġr gb", + "C u", + "Ġwip es", + "spe ll", + "IT NESS", + "Ġsummar izes", + "ĠRe vis", + "Ġwhistlebl owers", + "ĠBre ach", + "Ġcro chet", + "k os", + "ews ki", + "Ġrep et", + "Ġcrim son", + "ĠKar achi", + "read able", + "dim ension", + "ĠI gor", + "ild ed", + "ĠZ ed", + "ĠKe ane", + "ĠCos metic", + "DE P", + "Ġretreat ing", + "ĠU A", + "ens ical", + "Ġd usk", + "ĠDick ens", + "Ġaren as", + "ĠPass age", + "level s", + "Ġcur v", + "P ope", + "Ġch ores", + "ĠEl ise", + "ĠComp ass", + "b ub", + "Ġmamm alian", + "ĠSans krit", + "ĠAN C", + "ĠCr ack", + "Q ual", + "L aun", + "amp unk", + "Ġlearn ers", + "Ġglam orous", + "Ġfur the", + "erm ott", + "c and", + "Gener ic", + "Ġnarr ated", + "Ġdisorder ly", + "ĠTrans actions", + "ĠDet ention", + "ĠR oku", + "Ä į", + "Ġunder statement", + "ĠS aur", + "ĠRodrig o", + "ĠAS AP", + "S in", + "Ġre joice", + "Method s", + "Ġelectro de", + "Ġworsh ipped", + "Ġid i", + "ĠPhys icians", + "Ġpop up", + "Ġde ft", + "ĠRem oval", + "ĠBu enos", + "ver bs", + "Ġfun k", + "ush a", + "rict ion", + "ore a", + "ĠBang alore", + "ĠKen obi", + "zz i", + "Ġnorm ative", + "Ġgobl ins", + "Ġcaf es", + "ĠUN CLASSIFIED", + "ĠF ired", + "S IGN", + "Ġs clerosis", + "ĠV oter", + "ĠSon ny", + "ĠExt end", + "ĠEV s", + "Ar senal", + "Ġp si", + "Ġwid est", + "ĠT us", + "Ġlo oms", + "Ġjust ifying", + "ĠGr anger", + "è ¯", + "Ref er", + "58 3", + "Ġflour ishing", + "ab re", + "Ġr ave", + "ĠCont ra", + "Ġ18 98", + "Add s", + "Ġf ul", + "ĠCo oke", + "some one", + "= #", + "67 1", + "Ġy ak", + "Ġar te", + "ĠMis cellaneous", + "ĠDet ection", + "ĠCl ancy", + "â ģ", + "ass ies", + "Ġval iant", + "ĠFemin ist", + "cor ruption", + "V el", + "P ear", + "Ġsucc inct", + "Ġquick est", + "k w", + "Ġsp itting", + "ĠL ibraries", + "åħ ī", + "ant z", + "D ad", + "ĠSpec ifications", + "rup ulous", + "and r", + "RES ULTS", + "Ġsnow ball", + "Ġpred is", + "ĠB axter", + "ĠNurs ing", + "ĠCh aff", + "s we", + "Ġout age", + "Ġnest ing", + "Ġnotor iety", + "tr igger", + "on ite", + "j on", + "Ġf ou", + "ook ed", + "ĠCelebr ity", + "re ality", + "Ġfat ig", + "Ġhug ging", + "Ġbother s", + "ĠPan zer", + "ĠCh andra", + "fig ured", + "Ġvol ts", + "ĠCloud s", + "Ġfee ble", + "ĠCur ve", + "ĠAs us", + "78 6", + "abs or", + "ĠV ICE", + "ĠH ess", + "Ġmanufact ures", + "Ġgri zz", + "ĠPower ful", + "ac id", + "Ġsub sections", + "ĠKrug man", + "ĠAl ps", + "is u", + "Ġsequ est", + "ĠUlt ron", + "ĠT inker", + "ĠGo ose", + "Ġmism atch", + "Att orney", + "Ġmorph ology", + "ĠSix ers", + "ut tered", + "ĠE LECT", + "gr an", + "Rus sell", + "ĠG SL", + "Ġfort night", + "Ġ. 
)", + "Ġapost le", + "pr one", + "el ist", + "Unt itled", + "ĠIm plementation", + "ist ors", + "Ġtank er", + "Ġpl ush", + "Ġattend ants", + "ĠT ik", + "ĠGreen wich", + "ĠY on", + "ĠSP L", + "cell s", + "unt led", + "S olution", + "ĠQu é", + "Ġvac ated", + "Ġupt ick", + "ĠMer idian", + "æ ĥ", + "ĠDr ill", + "9 25", + "58 4", + "Ġrenov ated", + "ĠKub rick", + "zy k", + "Ġl ousy", + "pp el", + "ohyd rate", + "ĠI zzy", + "lesi astical", + "CC C", + "ĠAj ax", + "Ġad apters", + "ĠPetra eus", + "Ġaffirm ation", + "ĠST OR", + "le ms", + "ad oes", + "ĠConstantin ople", + "Ġp onies", + "Ġl ighthouse", + "Ġadherent s", + "ĠBre es", + "omorph ic", + "Fight ing", + "Ġpl aster", + "ĠP VC", + "ĠOb st", + "Ġdear ly", + "ĠTo oth", + "icks on", + "Ġsh aming", + "P lex", + "A gg", + "ĠâĢ¦ \"", + "Ġsub reddits", + "Ġpige on", + "ĠResident ial", + "ĠPass ing", + "Ġl um", + "ĠP ension", + "Ġpessim istic", + "Ġ4 32", + "z inski", + "c ade", + "0 75", + "Ġapolog ised", + "iy ah", + "Put ting", + "Ġgloom y", + "ĠLy me", + "=-=-=-=- =-=-=-=-", + "ĠT ome", + "ĠPsych iatric", + "ĠH IT", + "c ms", + "ap olog", + "Ġbreak er", + "Ġdeep en", + "Ġtheor ist", + "ĠHigh lands", + "Ġb aker", + "Ġst aples", + "Ġinterf ered", + "ĠAb ortion", + "jo ined", + "ch u", + "Ġform ulate", + "Ġvacc inations", + "Ġban ter", + "phe us", + "Ġoutfield er", + "ĠM eter", + "Ġ# ####", + "Ġ18 95", + "Ġnarrow ing", + "ĠST ORY", + "f p", + "ĠC ST", + "ign ore", + "Ġproclaim ing", + "ĠR U", + "ĠB ALL", + "yn a", + "65 3", + "Ġpos it", + "P RE", + "59 4", + "ĠRegist rar", + "ĠPil grim", + "ic io", + "Ġpre tt", + "Ġlif eless", + "Ġ__ _", + "Ne igh", + "ĠCh urches", + "orn o", + "Ġor cs", + "Ġkind red", + "ĠAud it", + "Ġmillenn ial", + "ĠPers ia", + "g ravity", + "ĠDis ability", + "ĠD ARK", + "W s", + "od on", + "Ġgrand daughter", + "ĠBro oke", + "ĠA DA", + "ER A", + "Ġpick ups", + "ĠWil kinson", + "ĠSh ards", + "ĠN K", + "Ġexp el", + "ĠKis lyak", + "Ġj argon", + "Ġpolar ized", + "ian e", + "Pub lisher", + "Ġreb utt", + "Ġapprehens ion", + "ĠK essler", + "Ġpr ism", + "F UL", + "19 64", + "ĠL oll", + "ä ¿", + "le thal", + "Å Ł", + "Ġg hetto", + "Ġb oulder", + "ĠSlow ly", + "ĠOsc ars", + "ĠInst ruction", + "ĠUl tr", + "ĠM oe", + "N ich", + "ĠP ATH", + "( *", + "ĠRE LEASE", + "un ing", + "rou se", + "en eg", + "Ġre imb", + "ĠDet ected", + "Do S", + "Ġster ling", + "Ġaggreg ation", + "ĠLone ly", + "ĠAtt end", + "hig her", + "Ġairst rike", + "ks on", + "SE LECT", + "Ġdef lation", + "ĠHer rera", + "C ole", + "rit ch", + "Ġadvis able", + "F ax", + "Ġwork around", + "Ġp id", + "mort em", + "ers en", + "Ġtyp o", + "Ġal um", + "78 2", + "ĠJam al", + "script s", + "Ġcapt ives", + "ĠPres ence", + "ĠLie berman", + "angel o", + "Ġalcohol ism", + "ass i", + "Ġrec ite", + "Ġgap ing", + "Ġbask ets", + "ĠG ou", + "Brow ser", + "ne au", + "Ġcorrect ive", + "und a", + "sc oring", + "ĠX D", + "Ġfil ament", + "Ġdeep ening", + "ĠStain less", + "Int eger", + "Ġbu ggy", + "Ġten ancy", + "ĠMub arak", + "Ġt uple", + "ĠD roid", + "ĠS itting", + "Ġforfe it", + "ĠRasm ussen", + "ixt ies", + "es i", + "ĠKim mel", + "Ġmetic ulously", + "Ġap opt", + "ĠS eller", + "08 8", + "ec ake", + "hem atically", + "T N", + "Ġmind less", + "Ġdig s", + "ĠAcc ord", + "ons ense", + "em ing", + "br ace", + "Ġe Book", + "ĠDist ribut", + "ĠInvest ments", + "w t", + "] ),", + "beh avior", + "56 3", + "Ġbl inding", + "ĠPro testers", + "top ia", + "Ġreb orn", + "ĠKel vin", + "ĠDo ver", + "ĠD airy", + "ĠOut s", + "Ġ[ /", + "Ï Ģ", + "b p", + "ĠVan ity", + "ĠRec ap", + "ĠHOU SE", + "ĠF ACE", + "Ġ4 22", + 
"69 2", + "ĠAnt ioch", + "cook ed", + "Ġcoll ide", + "Ġa pr", + "Ġsle eper", + "ĠJar vis", + "Ġalternative ly", + "ĠLe aves", + "ĠM aw", + "Ġantiqu ity", + "ĠAdin ida", + "Ġab user", + "Poké mon", + "Ġass orted", + "ĠRev ision", + "ĠP iano", + "ĠG ideon", + "O cean", + "Ġsal on", + "Ġbust ling", + "ogn itive", + "ĠRah man", + "Ġwa iter", + "Ġpres ets", + "ĠO sh", + "ĠG HC", + "oper ator", + "Ġrept iles", + "Ġ4 13", + "ĠG arr", + "ĠCh ak", + "Ġhas hes", + "Ġfail ings", + "Ġfolk lore", + "Ġab l", + "ĠC ena", + "ĠMac Arthur", + "ĠCOUR T", + "Ġperipher y", + "app ers", + "Ġreck oned", + "ĠInf lu", + "ĠC ET", + "Ġ3 72", + "ĠDefin itive", + "ass ault", + "4 21", + "Ġreservoir s", + "Ġd ives", + "ĠCo il", + "DA Q", + "Ġvivid ly", + "ĠR J", + "ĠBel lev", + "Ġec lectic", + "ĠShow down", + "ĠK M", + "ip ed", + "reet ings", + "ĠAs uka", + "L iberal", + "ĠÏ Ħ", + "Ġbystand ers", + "ĠGood win", + "uk ong", + "S it", + "ĠT rem", + "Ġcrim inally", + "ĠCirc us", + "ch rome", + "88 7", + "Ġnan op", + "ĠOb i", + "ĠL OW", + "o gh", + "ĠAuth ors", + "ob yl", + "Ur ban", + "Ġt i", + "ĠWe ir", + "t rap", + "ag y", + "Ġparent heses", + "Ġout numbered", + "Ġcounter productive", + "ĠTob ias", + "ub is", + "P arser", + "ST AR", + "Ġsyn aptic", + "ĠG ears", + "Ġh iber", + "Ġdebunk ed", + "Ġex alted", + "aw atts", + "H OU", + "Ch urch", + "ĠPix ie", + "ĠU ri", + "ĠForm ation", + "ĠPred iction", + "C EO", + "Ġthro tt", + "ĠBrit ann", + "ĠMad agascar", + "ë ĭ", + "Ġbill boards", + "ĠRPG s", + "ĠBe es", + "complete ly", + "F IL", + "Ġdoes nt", + "ĠGreen berg", + "re ys", + "Ġsl ing", + "Ġempt ied", + "ĠPix ar", + "ĠDh arma", + "l uck", + "ingu ished", + "Ġend ot", + "Ġbab ys", + "05 9", + "che st", + "r ats", + "Ġr idden", + "Ġbeet les", + "Ġillum inating", + "Ġfict itious", + "ĠProv incial", + "Ġ7 68", + "Ġshe pherd", + "ĠR ender", + "Ġ18 96", + "C rew", + "Ġmold ed", + "ĠXia omi", + "ĠSp iral", + "Ġdel im", + "Ġorgan ising", + "Ġho ops", + "ĠBe i", + "z hen", + "Ġfuck in", + "Ġdec ad", + "Ġun biased", + "am my", + "sw ing", + "Ġsmugg led", + "Ġk ios", + "ĠP ERSON", + "ĠInquis itor", + "Ġsnow y", + "Ġscrap ing", + "ĠBurg ess", + "P tr", + "ag ame", + "R W", + "Ġdro id", + "ĠL ys", + "ĠCass andra", + "Jac ob", + "Ġ35 4", + "Ġpast ure", + "Ġfr anc", + "ĠScot ch", + "ĠEnd s", + "ĠI GF", + "def inition", + "Ġhyster ical", + "ĠBrown e", + "77 1", + "Ġmobil ization", + "æ ķ", + "iqu eness", + "Th or", + "Ġspear headed", + "Ġembro iled", + "Ġconject ure", + "jud icial", + "Ch oice", + "Ġpaper back", + "P ir", + "Ġrec overs", + "ĠSur ge", + "ĠSh ogun", + "ĠPed iatrics", + "ãģ ł", + "Ġsweep s", + "ĠLabor atories", + "ĠP acks", + "al us", + "add in", + "Ġhead lights", + "g ra", + "Ev idence", + "COL OR", + "Ad min", + "Ĭ ±", + "Ġconco ct", + "s ufficient", + "Ġun marked", + "Ġrich ness", + "Ġdiss ertation", + "Ġseason ing", + "Ġg ib", + "ĠM ages", + "un ctions", + "ĠN id", + "che at", + "ĠTM Z", + "c itizens", + "ĠCatholic ism", + "n b", + "Ġdisemb ark", + "ĠPROG RAM", + "a ques", + "Ty ler", + "Or g", + "ĠSl ay", + "ĠN ero", + "ĠTown send", + "IN TON", + "te le", + "Ġmes mer", + "9 01", + "Ġfire ball", + "ev idence", + "aff iliated", + "ĠFrench man", + "ĠAugust a", + "0 21", + "Ġs led", + "Ġre used", + "ĠImmun ity", + "Ġwrest le", + "assemb led", + "Mar ia", + "Ġgun shots", + "ĠBarb ie", + "Ġcannabin oids", + "ĠTo ast", + "ĠK inder", + "IR D", + "Ġre juven", + "Ġg ore", + "Ġrupt ure", + "Ġbre aching", + "ĠCart oon", + "Ġ4 55", + "ĠPale o", + "6 14", + "Ġspe ars", + "ĠAm es", + "ab us", + "Mad ison", + "GR OUP", + "Ġab orted", + 
"y ah", + "Ġfel on", + "Ġcaus ation", + "Ġprep aid", + "Ġp itted", + "op lan", + "ĠShel ley", + "ĠRus so", + "ĠP agan", + "Ġwill fully", + "ĠCan aver", + "und rum", + "ĠSal ary", + "ĠAr paio", + "read er", + "ĠR ational", + "ĠOver se", + "ĠCa uses", + "Ġ* .", + "Ġw ob", + "Ke ith", + "ĠCons ent", + "man ac", + "77 3", + "6 23", + "Ġfate ful", + "et imes", + "Ġspir ited", + "ĠD ys", + "Ġhe gemony", + "Ġboy cot", + "ĠEn rique", + "em outh", + "Ġtim elines", + "ĠSah ara", + "ĠRel ax", + "ĠQuin cy", + "ĠLess ons", + "ĠE QU", + "SE A", + "N K", + "ĠCost co", + "Incre ase", + "Ġmotiv ating", + "ĠCh ong", + "am aru", + "ĠDiv ide", + "Ġped igree", + "ĠTasman ia", + "ĠPrel ude", + "L as", + "9 40", + "57 4", + "Ġch au", + "ĠSp iegel", + "un ic", + "-- >", + "ĠPhil ips", + "ĠKaf ka", + "Ġuphe aval", + "Ġsent imental", + "Ġsa x", + "ĠAk ira", + "ser ial", + "Mat rix", + "Ġelect ing", + "Ġcomment er", + "ĠNeb ula", + "ple ts", + "ĠNad u", + "ĠAd ren", + "Ġen shr", + "ĠR AND", + "fin ancial", + "ĠCly de", + "uther ford", + "Ġsign age", + "Ġde line", + "Ġphosph ate", + "rovers ial", + "f ascist", + "ĠV all", + "ĠBeth lehem", + "Ġfor s", + "Ġeng lish", + "S olid", + "N ature", + "Ġv a", + "ĠGu ests", + "Ġtant al", + "Ġauto immune", + ";;;;;;;; ;;;;", + "ĠTot ally", + "ĠO v", + "Ġdef ences", + "ĠCoc onut", + "Ġtranqu il", + "Ġpl oy", + "Ġflav ours", + "ĠFl ask", + "ãĤ¨ ãĥ«", + "ĠWest on", + "ĠVol vo", + "8 70", + "Ġmicro phones", + "ver bal", + "R PG", + "Ġi ii", + "; }", + "0 28", + "Ġhead lined", + "Ġprim ed", + "Ġho ard", + "ĠSh ad", + "ĠEN TER", + "Ġtri angular", + "Ġcap it", + "l ik", + "ĠAn cients", + "Ġl ash", + "Ġconv ol", + "Ġcolon el", + "en emy", + "G ra", + "Ġpub s", + "ut ters", + "Ġassign s", + "ĠPen et", + "ĠMon strous", + "ĠBow en", + "il ver", + "H aunted", + "ĠD ing", + "start ed", + "pl in", + "Ġcontamin ants", + "ĠDO E", + "ff en", + "ĠTechn ician", + "R y", + "Ġrob bers", + "Ġhot line", + "ĠGuard iola", + "ĠKau fman", + "row er", + "ĠDres den", + "ĠAl pine", + "E lf", + "Ġf mt", + "ĠS ard", + "urs es", + "g pu", + "Un ix", + "Ġunequiv ocally", + "ĠCitizens hip", + "qu ad", + "m ire", + "ĠS weeney", + "B attery", + "6 15", + "Ġpanc akes", + "Ġo ats", + "M aps", + "ĠCont rast", + "mbuds man", + "ĠE PS", + "Ġsub committee", + "Ġsour cing", + "Ġs izing", + "ĠBuff er", + "ĠMand atory", + "Ġmoder ates", + "ĠPattern s", + "ĠCh ocobo", + "ĠZ an", + "ĠSTAT ES", + "ĠJud ging", + "ĠIn her", + "* :", + "Ġb il", + "ĠY en", + "Ġexh ilar", + "oll ower", + "z ers", + "Ġsn ug", + "max imum", + "Ġdesp icable", + "ĠP ACK", + "ĠAn nex", + "Ġsarcast ic", + "Ġlate x", + "Ġt amp", + "ĠS ao", + "b ah", + "ĠRe verend", + "ĠChin atown", + "ĠA UT", + "d ocumented", + "ĠGA BA", + "ĠCan aan", + "ĠÙ ħ", + "Ġgovern s", + "pre v", + "E sc", + "ĠEst imates", + "OS P", + "Ġendeav our", + "ĠCl osing", + "omet ime", + "every one", + "Ġwor sen", + "Ġsc anners", + "Ġdev iations", + "ĠRobot ics", + "ĠCom pton", + "Ġsorce rer", + "Ġend ogenous", + "Ġem ulation", + "ĠPier cing", + "ĠA ph", + "ĠS ocket", + "Ġb ould", + "ĠO U", + "ĠBorder lands", + "Ġ18 63", + "G ordon", + "ĠW TO", + "Ġrestrict s", + "Ġmosa ic", + "Ġmel odies", + "ç Ħ", + "T ar", + "Ġdis son", + "ĠProv ides", + "Ġ ......", + "b ek", + "F IX", + "Ġbro om", + "ans hip", + "Do ctors", + "Ġner ds", + "ĠReg ions", + "na issance", + "Ġmet e", + "Ġcre pt", + "pl ings", + "Ġgirlfriend s", + "kn it", + "ig ent", + "ow e", + "Ġus hered", + "ĠB az", + "M obil", + "4 34", + "ĠPres ents", + "orig in", + "Ġins omnia", + "ĠA ux", + "4 39", + "ĠCh ili", + "irs ch", + "G AME", 
+ "Ġgest ation", + "alg ia", + "rom ising", + "$ ,", + "c row", + "ĠIn spection", + "at omic", + "Rel ations", + "J OHN", + "rom an", + "ĠClock work", + "ĠBak r", + "m one", + "M ET", + "Ġthirst y", + "Ġb c", + "Ġfacult ies", + "R um", + "Ġnu ance", + "ĠD arius", + "ple ting", + "fter s", + "etch up", + "Reg istration", + "ĠK E", + "R ah", + "Ġpref erential", + "ĠL ash", + "ĠH H", + "Val id", + "ĠN AV", + "Ġstar ve", + "ĠG ong", + "z ynski", + "ĠAct ress", + "Ġw ik", + "Ġun accompanied", + "lv l", + "Br ide", + "AD S", + "ĠCommand o", + "ĠVaugh n", + "Wal let", + "Ġho pping", + "ĠV ie", + "Ġcave ats", + "Ġal as", + "if led", + "ab use", + "66 1", + "Ġib n", + "Ġg ul", + "Ġrob bing", + "t il", + "IL A", + "Ġmit igating", + "Ġapt ly", + "Ġty rant", + "Ġmid day", + "ĠGil more", + "ĠDe cker", + "Ġ§ §", + "part ial", + "Ex actly", + "Ġphen otype", + "Ġ[+ ]", + "ĠP lex", + "ĠI ps", + "vers ions", + "Ġe book", + "Ġch ic", + "g ross", + "\":\" \"},{\"", + "ĠSur prisingly", + "M organ", + "Ġresid ues", + "ĠConf ederation", + "in feld", + "Ġl yr", + "mod erate", + "Ġperpend icular", + "V K", + "Ġsynchron ized", + "Ġrefres hed", + "Ġad ore", + "ĠTor ment", + "ol ina", + "Ġ26 00", + "Item Tracker", + "Ġp ies", + "ĠF AT", + "ĠR HP", + "0 48", + "ĠRES P", + "ĠB J", + "all ows", + "P and", + "Ġunw elcome", + "ĠV oc", + "ĠBast ard", + "ĠO W", + "ĠL AR", + "ĠHeal er", + "Environment al", + "ĠKen yan", + "ĠTr ance", + "ĠP ats", + "Ġali ases", + "ĠGar field", + "Ġcampaign er", + "Ġadvance ments", + "ĠOkin awa", + "ĠC oh", + "ows ky", + "Ġstar ved", + "Ġsize able", + "Ġ: -)", + "Ġm RNA", + "Ġsusp ensions", + "ist ar", + "Scot land", + "Pr in", + "-------------------------------- ----------------", + "Ġ50 2", + "Ġteasp oons", + "Ġ10 50", + "Ġcoerc ive", + "ĠMason ic", + "edd ed", + "ĠPass enger", + "Ġl att", + "Ġbr aces", + "ĠSt eal", + "ĠNY T", + "ĠK ats", + "ĠCel est", + "ae z", + "T u", + "ĠCoul ter", + "ðŁ ĺ", + "Fl ickr", + "ĠWil mington", + "ith s", + "++ ;", + "Ġv ending", + "Ġneg ro", + "ĠPh i", + "ĠYellow stone", + "Call back", + "Ġsh ampoo", + "ĠSh ades", + "w at", + "Ġsuper human", + "Ġridic uled", + "Ġhol iest", + "om bo", + "Ġintern s", + "Ġh one", + "ĠPar agu", + "UR I", + "Ġd angling", + "ãĤ »", + "so v", + "ict ional", + "av ailability", + "Ġrev ocation", + "Ġd ow", + "in ic", + "ĠTHE IR", + "Ġis o", + "Ġout ings", + "ĠLeth al", + "Ġ) ))", + "Ġinacc ur", + "Ġout landish", + "Ġan us", + "let ico", + "id on", + "l ol", + "Ġun regulated", + "Ġsuccumb ed", + "Ġc uff", + "ĠWast eland", + "let al", + "Ġsub str", + "Ġcoff ers", + "Ġautom akers", + "ov i", + "ĠX ue", + "ĠDayton a", + "Ġjar ring", + "Ġf umes", + "Ġdisband ed", + "z ik", + "itt on", + "Ġstriking ly", + "Ġsp ores", + "Ad apter", + ".) 
:", + "ĠLynd on", + "ival ry", + "Ġor ally", + "Ġtumult uous", + "Ġdisple asure", + "Ġcon es", + "or rect", + "Ġappe ase", + "Ġder by", + "ĠTrip oli", + "ĠAl ess", + "Ġp oked", + "ĠGu ilty", + "v P", + "En ough", + "Ġorig inals", + "6 99", + "Ġrabb i", + "Ġproverb ial", + "Ġpostp one", + "el ope", + "ĠMist y", + "Ġstaff ed", + "ĠUn employment", + "redit ary", + "Ġdilig ent", + "re comm", + "me asures", + "as in", + "8 25", + "Ġpond s", + "Ġmm ol", + "ĠS AR", + "ĠC ARE", + "Ġ3 71", + "Ġclen ched", + "ĠCors air", + "Ġcaric ature", + "z n", + "att ach", + "ĠSch ro", + "spe ak", + "p ainted", + "ĠS uc", + "ĠE NT", + "Ġcell ul", + "ĠP aid", + "di agn", + "WH ERE", + "Ġtext ed", + "B arn", + "Ġret racted", + "ĠRe ferred", + "S av", + "Ġup keep", + "Ġwork places", + "ĠTok ens", + "Ġampl ify", + "cl inical", + "Ġmult ic", + "mber g", + "Ġconvol uted", + "Reg ion", + "5 65", + "ĠTop ic", + "Ġsn ail", + "Ġsal ine", + "Ġins urrection", + "ĠPet r", + "f orts", + "B AT", + "ĠNav ajo", + "Ġrud imentary", + "ĠLak sh", + "OND ON", + "Me asure", + "Ġtransform er", + "ĠGodd ard", + "Ġcoinc ides", + "ir in", + "R ex", + "ĠB ok", + "qu it", + "Ġshotgun s", + "Ġprolet arian", + "Ġsc orp", + "ĠAd a", + "5 14", + "Ġsl ander", + "record ed", + "Ġemb ell", + "ris ome", + "Ġapolog izing", + "ĠMul cair", + "ĠGib raltar", + "Cl a", + "Ġall ot", + "ĠAtt ention", + "Ġ4 33", + "le ave", + "Ġwh ine", + "ĠIss a", + "ĠFa ust", + "ĠBar ron", + "hen y", + "Ġvictim ized", + "J ews", + "Ġnurt uring", + "ett el", + "W inged", + "ĠSub tle", + "Ġflavor ful", + "ĠRep s", + "eng ed", + "call back", + "Ġdirection al", + "Ġcl asp", + "ĠDirect ions", + "plan et", + "icult ure", + "Hel per", + "ic ion", + "ac ia", + "Ġç ¥ŀ", + "Ġsur ges", + "Ġcan oe", + "ĠPrem iership", + "be en", + "Ġdef ied", + "ĠTro oper", + "Ġtrip od", + "Ġgas p", + "ĠE uph", + "ĠAd s", + "vern ight", + "high ly", + "R ole", + "Ġent angled", + "ĠZe it", + "6 18", + "ĠRust y", + "Ġhaven s", + "ĠVaugh an", + "HA EL", + "ĠSER VICE", + "/ ,", + "Ġstr icken", + "Ġdel usions", + "Ġb is", + "ĠH af", + "Ġgrat ification", + "Ġent icing", + "UN CH", + "Ad ams", + "ĠOL ED", + "ĠBeet le", + "Ġ18 99", + "ĠSO FTWARE", + "ateg or", + "V L", + "ĠTot em", + "ĠG ators", + "AT URES", + "Ġimped ance", + "Reg istered", + "ĠC ary", + "ĠAer ial", + "on ne", + "en ium", + "Ġd red", + "ĠBe g", + "Ġconcurrent ly", + "Ġsuper power", + "ĠX an", + "j ew", + "imes ter", + "ĠDick inson", + "âĶ ģ", + "F la", + "Ġp ree", + "ĠRoll ins", + "© ¶æ", + "Ġden omination", + "ĠL ana", + "5 16", + "Ġinc iting", + "sc ribed", + "j uries", + "ĠWond ers", + "app roximately", + "Ġsusp ending", + "Ġmountain ous", + "ĠL augh", + "oid al", + "N s", + "Det ect", + ") =", + "ĠL uthor", + "ĠSchwarz enegger", + "ĠMull er", + "ĠDev i", + "ec ycle", + "J ar", + "6 13", + "ĠL ongh", + "B ah", + "ĠSP ORTS", + "n w", + "Ġref inement", + "Ġwater ways", + "Ġd iner", + "Bl ade", + "68 3", + "F ac", + "Ġinitial s", + "Ġro g", + "Ġparan ormal", + "B UT", + "Ġ[ (", + "ĠSw anson", + "ĠM esh", + "âĸ ¬", + "Impro ve", + "ĠRad iation", + "ĠEst her", + "ĠE sk", + "ĠA ly", + "ik y", + "Ġir rad", + "ĠBuck ingham", + "Ġref ill", + "Ġ. 
_", + "Re pe", + "CON CLUS", + "Ġdifferent iated", + "Ġchi rop", + "ĠAt kins", + "Pat tern", + "Ġexc ise", + "Ġcab al", + "N SA", + "ĠST A", + "ĠS IL", + "ĠPar aly", + "Ġr ye", + "ĠHow ell", + "ĠCount down", + "ness es", + "alys ed", + "Ġres ize", + "ãĤ ½", + "Ġbudget ary", + "ĠStr as", + "w ang", + "Ġap iece", + "Ġprecinct s", + "Ġpe ach", + "Ġsky line", + "Ġ35 3", + "pop ular", + "App earances", + "ĠMechan ics", + "ĠDev Online", + "S ullivan", + "Z en", + "Ġp u", + "op olis", + "5 44", + "Ġde form", + "Ġcounter act", + "ĠL ange", + "Ġ4 17", + "Con sole", + "77 4", + "Ġnodd ing", + "Ġpopul ism", + "Ġhe p", + "Ġcoun selling", + "compl iance", + "U FF", + "Ġunden iably", + "Ġrail ing", + "ĠHor owitz", + "ĠSim one", + "ĠBung ie", + "Ġa k", + "ĠTal ks", + "x ff", + "fl ake", + "Cr ash", + "Ġsweat y", + "Ġban quet", + "ĠOFF IC", + "Ġinvent ive", + "Ġastron omer", + "ĠStam ford", + "ĠSc are", + "ĠGRE EN", + "olic ited", + "Ġr usher", + "Ġcent rist", + "ight ing", + "Ġsub class", + "Ġdis av", + "Ġdef und", + "ĠN anto", + "oci ate", + "m ast", + "Ġpac if", + "Ġm end", + "e ers", + "imm igration", + "ESS ION", + "Ġnumber ing", + "Ġlaugh able", + "ĠEnd ed", + "v iation", + "em ark", + "P itt", + "Ġmetic ulous", + "ĠL F", + "Ġcongrat ulated", + "ĠBir ch", + "Ġsway ed", + "Ġsemif inals", + "Ġhum ankind", + "m atter", + "ĠEqu ip", + "opa usal", + "S aid", + "ĠLay out", + "Ġvo icing", + "Ġth ug", + "Ġporn ographic", + "I PS", + "Ġmo aning", + "Ġgriev ance", + "Ġconf essions", + "esc al", + "TEXT URE", + "Aut hent", + "os aurus", + "P urchase", + "Ġreleg ation", + "al ter", + "ĠÂł Âł", + "Ġr iddled", + "Ġo gre", + "ĠLow ell", + "Occ up", + "E at", + "ĠHy der", + "ĠAdvis er", + "Com merce", + "H unt", + "ĠOr th", + "ĠComp etitive", + "ĠCL A", + "CD C", + "Ġsal ads", + "F le", + "Ġindustrial ized", + "` ,", + "ĠO WN", + "Ġbec k", + "ĠPart icularly", + "oub t", + "Ġm M", + "ĠHuss ain", + "ĠChen nai", + "Ġ9 20", + "Ġappoint ing", + "ĠCull en", + ",,,, ,,,,", + "Ġp ores", + "ver ified", + "Ġbi ochemical", + "em ate", + "Ġcoward ly", + "ĠHels inki", + "ĠEthiop ian", + "S OURCE", + "ER C", + "est ro", + "Ġbi otech", + "ĠS our", + "Ġbrew er", + "Bloom berg", + "Ġintens ify", + "Gl ass", + "an co", + "ĠF DR", + "gre SQL", + "ĠF ires", + "©¶æ ¥µ", + "ec o", + "100 1", + "ĠHom eless", + "Ġinstant aneous", + "ĠH aste", + "ig el", + "D iamond", + "Ġp aving", + "Ġland fill", + "Ġd ads", + "h oun", + ": ]", + "Ġinc endiary", + "ĠLiving ston", + "ĠHil bert", + "ĠChe cks", + "st yles", + "in ators", + "ĠCl ive", + "ph rine", + "Ġchimpan zees", + "Ġp all", + "ĠJ M", + "ĠAad haar", + "ð Ŀ", + "Ġachie vable", + "dis abled", + "P ET", + "OOOO OOOO", + "M ot", + "Ġint angible", + "Ġbal let", + "ĠWe bs", + "ĠEst imated", + "Effect s", + "Ġb ailed", + "Josh ua", + "Ġturb ulence", + "Ġoccup ant", + "ĠDay light", + "Ġ36 1", + "me et", + "Ġstat ically", + "Ġon look", + "Ġk i", + "il legal", + "Ġvel vet", + "Ġdehyd ration", + "Ġacqu ies", + "ĠRe z", + "ak ura", + "ĠU pton", + "at ro", + "Ġincomp rehensible", + "Ġback door", + "ĠRh ino", + "7 27", + "Ġmath s", + ") +", + "Ġhe resy", + "Ġd f", + "ĠRoc he", + "ĠL ydia", + "Ġpanc reat", + "re ply", + "arre ll", + "Ġsolicit ation", + "Ġcirc adian", + "BI P", + "Ġfor ay", + "Ġcrypt ic", + "iz u", + "ime o", + "ĠTom ato", + "ĠH oms", + "ex amination", + "Ġqu arry", + "ĠVal iant", + "ĠJer icho", + "ĠIN CLUD", + "Ġ18 40", + "5 19", + "Ġres ists", + "Ġsnap shots", + "ĠSp ur", + "ĠAnt iqu", + "Log in", + "Ġbest selling", + "Ġant ic", + "ĠS utherland", + "ãĤ¢ ãĥ«", + "Ġ~ /", + "ĠP arm", + "è 
ĥ", + "P ages", + "int ensity", + "Ġimm obil", + "Ġ18 65", + "zz o", + "Ġn ifty", + "Ġf entanyl", + "ĠPres ervation", + "op hen", + "Ġd arts", + "ĠD inosaur", + "po inters", + "ĠR ite", + "s uggest", + "aware ness", + "ĠSher idan", + "Ġst ances", + "Ġsor cery", + "Ġper jury", + "ĠNik ola", + "ie ver", + "Ġf iance", + "ĠJordan ian", + "ĠBall oon", + "Ġn ab", + "Ġk b", + "Ġhuman ities", + "ĠTan aka", + "hill ary", + "Ġconsult ancy", + "ĠZ ub", + "Ġrem ission", + "Ġconf id", + "CH Q", + "ĠF ug", + "Ġimpro vis", + "Y ep", + "/ _", + "Ġunwilling ness", + "Ġport folios", + "05 5", + "ĠInstruct or", + "aim an", + "Ġclaim ants", + "M bps", + "ĠBy e", + "re ceived", + "T weet", + "Ġind emn", + "ri z", + "am ara", + "N at", + "Ġeval uates", + "ĠL ur", + "ep ad", + "FO X", + "ĠTh ro", + "Ġrust y", + "Ġbed rock", + "ĠOp rah", + "J B", + "Ġmanip ulative", + "Ġwill ful", + "Ġrel apse", + "Ġext ant", + "The me", + "S ensor", + "ĠSt ability", + "go vern", + "Ġpo ppy", + "Ġkn ack", + "Ġins ulated", + "ĠT ile", + "ĠExt rem", + "Ġunt old", + "Ġconver ge", + "Ġref uel", + "ig roup", + "Ġdistort ions", + "Ġrav aged", + "Ġmechan ically", + "ĠRe illy", + "ĠN ose", + "ĠIncarn ation", + "ĠBeck y", + "abb ling", + "Ġt aco", + "Ġr ake", + "Ġmelanch oly", + "Ġillust rious", + "ĠDart mouth", + "Gu ide", + "ĠR azer", + "ĠBen z", + "Ult imate", + "ĠSur prise", + "Ġpage ant", + "off er", + "Who ever", + "Ġw iser", + "Ġchem ist", + "ĠHE LL", + "ĠBul k", + "Ġpl utonium", + "ĠCO VER", + "Ö ¼", + "f ailed", + "Ġtire lessly", + "Ġinf ertility", + "ĠTr ident", + "ĠShow time", + "ĠC iv", + "V ice", + "requ ires", + "itt ance", + "Ġun controlled", + "interest ing", + "56 1", + "Ġinnov ate", + "ateg ic", + "L ie", + "ĠS elling", + "U l", + "Ġsav ior", + "ĠT osh", + "Ġsw ast", + "P ASS", + "Ġr ink", + "Ġcard io", + "ĠI ro", + "ud i", + "Ġv antage", + "Ġv ans", + "ĠNi ño", + "+ =", + "Ġpropag ate", + "< ?", + "Ġmethod ological", + "204 39", + "Ġtrig lycer", + "Ġing rained", + "ĠAn notations", + "arr anted", + "6 17", + "ĠS odium", + "ĠA AC", + "techn ical", + "mult ipl", + "Ġ3 73", + "å ĭ", + "Ġdec isively", + "Ġboost ers", + "Ġdessert s", + "ĠGren ade", + "Ġtest ifying", + "ĠSc ully", + "ID s", + "Ġlock down", + "ĠSc her", + "ĠR é", + "ĠWhit man", + "ĠRams ay", + "rem ote", + "Ġh ikers", + "ĠHy undai", + "Ġcons cientious", + "Ġcler ics", + "ĠSiber ian", + "ut i", + "is bury", + "Ġrel ayed", + "Ġqu artz", + "ĠC BI", + "seek ers", + "ull a", + "Ġweld ing", + "ĠSh al", + "ble acher", + "T ai", + "ĠSam son", + "Ġt umble", + "ĠInvest or", + "Ġsub contract", + "ĠShin ra", + "ow icz", + "j andro", + "d ad", + "Ġtermin ating", + "ĠNe ural", + "ä» £", + "Ġleak age", + "ĠMid lands", + "ĠCaucas us", + "í ķ", + "c it", + "ll an", + "iv ably", + "ĠAlb ion", + "Ġ4 57", + "Ġregist rations", + "Ġcomr ade", + "Ġclip board", + "0 47", + "Ġdiscour aging", + "ĠO ops", + "Ad apt", + "Ġem path", + "n v", + "ĠPR OT", + "ĠDon n", + "ĠP ax", + "ĠB ayer", + "t is", + "Squ are", + "Ġfoot prints", + "part icip", + "ĠChile an", + "B rend", + "ind ucing", + "M agn", + "Ġclub house", + "ĠMagn um", + "Ġenc amp", + "ĠEth nic", + "uch a", + "ere y", + "Ġw atered", + "ĠCal ais", + "Ġcomplex ion", + "Ġsect s", + "Ġren ters", + "Ġbr as", + "oÄŁ an", + "Time out", + "Man agement", + "Ġinf ographic", + "P okemon", + "Cl ar", + "Ġloc ality", + "Ġfl ora", + "as el", + "P ont", + "Ġpop ulate", + "ĠO ng", + "Ġsubs istence", + "Ġa uctions", + "ĠMcA uliffe", + "ĠL OOK", + "br inger", + "Ġtit an", + "Ġmanif old", + "ĠâĹ ı", + "Ġcalibr ated", + "Ġcal iphate", + "ĠSH E", + 
"ĠCommission ers", + "ce ivable", + "j c", + "W inner", + "5 24", + "Ġcond one", + "Other wise", + "Ġp iling", + "Ġem body", + "ĠCrime an", + "ut ics", + "ĠEx hibition", + "Ġ4 26", + "e ering", + "Ġv ying", + "ĠH UGE", + "* =-", + "Ġprin cipled", + "à ¦", + "Ġquir ks", + "ĠEdit ors", + "put ing", + "G ES", + "ĠF TA", + "ठ¾", + "add on", + "ĠH AM", + "ĠFrie za", + "W oman", + ". $", + "Ġc rib", + "ĠHer od", + "Ġtim ers", + "ĠSp aces", + "ĠMac intosh", + "at aka", + "Ġgl ide", + "Ġsmell ing", + "ĠB AL", + "Ġun su", + "Ġcond os", + "Ġbicy cl", + "ĠRev ival", + "55 3", + "Ġjugg ling", + "H ug", + "ĠKardash ian", + "ĠBalk ans", + "mult iple", + "Ġnutrit ious", + "oc ry", + "19 00", + "Ġinteg rates", + "Ġad joining", + "ĠF older", + "roll ment", + "ven ient", + "Ġu ber", + "y i", + "Ġwh iff", + "ĠJu ven", + "ĠB orough", + "net te", + "Ġb ilingual", + "ĠSp arks", + "ph thal", + "man ufact", + "Ġt outing", + "ĠPH I", + "Ke efe", + "Rew ard", + "Ġinf all", + "ĠTem per", + "typ ically", + "ĠNik ol", + "Ġregular s", + "Ġpseud onym", + "Ġexhib itions", + "Ġbl aster", + "Ġ40 9", + "w arming", + "Ġrever ber", + "Ġrecip rocal", + "Ġ6 70", + "ip ient", + "b ett", + "ĠBe gins", + "Ġit ching", + "ĠPh ar", + "Ass uming", + "Ġem itting", + "ĠML G", + "Ġbirth place", + "Ġt aunt", + "ĠL uffy", + "ĠAm it", + "Ġcir cled", + "ĠN ost", + "enn ett", + "Ġde forestation", + "ĠHist orically", + "ĠEvery day", + "Ġovert ake", + "79 2", + "Ġn un", + "ĠLuc ia", + "Ġaccompan ies", + "ĠSe eking", + "ĠTr ash", + "an ism", + "R ogue", + "Ġnorth western", + "ĠSupplement al", + "ĠNY U", + "ĠF RI", + "ĠSat isf", + "x es", + "5 17", + "Ġreass ured", + "Ġspor adic", + "Ġ7 01", + "Ġmed ial", + "Ġcannabin oid", + "Ġbarbar ic", + "Ġep is", + "ĠExplos ive", + "ĠD ough", + "Ġuns olved", + "Support ed", + "Ġacknowled gment", + "sp awn", + "Ġkit chens", + "Ġ- =", + "talk ing", + "ic ist", + "ĠPeg asus", + "ĠPS U", + "Ġphot on", + "ĠAuthent ication", + "R G", + "@# &", + "76 2", + "ĠCl air", + "Ġdi aper", + "Ġbr ist", + "ĠProsecut ors", + "ĠJ em", + "6 28", + "ĠEvery where", + "ĠJean ne", + "equ ality", + "ãĥ© ãĥ³", + "object s", + "ĠPel icans", + "Ġ39 2", + "Ġbl u", + "b ys", + "ĠA go", + "Ġinstruction al", + "Ġdiscrim inating", + "ĠTR AN", + "ĠCorn el", + "ag os", + "Ġty re", + "Ġas piration", + "ĠBrid gewater", + "\": -", + "! 
\".", + "ĠEn s", + "ĠCoc o", + "P ie", + "Ġdet ach", + "ĠC ouch", + "Ġphys ique", + "ĠOccup ations", + "osc opic", + "en ough", + "B uzz", + "App earance", + "Y P", + "Ġrac er", + "Ġcompl icity", + "r pm", + "T oy", + "Ġinterrupt s", + "ĠCat alyst", + "Ġut ilitarian", + "imp act", + "Ġsp aghetti", + "Ġp orous", + "Ġeste emed", + "Ġinc iner", + "ĠI OC", + "7 48", + "Ġesp resso", + "ĠSm ile", + "abil ia", + "6 35", + "Ġmathematic ian", + "Ġ4 24", + "ĠK L", + "ĠH IP", + "Ġover heard", + "ĠT ud", + "ĠT ec", + "Ġqu izz", + "Ġfl attering", + "Ġcon n", + "âĢ İ", + "Ġatt aches", + "ĠR OS", + "ĠAC S", + "Ġt cp", + "ĠSh ame", + "sk ip", + "res pected", + "ĠTrin idad", + "gr ain", + "Ġfooth old", + "ĠUnch arted", + "ĠJul io", + "z l", + "av ored", + "ĠAn xiety", + "er rors", + "ĠCent auri", + "its ch", + "D addy", + "Ġclutch ing", + "ĠIm plement", + "ĠGut ierrez", + "Ġ7 60", + "Ġtele portation", + "end ra", + "Ġrevers ible", + "st ros", + "Ad venture", + "08 3", + "Ġliber ating", + "Ġas phalt", + "ĠSp end", + "AR DS", + "im sy", + "PR ES", + "ĠEmer ging", + "Ġwild fires", + "Ġtechn ologically", + "Ġem its", + "ĠART ICLE", + "Ġirregular ities", + "Ġcher ish", + "çī Ī", + "Ġst ink", + "ĠR ost", + "Econom ic", + "Ġcough ing", + "ĠMcC ann", + "pro perties", + "ilant ro", + "Ġreneg oti", + "Trans lation", + "Ġin quest", + "ĠGra pe", + "oot ers", + "gu i", + "ĠSwords man", + "ace ae", + "h itting", + "Ġr c", + "Ġexert ed", + "ĠS AP", + "it ent", + "Ġperil ous", + "Ġobsc urity", + "Ġassass inate", + "Ġab original", + "Ġresc uing", + "ĠSh attered", + "lock ing", + "all ion", + "Ch anging", + "ĠHar rington", + "ĠB ord", + "ĠAfgh ans", + "Jam ie", + "aret z", + "ĠAugust us", + "Ġ38 6", + "8 30", + "Ġj og", + "ok ingly", + "Tr igger", + "ĠH OR", + "Stat istics", + "Ġviewers hip", + "Ġadd itives", + "h ur", + "Ġmaxim izing", + "ĠR ove", + "ĠLou ie", + "ĠBuck et", + "ĠCHR IST", + "ou sel", + "Ġstre aks", + "ir ted", + "Ġt ert", + "Ġcolonial ism", + "Ġbur ying", + "y k", + "Cond ition", + "ĠDPR K", + "By Id", + "75 1", + "âĹ ¼", + "Ġwor risome", + "Ġvoc ational", + "sl ice", + "Ġsa ils", + "ĠCorrection al", + "95 4", + "Ġt ul", + "K id", + "l uster", + "Ġfam ilial", + "ĠSp it", + "ĠEp iscopal", + "Specific ally", + "ĠVol cano", + "run s", + "q s", + "Ġve tted", + "Ġcram med", + "t rop", + "here r", + "Thank fully", + "Ġper cussion", + "Ġor anges", + "Ġround up", + "Ġ4 99", + "x ious", + "Char acters", + "ĠZion ism", + "ĠR ao", + "ÃĽ ÃĽ", + "W F", + "Ġunintention al", + "ONE Y", + "Gr ab", + "Com mercial", + "Ġglut amate", + "ĠMcK enna", + "ru ciating", + "ning ton", + "ih u", + "Ch an", + "ĠSw ap", + "Ġleaf lets", + "Ġfunction ally", + "er ous", + "F arm", + "Ġcal oric", + "ĠLiter ally", + "con cert", + "Ġshe nan", + "Ġrep aid", + "ey es", + "Ġbas hing", + "ĠG orge", + "Ġcollabor ations", + "Ġun account", + "itch ie", + "Ġteam work", + "pp elin", + "Ġpip ing", + "Ġmin ced", + "Ġd iam", + "ri eg", + "Ġmasc ara", + "Ġsuck er", + "ĠMo ons", + "App s", + "ĠPe ck", + "Ġper v", + "ĠFl oat", + "o ley", + "ĠN ish", + "im ize", + "Ġarom atic", + "u in", + "end ish", + "! 
/", + "ĠB icycle", + "ĠAS IC", + "ile ged", + "ĠQuad ro", + "ios yn", + "Ġlock out", + "ĠW ink", + "SP EC", + "Attempt s", + "Ġseed ed", + "red o", + "ias is", + "Ġsn ag", + "ãĥķ ãĤ©", + "ãĤ ¶", + "Ġground ing", + "Ġrelie ver", + "Ġfrivol ous", + "ĠG ifts", + "ĠF aces", + "Es pecially", + "Ġmicrobi ome", + "im ag", + "ĠSch l", + "ĠP les", + "ĠBle ach", + "ĠIr win", + "ĠE aton", + "ĠDisc iple", + "Ġmultipl ication", + "Ġcoer ced", + "Ġ4 19", + "st h", + "E vil", + "B omb", + "Ġex orc", + "Ġstag gered", + "L ESS", + "Ġinert ia", + "ĠED IT", + "Ġgo b", + "Tr aditional", + "Ġclass y", + "Lear y", + "ĠP AGE", + "yr s", + "Ġtrans porter", + "Ġmat ured", + "Ġhij ab", + "Ġbi ome", + "Where as", + "Ġex termination", + "ĠT ues", + "ĠT akeru", + "ĠAud rey", + "er ial", + "ĠAd en", + "aff les", + "Ġnarciss istic", + "ĠB aird", + "UT F", + "I re", + "ĠCon nie", + "Ch amp", + "Ġwhis pering", + "ĠH att", + "D K", + "Ġdis infect", + "Ġdeduct ed", + "Ġpart ake", + "Ġdown grade", + "ĠEs ports", + "ĠContin uing", + "Ġdemocr atically", + "icro bial", + "itt a", + "Ġlim estone", + "Ġexempt ed", + "ĠFren zy", + "H erm", + "7 28", + "Ġfled gling", + "Met a", + "765 61", + "69 3", + "% :", + "w ake", + "5 26", + "ĠDis cipline", + "Ġvirgin ity", + "ĠLeg ions", + "ĠFrank ie", + "int ent", + "Ġrest rooms", + "ĠRou ter", + "da q", + "Ġobjection able", + "âĨ ij", + "w ark", + "ĠRah ul", + "g ain", + "activ ation", + "abs olute", + "ĠAccess ed", + "Ġ24 00", + "ogg les", + "Ġsecond ly", + "ĠDEF ENSE", + "Ġpost age", + "wra pper", + "sh arp", + "7 29", + "Ġcommun icates", + "Ġadd on", + "ĠMil itia", + "H ong", + "Ġsl umped", + "ĠJP EG", + "ĠI car", + "ad ish", + "68 1", + "Ġmaj esty", + "ĠWolf gang", + "ĠEl astic", + "u per", + "Ġv iz", + "Ġunconscious ly", + "ĠST D", + "ĠS ass", + "Ġflower ing", + "ĠHel ic", + "ĠDra per", + "ĠAm ateur", + "Ġman ure", + "Ġdis ingen", + "ĠLe i", + "br ing", + "9 49", + "Ġinhib ited", + "Ġhead quartered", + "Ġen igmatic", + "�� �", + "Ġred ress", + "R H", + "Ġratt led", + "Ġd iction", + "l io", + "ĠT BA", + "ĠSN AP", + "C alling", + "Ġfasc ists", + "ĠD ove", + "iew icz", + "0 36", + "Ġco asts", + "ĠR ect", + "Ġ) ]", + "L ot", + "6 29", + "ĠS EM", + "ĠPeters en", + "ĠExpl ain", + "ĠBo ards", + "ĠBe zos", + "ĠJ ournals", + "Ġ20 24", + "p arser", + "Ġmist rust", + "Ġgr ate", + "ĠL ocked", + "bo a", + "S aint", + "g aming", + "Ġvow el", + "in ately", + "bl ow", + "All ah", + "Ġun matched", + "Ġb ordering", + "ĠExp end", + "n r", + "Or acle", + "rou ch", + "Ġcont iguous", + "ac us", + "Ġdist raught", + "58 1", + "Ġanat omical", + "O X", + "ap ixel", + "8 33", + "ĠPL US", + "Ġres usc", + "Ġab iding", + "57 3", + "Ġvac ancies", + "Em ily", + "Ġhyp othal", + "ĠWer ner", + "ĠWe e", + "ĠDJ s", + "5 13", + "Ġwitch craft", + "Ġac upuncture", + "ent ary", + "benef it", + "Product s", + "ĠP SP", + "ĠMP G", + "ĠJ inn", + "ĠJ arrett", + "Ġ4 45", + "ĠIm aging", + "ĠP yth", + "Fin ish", + "Ġte x", + "Ġjuven iles", + "Ġhero ism", + "Ġdoubt less", + "ĠA ki", + "ĠT end", + "ĠPatri arch", + "Ġbit ters", + "ĠTele communications", + "it atively", + "ag na", + "Ġr g", + "ĠS OLD", + "Ġcomp ulsion", + "ĠN asa", + "ĠKath ryn", + "Ġmillion aires", + "Ġintrins ically", + "Ġbolst ered", + "time out", + "fl o", + "Ġtut or", + "p our", + "Stat ement", + "Ġ{ *", + "ĠRud olph", + "ĠKimber ly", + "rog ens", + "adi q", + "] +", + "Ġindign ation", + "Ġfract uring", + "ĠRe leases", + "ĠGr ain", + "pro tein", + "L ago", + "Ġvac ations", + "Ġboot ed", + "ĠTH REE", + "ĠH G", + "oresc ence", + "Ġt f", + "Ġso ar", + "iosyn cr", + 
"Ġgl ances", + "ĠSp oon", + "ĠJ ury", + "ĠCow boy", + "Ġcreat ively", + "Hig her", + "Ġsolic itor", + "Ġhaw k", + "ac io", + "89 6", + "Ġsuperf lu", + "Ġbombs hell", + "ct ure", + "Ġbroker age", + "Ġraid ing", + "Ġf rench", + "Ġang led", + "Trans action", + "ĠGen ocide", + "u pe", + "ĠHait ian", + "57 2", + "! :", + "Ġunwitting ly", + "iter ator", + "sc roll", + "Ġtall ied", + "Ġbi omedical", + "ĠC ARD", + "Ġe uphem", + "Ġbrain storm", + "a quin", + "K o", + "Mic helle", + "ĠR unes", + "ĠBall istic", + "ud ers", + "Ġmod esty", + "ĠiP ads", + "ĠEzek iel", + "Y E", + "Ġstars hip", + "Ġpower fully", + "Ġper l", + "ĠSh ade", + "ĠQu art", + "ĠE EG", + "Ġfisher man", + "OS ED", + "ĠTyp ical", + "df x", + "Ġmes hes", + "Ġet ched", + "worth iness", + "Ġtopp led", + "Ġ3 96", + "or ius", + "We iss", + "Ġmy sql", + "ĠVal halla", + "Ù Ĵ", + "le asing", + "Ġrec omp", + "rap nel", + "S el", + "04 3", + "Ġder ailed", + "ĠGu ides", + "IR T", + "Ġde human", + "ĠBritt any", + "\" ))", + "Ġex claim", + "Ġb alk", + "Ġ8 40", + "CLA IM", + "int el", + "L AB", + "Ġpe gged", + "Ġast roph", + "sm oking", + "Ġrig ging", + "Ġfix ation", + "Ġcat apult", + "ins ide", + "ĠC ascade", + "ĠBolshe vik", + "G aza", + "Dep th", + "Ġloud spe", + "Ġalmond s", + "me yer", + "l eness", + "j en", + "f resh", + "Ġunbeat en", + "ĠSqu id", + "ĠPres umably", + "Tim er", + "B W", + "Ġro sters", + "Ġell ipt", + "ĠHar riet", + "dat abase", + "ĠMut ual", + "ĠComm odore", + "uk ed", + "kn ife", + "ĠCOMM UN", + "h ya", + "Ġmel ts", + "arch ives", + "Ġrat ification", + "Ġmultip lying", + "Ġinter oper", + "Ġasc ert", + "w ings", + "ver ting", + "ĠScorp ion", + "ay e", + "ĠPorts mouth", + "ĠM TA", + "n it", + "iaz ep", + "Ġqu arantine", + "Ġslides how", + "Ġcent imeters", + "Ġsyn opsis", + "Ġsp ate", + "th irst", + "Ġnom inating", + "ĠMel vin", + "Pre view", + "Ġthro b", + "Ġgener ational", + "ĠRad ius", + "rest ling", + "put able", + "aw ar", + "N ECT", + "Ġunlaw fully", + "ĠRevel ations", + "Wik ipedia", + "sur v", + "Ġeye ing", + "ij n", + "ĠF W", + "Ġbr unt", + "Ġinter stellar", + "Ġcl itor", + "ĠCroat ian", + "ĠCh ic", + "ev a", + "ĠDis app", + "ĠA kin", + "iner ies", + "d ust", + "Interest ed", + "Ġgen esis", + "ĠE ucl", + "ö n", + "p icking", + "Ġmut ated", + "Ġdisappro ve", + "ĠHD L", + "Ġ6 25", + "Ì ¶", + "c ancer", + "Ġsqu ats", + "Ġle vers", + "Disc uss", + "= ]", + "D ex", + "ĠVIDE OS", + "A UD", + "Ġtrans act", + "ĠKin ect", + "ĠK uala", + "ĠC yp", + "7 47", + "Ġsh attering", + "Ġarsen ic", + "ĠInt ake", + "ĠAngel o", + "ĠQu it", + "ĠK he", + "Ġ18 93", + "M aker", + "0 29", + "ĠPain ting", + "Dis able", + "9 16", + "Ġanal ges", + "Ġtact ile", + "Ġprop hes", + "Ġd iced", + "ĠTravel s", + "ĠHe ader", + "ĠClub s", + "Ass istant", + "Ġinc rim", + "Ġd ips", + "Ġcruc ifix", + "ĠShan ahan", + "ĠInter pret", + "Ġ40 90", + "al ogy", + "abb a", + "Ġsimul ac", + "hus band", + "S IM", + "Ġrecy cle", + "uc er", + "ed ged", + "Ġre naissance", + "ĠBomb ay", + "Cath olic", + "ĠL INE", + "ĠCl othing", + "re ports", + "Ġpl aus", + "Ġd ag", + "ĠM ace", + "Z I", + "Ġintr uder", + "ĠVeter inary", + "g ru", + "Ġsne aky", + "ĠS ie", + "ĠC innamon", + "P OSE", + "Ġcou rier", + "ĠC NS", + "Ġemanc ipation", + "s it", + "Ġplay through", + "ĠFac ilities", + "v irt", + "ĠG auntlet", + "Thom pson", + "Ġunbeliev ably", + "Param eters", + "Ġst itching", + "ign e", + "ĠTH ESE", + "Priv acy", + "Ġshenan igans", + "Ġvit ri", + "ĠVal id", + "59 1", + "Ń ·", + "ĠProt otype", + "ink a", + "SC P", + "ĠT id", + "è Ī", + "old ed", + "Ġindividual ity", + "Ġbark ing", + 
"Ġm ars", + "ĠW D", + "Ġ8 20", + "Ġt ir", + "Ġsl apping", + "Ġdisgr untled", + "ĠAng ola", + "ri us", + "ĠTorn ado", + "ĠTh urs", + "Ġcapt cha", + "Ġang st", + "ĠP og", + "ĠAssass ins", + "ĠAd idas", + "Ġjoy ful", + "Ġwh ining", + "Emer gency", + "Ġphosph orus", + "Ġatt rition", + "oph on", + "ĠTimber wolves", + "ĠJ ah", + "ĠBr inging", + "ĠW ad", + "ĠEn sure", + "oh l", + "ĠX ie", + "omm el", + "c mp", + "Ġz ipper", + "Ġrel at", + "ĠCor ridor", + "m ilo", + "T ING", + "Av g", + "Ġcro pped", + "] }", + "Ġr aged", + "ĠLump ur", + "ĠGuer rero", + "our ke", + "N ut", + "Ġoff sets", + "og lu", + "dr m", + "Ġmort als", + "lat able", + "Ġdismiss ive", + "ä¸ ī", + "Ġthro ats", + "Ġchips et", + "ĠSpot light", + "Catal og", + "art ist", + "G b", + "Ġch illy", + "Ġst oked", + "Ġ3 74", + "W ard", + "L atin", + "Ġf iasco", + "Ġble ach", + "Ġb rav", + "Enh anced", + "Ġin oc", + "ĠFior ina", + "_ >", + "Ġle ukemia", + "Ġel uc", + "Ġannoun cer", + "ĠLith uan", + "ĠArm ageddon", + "å ĩ", + "Len in", + "ĠR uk", + "Ġpe pp", + "ĠRom antic", + "ĠP IT", + "ĠInter stellar", + "ĠAt kinson", + "R aid", + "J s", + "Go al", + "C ourse", + "Ġvan ishing", + "es ley", + "ĠR ounds", + "Els a", + "59 3", + "Ġredund ancy", + "ĠST AND", + "Ġprop hetic", + "Ġhabit able", + "ry u", + "Ġfaint ly", + "M ODE", + "Ġfl anked", + "IR C", + "Aw esome", + "Ġsp urious", + "ĠZ ah", + "ĠMS G", + "Ġsh ading", + "Ġmotiv ational", + "ĠSant ana", + "ĠS PR", + "Ġexc ruciating", + "om ial", + "ĠM iko", + "ĠLe opard", + "A byss", + "Ġ[ |", + "d irty", + "Ġbath s", + "Ġdem oral", + "and re", + "P B", + "Ġun ification", + "Ġsac rament", + "Ġ[ &", + "Ġpric eless", + "Ġgel atin", + "Ġeman ating", + "ĠAll aah", + "98 6", + "Ġout burst", + "Ġer as", + "ĠX VI", + "ĠSP I", + "O tt", + "ĠLaz arus", + "PL IED", + "F lying", + "blog s", + "W isconsin", + "R aven", + "Ġreb ate", + "Ġcreep s", + "ĠSp an", + "ĠPain ter", + "ĠKir a", + "ĠAm os", + "ĠCor vette", + "Cons umer", + "ĠRec over", + "ck i", + "Ġpes ky", + "ĠIn vention", + "Compan ies", + "Ġchalleng ers", + "ad emic", + "ĠUkrain ians", + "ĠNeuro log", + "ĠFors aken", + "Ġent rants", + "Ġemb attled", + "Ġdef unct", + "ĠGlac ier", + "Ġpo isons", + "ĠH orses", + "m akes", + "ĠD irt", + "Ġ4 23", + "hh h", + "ĠTrans formation", + "QUI RE", + "................ 
..", + "Ġtrave ller", + "ĠSe xy", + "ĠK ern", + "ip olar", + "Ġransom ware", + "oooooooo oooooooo", + "E c", + "rub y", + "Prof essional", + "ĠOut break", + "arg ument", + "G rey", + "ĠFif a", + "ĠCH O", + "ĠFOR M", + "ĠAm trak", + "- [", + "Ġcr adle", + "Ġantioxid ants", + "ãģ®å ®", + "7 36", + "ĠNAS L", + "ĠContribut ions", + "Ind iana", + "ĠST EP", + "C SS", + "Ġsal ient", + "Ġall ocations", + "yr ights", + "Ġm ashed", + "ĠCut ter", + "Sex ual", + "Ġp ounded", + "Ġfan base", + "Ġc asc", + "ĠTrans parency", + "Ġanaly tic", + "ĠSummon er", + "× ŀ", + "ĠAD C", + "det ail", + "Ġvan quished", + "Ġcr abs", + "ar ie", + "Dest roy", + "ĠS ack", + "Ġtrans istor", + "Al abama", + "ĠK oen", + "ĠFisher ies", + "c one", + "Ġannex ed", + "ĠM GM", + "es a", + "Ġf aked", + "ĠCong ratulations", + "Ġhind ered", + "Ġcorrection al", + "ĠI TV", + "lee ve", + "Ġin appropriately", + "lic ks", + "Ġtresp ass", + "Ġp aws", + "Ġnegoti ator", + "ĠChrist ensen", + "lim its", + "ĠDian ne", + "Ġeleg ance", + "ĠContract s", + "an ke", + "Ob j", + "Ġvigil ance", + "Ġcast les", + "ĠN AD", + "ĠHol o", + "Ġemph atically", + "ĠTit us", + "ĠServ ing", + "ĠRich ie", + "ĠP igs", + "5 68", + "Ġanim osity", + "ĠAtt ributes", + "ĠU riel", + "M Q", + "my ra", + "ĠApplic ant", + "Ġpsychiat rists", + "ĠV ij", + "ĠAb by", + "ag ree", + "P ush", + "Ġk Wh", + "hib a", + "Ġinc ite", + "ĠWe asley", + "ĠTax i", + "minist ic", + "hy per", + "ĠF arn", + "Ġ6 01", + "ĠNation wide", + "F ake", + "95 2", + "Ġma ize", + "Ġinteract ed", + "Ġtransition ed", + "Ġparas itic", + "Ġharm onic", + "Ġdec aying", + "Ġbas eless", + "ns ics", + "Ġtrans pired", + "Ġabund antly", + "ĠFore nsic", + "Ġtread mill", + "ĠJ av", + "ab and", + "Ġssh d", + "Ġfront man", + "ĠJak arta", + "oll er", + "dro ps", + "ĠSERV ICES", + "rompt u", + "oph ical", + "h ospital", + "bled on", + "6 45", + "Ġmid range", + "ĠEV ENT", + "cul ated", + "raw led", + "Ġper ched", + "Ġover board", + "ĠPe el", + "ĠP wr", + "ĠCar th", + "ĠCOM PLE", + "co e", + "sh all", + "Ġdeter rence", + "M ETHOD", + "ĠAbs ent", + "M EN", + "Ġs ill", + "ĠLE VEL", + "Y ork", + "Ġsin ners", + "ĠOP EC", + "ĠN ur", + "ĠDesign s", + "se lection", + "Ġunw orthy", + "CH A", + "Ġstreng thens", + "88 3", + "ed ly", + "Ġslic ing", + "Ġmal nutrition", + "Ġfilm making", + "ĠPol k", + "ur ated", + "Ġ4 21", + "bre akers", + "!' 
\"", + "Ġwet lands", + "ĠDisc rimination", + "Ġallow able", + "Ġste ered", + "ĠSic ily", + "S AM", + "Ġmust ache", + "Ġm ids", + "Ġcl ipped", + "Ġcirc ulate", + "Ġbr ittle", + "ĠBuild ings", + "ra ised", + "ĠRound up", + "Ġwealth ier", + "Ġoverw rite", + "Ġover powered", + "ĠGerr ard", + "s ites", + "PD ATED", + "Ġacute ly", + "ĠGam ble", + "Ġp im", + "ĠK us", + "Typ ically", + "De ploy", + "ĠMoroc can", + "p otion", + "com be", + "Ġvigil ante", + "Ġ36 3", + "St ew", + "ĠB agg", + "Ġres ided", + "ĠSp o", + "Ġrem nant", + "Ġempt iness", + "br ainer", + "Ġout patient", + "pri ority", + "Ġle ptin", + "ĠPay ton", + "ĠGle aming", + "ĠS hed", + "ĠPol o", + "ĠMormon ism", + "rest ricted", + "arl ane", + "w x", + "Ġcreat ine", + "ĠAn on", + "ĠST UD", + "ĠJ UL", + "ĠT ee", + "5 28", + "08 9", + "Ġhat ched", + "Dis patch", + "ĠCompos ite", + "Ġ45 1", + "p uff", + "ĠX COM", + "ĠOr n", + "ĠTH ANK", + "END ED", + "ĠAshe ville", + "Ġà ľ", + "Ġman go", + "ĠS lightly", + "world ly", + "ĠW ander", + "ĠExp and", + "ĠCh r", + "M ist", + "Ġorthodox y", + "ĠUN ESCO", + "reg ate", + "Else where", + "k ie", + "ir led", + "Ġtopp le", + "Ġadopt ive", + "ĠLeg s", + "d ress", + "ĠS agan", + "b are", + "ĠGl ou", + "Cr unch", + "Ġhelp ers", + "Ġchron ically", + "ĠH uma", + "1 0000", + "Ġaccommod ating", + "äº Ķ", + "Ġwrink les", + "Ġdod ged", + "four th", + "Ġpre con", + "Ġcompress or", + "ĠK are", + "Ġev ict", + "ĠWar wick", + "im ar", + "Ġmodern ization", + "Ġband wagon", + "Ġref uted", + "Ġnet ted", + "ĠNa ples", + "ĠGen ie", + "per ors", + "Ġfield ed", + "Ġde re", + "ĠPar ables", + "le es", + "Ġtr out", + "asp ers", + "Ġn ihil", + "Ġhapp iest", + "Ġflo ppy", + "ĠLo ft", + "ĠHe ard", + "Ġun ison", + "Ġl ug", + "ĠRed mond", + "class ic", + "Supp orters", + "SH IP", + "G MT", + "Ġfue lled", + "ç IJ", + "Ġd d", + "ĠEmin em", + "Ġ18 97", + "NY SE", + "Ġsecret aries", + "ĠF IA", + "ĠCanaver al", + "F avorite", + "Ġp omp", + "Ġdetain ee", + "ers hip", + "aim on", + "i our", + "ĠA pex", + "Ġplant ations", + "am ia", + "ac ion", + "R ust", + "Ġtow ed", + "ĠTru ly", + "5 77", + "Ġshel tered", + "r ider", + "W o", + "Ġl air", + "ĠInt elligent", + "impro ve", + "m atically", + "Ġet iquette", + "ad ra", + "all o", + "ĠJun o", + "any thing", + "ĠStru ggle", + "ĠPred ict", + "ĠGr imes", + "ĠAMER ICA", + "ct x", + "ĠSit uation", + "W OOD", + "Ġsol uble", + "me ier", + "Ġintoler able", + "ang ering", + "Ġun interrupted", + "Ġtool tip", + "Ġinterrog ated", + "Ġgun ned", + "ĠSne ak", + "æŃ ¦", + "Ġt ether", + "Ġcr umble", + "L ens", + "Ġclust ered", + "ĠSy l", + "ĠHas an", + "Ġdystop ian", + "w ana", + "Ġjoy stick", + "ĠTh ib", + "amm u", + "Tom orrow", + "5 46", + "Ġoverc ame", + "Ġminim ized", + "cept or", + "Run ner", + "ENG TH", + "ĠBrend a", + "ĠAchieve ments", + "Ġtor ches", + "Ġrapp ort", + "ĠInvestig ator", + "ĠHand ling", + "rel ation", + "g rey", + "8 15", + "Ġk cal", + "ĠComm ands", + "d q", + "Ġcur ls", + "Ġbe arer", + "Ġcyn icism", + "it ri", + "ĠUse ful", + "B ee", + "D CS", + "Ġab ras", + "P ract", + "BIL ITIES", + "7 12", + "Ġdebug ger", + "Ġdebt or", + "ĠL ia", + "ĠK ers", + "Ġexacerb ate", + "ĠSt acy", + "ĠB land", + "ĠSc enes", + "Ġbranch ing", + "âĸĪâĸĪâĸĪâĸĪ âĸĪâĸĪâĸĪâĸĪ", + "ape ake", + "Ġs alsa", + "Ġmish and", + "ĠKon ami", + "ĠN ib", + "Ġanecd ote", + "Ġagree able", + "Ï ī", + "ĠNath aniel", + "ĠHe isman", + "ĠB eware", + "Ġ18 86", + "spect ive", + "69 1", + "5 22", + "Ġinhib its", + "Ġhas hing", + "Ġ18 89", + "å° Ĩ", + "v ich", + "P ure", + "Ġsolid ly", + "Ġaspir in", + "im aru", + "Ġstreet car", + "ĠU CS", 
+ "ĠJ udd", + "Ġflash backs", + "p ins", + "Ġ14 40", + "ĠUN HCR", + "ĠSym ptoms", + "T IT", + "5 38", + "F ra", + "% );", + "Ġo oz", + "Ġcur few", + "Ġcal med", + "Ġparticip ates", + "Te X", + "Ġnons ensical", + "Ġfull back", + "ĠDe L", + "mon key", + "h ari", + "Ġmetabol ites", + "Ġloot ed", + "ĠAL WAYS", + "ĠB CC", + "L t", + "oc het", + "B one", + "Ġveto ed", + "Ġg cc", + "ĠCL ICK", + "Ġ18 88", + "s af", + "Ġstiff ness", + "Ġlow ly", + "ĠGe h", + "vers on", + "ors et", + "Ġun foreseen", + "Ġan esthesia", + "ĠOpt ical", + "Ġrecon structed", + "ĠT up", + "sh ows", + "NEW S", + "ĠNewsp aper", + "ĠA SA", + "ter a", + "N umbers", + "Ġinexpl icable", + "× ij", + "Ġhard ness", + "unt arily", + "ĠA cer", + "grad ient", + "ARD IS", + "Ġwood land", + "Ġmetaph ors", + "ĠWem bley", + "ĠPa vel", + "phil is", + "Ġre writing", + "Ġpercept ual", + "Ġ10 70", + "worm s", + "ĠDown s", + "Ġunsur prisingly", + "Ġtag ging", + "fl ame", + "Ġlit res", + "Ġboun ces", + "ĠB abe", + "sh ut", + "Ġoverd oses", + "ĠShe ila", + "ĠCh au", + "ĠBl ess", + "Capt ure", + "ĠSign ificant", + "ĠSc ion", + "Ġ38 9", + "ĠMc H", + "ĠTitan ium", + "ĠMe al", + "amed a", + "ag ents", + "agg ressive", + "B illy", + "76 3", + "ĠS aying", + "DER R", + "it one", + "Coll ins", + "B ound", + "Ġbol ted", + "ĠDM CA", + "95 3", + "Ġun iqueness", + "Ġep igen", + "un ci", + "ant am", + "Ġreck oning", + "ch airs", + "OG R", + "ĠSen egal", + "Ġ18 62", + "re levant", + "Ġ ¯", + "Ġpharm acies", + "ĠG eral", + "v ier", + "Y an", + "OR PG", + "Ġrab id", + "b ending", + "ĠUN ITED", + "Ġ4 65", + "As sembly", + "Ġwe ep", + "Ġbe hest", + "ĠMother s", + "ĠJ ace", + "h id", + "Ġwh irlwind", + "ĠUN IVERS", + "Ġut opian", + "Ġkidn ap", + "Ph ilipp", + "K in", + "89 3", + "Ġlivest ream", + "ĠM ISS", + "Ġsub versive", + "ĠTechn iques", + "ĠJUST ICE", + "ĠB ASE", + "Ġ38 7", + "Ġassail ants", + "ĠHard core", + "Ġsprink led", + "ĠP se", + "é ļ", + "print ed", + "ĠH au", + "OR GE", + "ĠT OUR", + "Ġl aced", + "Ġit ch", + "G iving", + "Ġport ed", + "78 1", + "//////////////// ////////////////", + "bre eding", + "Ġlog ger", + "ĠH OL", + "inn ie", + "First ly", + "Ġembry onic", + "Ġdeleg ated", + "p ai", + "O IL", + "Ġcentr ally", + "ĠR x", + "ĠSc outing", + "D utch", + "Ġhe reditary", + "ĠCru iser", + "s at", + "5 29", + "ĠMar riott", + "other mal", + "Ġprohib itions", + "E arn", + "ĠSt ab", + "ĠColleg es", + "ĠBel ief", + "st retched", + "ĠL H", + "ĠEntity Item", + "C IA", + "Ġun rem", + "Ġlaure ate", + "Ġdenomin ations", + "sum mary", + "h ler", + "S pect", + "ĠK laus", + "ĠBe ans", + "Ġins ur", + "ĠPA X", + "Ġfield er", + "ĠV et", + "ĠSp arrow", + "z ie", + "ĠS Q", + "ĠMond ays", + "ĠOff line", + "ĠLer ner", + "ĠExt ensions", + "Ire land", + "Ġpatron age", + "Ġcontrast ed", + "ĠMan ia", + "h irt", + "Mos cow", + "Ġcondem ns", + "ĠAn ge", + "Ġcomp osing", + "ĠPe pe", + "ĠP addock", + "Ġheter ogeneity", + "Ġide ologically", + "Ġf ishes", + "Ġcur sing", + "ĠR utherford", + "ĠFlo ating", + "ĠAm elia", + "Te a", + "Syn opsis", + "Ġstun ts", + "Ġbe ad", + "Ġstock ing", + "ĠM ILL", + "ob ook", + "mass ive", + "\\ <", + "Ġh ump", + "ĠPref erences", + "Engine Debug", + "ge ist", + "ĠNiet o", + "ome ver", + "ish y", + "eval uate", + "col onial", + "Altern ative", + "ĠGo Pro", + "ĠV ortex", + "ĠNET WORK", + "ans ky", + "Sec ure", + "ĠTh rust", + "Sn ake", + "Ġparcel s", + "Ġsam urai", + "Ġactress es", + "N ap", + "M F", + "ifer ation", + "Be er", + "5 23", + "ĠI ly", + "oint ment", + "P ing", + "Ġstri ped", + "ĠMell on", + "oss ession", + "Ġneut ron", + "end ium", + "Ġa 
ph", + "ĠFlav oring", + "Ġ38 3", + "Ġrespons iveness", + "ĠJ indal", + "ĠHitch cock", + "Den ver", + "ĠDRAG ON", + "sm anship", + "ĠDu pl", + "Ġs ly", + "Ġweb cam", + "ĠTw ain", + "ĠDar ling", + "ili ate", + "cons umer", + "D IT", + "Ġnames ake", + "Ġun orthodox", + "Ġfun er", + "ĠPL oS", + "ĠCONTR OL", + "ozy g", + "ogl obin", + "F ACE", + "ER G", + "ĠD ia", + "ĠF iesta", + "ce le", + "0 34", + "Ġencl ave", + "âĸ¬ âĸ¬", + "on ement", + "al ist", + "M and", + "Ġhome grown", + "ĠF ancy", + "Ġconcept ions", + "ĠCont ains", + "ure en", + "Ġreiter ate", + "Ġme ager", + "Ġinstall ments", + "Sp awn", + "6 27", + "Ġphot oc", + "ĠCab rera", + "ĠRos enthal", + "ĠLans ing", + "is ner", + "Ġinvest s", + "ĠUFO s", + "EX P", + "Hard ware", + "Ġtr agically", + "Ġconced es", + "ie ft", + "ch am", + "bor gh", + "ĠSch r", + "ĠMel anie", + "ĠH oy", + "Ġvisit ation", + "Ġid iosyncr", + "Ġfract ions", + "Ġfore skin", + "ob os", + "Ġpo aching", + "ĠVI EW", + "Ġstimul ates", + "ĠG ork", + "can on", + "M IC", + "ĠNem esis", + "ĠInd ra", + "ĠDM V", + "Ġ5 29", + "Ġinspect ing", + "Ġgrand ma", + "ĠW hedon", + "ĠSh ant", + "ĠP urg", + "ik an", + "ĠT eg", + "ĠCL R", + "z ac", + "Vict oria", + "ĠVer ify", + "ion ics", + "Ġpart ying", + "ĠM ou", + "col our", + "Ġtestim onies", + "l ations", + "Ġpress uring", + "hi ro", + "ac ers", + "Ġf id", + "ang ler", + "ĠCS I", + "Ġhere after", + "Ġdiss idents", + "report ing", + "iph any", + "che v", + "Ġsol itude", + "Ġl obe", + "Ġind is", + "Ġcred ential", + "re cent", + "ad ult", + "ĠNir vana", + "ĠFranch ise", + "L ayer", + "H yp", + "ĠBerks hire", + "Ġwill s", + "t if", + "Ġtot em", + "ĠJud ah", + "rep air", + "Inst ant", + "5 48", + "Ġemb assies", + "Ġbott leneck", + "Ġb ount", + "Ġtyp ew", + "ĠAl vin", + "j ing", + "im ilar", + "R ush", + "Ġbr im", + "ĠHEL P", + "A im", + "] '", + "Ġpass ively", + "Ġbound ed", + "ĠR ated", + "Ġcriminal ity", + "Ġbiom ark", + "Ġdisp atcher", + "ĠTow ards", + "Ġ+ ++", + "right eous", + "f rog", + "ĠP anc", + "C arter", + "0 32", + "æ© Ł", + "Ġult raviolet", + "ĠLic ensed", + "ĠT ata", + "ĠBl essing", + "ĠG AM", + "Ġchem ically", + "ĠSe af", + "ĠRE LE", + "ĠMerc enary", + "capital ist", + "Ġform ulations", + "Ġann ihilation", + "ĠVer b", + "ĠAr gon", + "Ġun loaded", + "Ġmorp hed", + "Ġconqu ering", + "back er", + "I ELD", + "Ġtheft s", + "Ġfront runner", + "ĠRoy ale", + "ĠFund amental", + "el ight", + "C hip", + "necess ary", + "ay n", + "ĠSl ip", + "Ġ4 48", + "cern ed", + "P ause", + "Ġshock ingly", + "ĠAB V", + "Ġcomp osure", + "7 33", + "ĠMotors port", + "ah ime", + "Mur ray", + "M ach", + "Ġgr ids", + "Ġdeb ian", + "Ġfurther more", + "Ġdexter ity", + "ĠCollect ions", + "os lov", + "il age", + "b j", + "ĠMont eneg", + "Ġstrut Connector", + "Ġmassac res", + "Ġbrief s", + "fet ched", + "uv ian", + "ol ition", + "Fail ure", + "emon ic", + "Ġfl ared", + "Ġclaim ant", + "Ġc ures", + "Ġgive aways", + "ĠSubst ance", + "al ions", + "Ġcr inge", + "ĠK ul", + "Ġarist ocracy", + "ĠUl ster", + "ol ated", + "h ousing", + "ĠM IS", + "Ġgl ared", + "ĠWil helm", + "ne eds", + "lam bda", + "build ers", + "ĠV IS", + "Ġradi ator", + "ĠGhost busters", + "Ġ4 36", + "act ual", + "Ġher ds", + "ç a", + "watch ing", + "Ġcounter ing", + "Ch arge", + "Ġchar red", + "Ġwar heads", + "Ġiod ine", + "ĠM acy", + "04 1", + "Ġdepart ures", + "ĠS ins", + "Ġdy ed", + "ĠConcept s", + "g ado", + "7 13", + "Ġquot ations", + "Ġg ist", + "ĠChrist y", + "Ġant igen", + "ĠHem p", + "ĠD rawn", + "ĠB arg", + "ez vous", + "Ġp aternity", + "Ġar du", + "ĠAnch orage", + "ĠR ik", + "Ġover 
loaded", + "ĠUs ername", + "ĠTam my", + "ĠN au", + "ĠCell ular", + "Ġw aning", + "Ġrod ent", + "ĠWor cester", + "il ts", + "ĠT ad", + "Ġdwell ings", + "Ġbull ish", + "4 31", + "Ġretali ate", + "Ġmig raine", + "ĠChev ron", + "CH ECK", + "Ġdon key", + "c rim", + "SP A", + "ĠAn alog", + "Ġmarqu ee", + "ĠHa as", + "B ir", + "ĠGD DR", + "ĠDownload s", + "Ġwill power", + "ĠFor th", + "ĠRecord ed", + "Ġimp ossibility", + "ĠLog ged", + "ĠFr anks", + "ĠR att", + "in itions", + "Ġclean ers", + "Ġsore ly", + "Ġflick ering", + "ĠEx amination", + "c atching", + "allow een", + "Ms g", + "Ġdun no", + "F a", + "Ġdys ph", + "c razy", + ".' '.", + "Ġmain line", + "Ġc s", + "Ġp tr", + "ĠW ally", + "ig un", + "95 1", + "ĠBig foot", + "f ights", + "Ġretrie ving", + "J r", + "Ġdupl ication", + "ĠExpl an", + "Ġrel ational", + "Ġqu aint", + "Ġbisc uits", + "Ġad o", + "Ġsh udder", + "Ġantid ote", + "blood ed", + "ks h", + "Ġsa uces", + "Ġrein vest", + "Ġdispens ary", + "ĠD iver", + "Ġ9 000", + "stud ent", + "Ġin separ", + "esc ap", + "Ġtodd lers", + "ĠGP IO", + "ĠAss ignment", + "head ers", + "Ġlack luster", + "Ġab ack", + "95 6", + "Ġtool bar", + "7 45", + "Ġo ust", + "Ġcontempl ation", + "ĠPRES IDENT", + "Ġ4 58", + "==== ==", + "Ġguarantee ing", + "ĠHe ist", + "ĠCann es", + "Ļ ½", + "Ġcollabor ator", + "ĠAm p", + "Ġg ou", + "ĠSH ALL", + "st ories", + "78 3", + "Ġmobil ized", + "Ġbro od", + "ĠL U", + "ĠðŁ ij", + "Ġref in", + "ĠAnthrop ology", + "v ind", + "ill i", + "Ġwarrant ies", + "ĠB abel", + "Ġsw ath", + "Ġc aches", + "Ġantagon ists", + "art ifacts", + "Ġhot ly", + "ĠSt arts", + "ĠG ö", + "z ag", + "!! !!!", + "Ġsc ourge", + "Ġcons piring", + "ru its", + "re verse", + "ĠShe en", + "ĠJes uit", + "ĠGiov anni", + "ad ies", + "Ġbutt ocks", + "ear cher", + "ac an", + "Ġvolley ball", + "Ġshroud ed", + "Ġscore board", + "b ats", + "ĠI PM", + "Ġass es", + "Ġde regulation", + "ĠTe legram", + "ĠReb oot", + "Ġ7 000", + "ĠCan ary", + "Ġk ernels", + "ĠFranç ois", + "ĠD uff", + "ĠP on", + "ĠLe ica", + "ĠGar min", + "Ġor phans", + "ĠClaud ia", + "Ġcal endars", + "ĠLe ilan", + "ent o", + "R ocket", + "Ġbr unch", + "ĠHaw king", + "ain ers", + "Ġsens ibilities", + "Ġk W", + "ĠK and", + "Ġre claimed", + "Ġinteresting ly", + "× ©", + "rom y", + "J M", + "ĠEnhance ment", + "b ush", + "Sk ip", + "Ġrapp ers", + "Ġg azing", + "p edia", + "ath lon", + "Rev olution", + "Ġsn ipers", + "Ġre verted", + "Ġconglomer ate", + "T erry", + "79 4", + "Ġhars her", + "Ġdes olate", + "ĠHit man", + "Comm ission", + "Ġ( /", + "âĢ¦ .\"", + "Com par", + "Ġampl ification", + "om inated", + "Ġreg ress", + "ĠColl ider", + "Ġinform ants", + "Ġg azed" + ] + } +} diff --git a/tests/data/sample_prompt.txt b/tests/data/sample_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..3fa3429641adb30c5effb3982a8ba12561966ea7 --- /dev/null +++ b/tests/data/sample_prompt.txt @@ -0,0 +1 @@ +Hello, I'm a language model diff --git a/tests/model/__init__.py b/tests/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d38c7d4d032b891ee65e526e3b324553c9550b0a --- /dev/null +++ b/tests/model/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/model/test_fused_kernels.py b/tests/model/test_fused_kernels.py
new file mode 100644
index 0000000000000000000000000000000000000000..125eb6c52d97a8279da08c9ca2c4dd7cb5ef2d3d
--- /dev/null
+++ b/tests/model/test_fused_kernels.py
@@ -0,0 +1,253 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import pytest
+import torch
+
+from transformers import BertTokenizer, GPT2Tokenizer
+from transformers.models.bert.modeling_bert import BertModel
+from transformers.models.gpt2.modeling_gpt2 import GPT2Model
+from megatron.fused_kernels import load
+import transformers
+
+transformers.logging.set_verbosity(
+    transformers.logging.FATAL,
+)
+
+
+@pytest.mark.xfail(reason="SystemExit: None")
+def test_load_fused_kernels():
+    load()
+    try:
+        # these extensions only exist after the fused kernels have been built
+        import scaled_masked_softmax_cuda
+        import scaled_upper_triang_masked_softmax_cuda
+        import fused_rotary_positional_embedding
+
+        print("[Success] load_fused_kernels")
+    except ImportError as e:
+        print("[Fail] load_fused_kernels")
+        raise e
+
+
+@pytest.mark.xfail(reason="SystemExit: None")
+def test_fused_softmax():
+    load()
+    from megatron.model.fused_softmax import FusedScaleMaskSoftmax, SoftmaxFusionTypes
+    from megatron.model.gpt2_model import (
+        gpt2_attention_mask_func as attention_mask_func,
+    )
+
+    bert = BertModel.from_pretrained("bert-base-cased").cuda().half()
+    tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
+    test_text = (
+        "Hello. How are you? I am fine thank you and you? yes Good. 
" + "hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + embedding_output = bert.embeddings( + input_ids=tokens["input_ids"].cuda(), + position_ids=None, + token_type_ids=tokens["token_type_ids"].cuda(), + inputs_embeds=None, + past_key_values_length=0, + ) + + # (bsz, 1, 1, seq_len) + mask = bert.get_extended_attention_mask( + attention_mask=tokens["attention_mask"].cuda(), + input_shape=tokens["input_ids"].shape, + device=bert.device, + ) + # (bsz, 1, seq_len, seq_len) + mask = mask.repeat(1, 1, mask.size()[-1], 1) + + attention = bert.encoder.layer[0].attention.self + key_layer = attention.transpose_for_scores(attention.key(embedding_output)) + query_layer = attention.transpose_for_scores(attention.query(embedding_output)) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores /= math.sqrt(key_layer.size()[-1]) + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + fusion_type=SoftmaxFusionTypes.general, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attention_scores, + (mask != 0), + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + fusion_type=SoftmaxFusionTypes.none, + scale=None, + softmax_in_fp32=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attention_scores, + (mask != 0), + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + + +@pytest.mark.xfail(reason="SystemExit: None") +def test_fused_upper_triangle_mask_softmax(): + load() + from megatron.model.gpt2_model import ( + gpt2_attention_mask_func as attention_mask_func, + ) + from megatron.model.fused_softmax import FusedScaleMaskSoftmax, SoftmaxFusionTypes + + gpt = GPT2Model.from_pretrained("gpt2").cuda().half() + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. 
" + "hi hi hi hi hi hi hi" # 24 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + attention_mask = tokens["attention_mask"].cuda() + attention_mask = attention_mask.view(attention_mask.size(0), -1) + attention_mask = attention_mask[:, None, None, :] + attention_mask = (1.0 - attention_mask) * -10000.0 + attention_mask = attention_mask.repeat(1, 1, attention_mask.size()[-1], 1) + attn = gpt.h[0] + + hidden_states = gpt.wte(tokens["input_ids"].cuda()) + q, k, v = attn.attn.c_attn(hidden_states).split(768, dim=-1) + q = attn.attn._split_heads(q, attn.attn.num_heads, attn.attn.head_dim) + k = attn.attn._split_heads(k, attn.attn.num_heads, attn.attn.head_dim) + attn_weights = torch.matmul(q, k.transpose(-1, -2)) + + sq, sk = q.size(-2), k.size(-2) + causal_mask = attn.attn.bias[:, :, sk - sq : sk, :sk].bool() + total_mask = ~(causal_mask & (attention_mask == 0)) + """ + tensor([[[[False, True, True, ..., True, True, True], + [False, False, True, ..., True, True, True], + [False, False, False, ..., True, True, True], + ..., + [False, False, False, ..., False, True, True], + [False, False, False, ..., False, False, True], + [False, False, False, ..., False, False, False]]] + """ + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + fusion_type=SoftmaxFusionTypes.upper_triang, + scale=None, + softmax_in_fp32=False, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attn_weights, + total_mask, + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + fusion_type=SoftmaxFusionTypes.none, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attn_weights, + total_mask, + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) diff --git a/tests/model/test_model_checkpoint.py b/tests/model/test_model_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..96f51683b848589b327edfdd080374c666081922 --- /dev/null +++ b/tests/model/test_model_checkpoint.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+instantiate models, save checkpoints, load checkpoints, compare loaded parameters to saved parameters and compare forward pass outputs
+
+These tests contain a relatively large number of functions. They are not split into separate tests because a lot of boilerplate (e.g. instantiating a model) needs
+to run in order to perform the follow-up tests. Joining them into one test reduces runtime at the expense of decreased transparency of test results in case of failures.
+"""
+import os
+import shutil
+import torch
+
+import pytest
+from tests.common import (
+    DistributedTest,
+    clear_test_dirs,
+    model_setup,
+    binary,
+    parametrize,
+)
+
+PARAMS_TO_TEST = {
+    "pipe_parallel_size,model_parallel_size": [[0, 1], [1, 2], [0, 2], [2, 1]],
+    "checkpoint_validation_with_forward_pass": [True],
+    "fp16,fp32_allreduce": [
+        [
+            {
+                "enabled": True,
+                "type": "bfloat16",
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            True,
+        ],
+        [
+            {
+                "enabled": True,
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            False,
+        ],
+    ],
+}
+
+parameters, names = parametrize(
+    PARAMS_TO_TEST, max_tests=int(os.getenv("MAX_TESTCASES", 50)), seed=None
+)
+
+
+@pytest.mark.skip
+@pytest.mark.parametrize("param_dict", parameters, ids=names)
+def test_train(param_dict):
+    import tempfile
+
+    d = tempfile.mkdtemp()
+    param_dict["save"] = d
+
+    t1 = test_run_checkpoint_test_class()
+    t1.run_checkpoint_test(param_dict=param_dict)
+
+
+class test_run_checkpoint_test_class(DistributedTest):
+    def run_checkpoint_test(self, yaml_list=None, param_dict=None):
+
+        from megatron.checkpointing import load_checkpoint
+        from megatron.checkpointing import save_checkpoint
+
+        model, optimizer, lr_scheduler, args_loaded = model_setup(
+            yaml_list, param_dict, clear_data=True
+        )
+
+        # save model checkpoint
+        save_checkpoint(
+            neox_args=args_loaded,
+            iteration=42,
+            model=model,
+            optimizer=optimizer,
+            lr_scheduler=lr_scheduler,
+        )
+
+        # reload model from checkpoint
+        (
+            reloaded_model,
+            reloaded_optimizer,
+            reloaded_lr_scheduler,
+            args_reloaded,
+        ) = model_setup(yaml_list, param_dict, clear_data=False)
+        iteration = load_checkpoint(
+            neox_args=args_reloaded,
+            model=reloaded_model,
+            optimizer=reloaded_optimizer,
+            lr_scheduler=reloaded_lr_scheduler,
+        )
+
+        # ensure the same checkpoint is loaded
+        assert (
+            iteration == 42
+        ), "run_checkpoint_test() iteration loaded from checkpoint should equal the saved iteration"
+
+        # check that all weight groups are the same
+        for idx, ((n1, p1), (n2, p2)) in enumerate(
+            zip(
+                list(model.module.named_parameters()),
+                list(reloaded_model.module.named_parameters()),
+            )
+        ):
+            assert n1 == n2
+            params_equal = (p1 == p2).all().item()
+            assert params_equal, "run_checkpoint_test() params equal: " + str(n1)
+
+
+if __name__ == "__main__":
+    params = list(
+        parametrize(
+            PARAMS_TO_TEST, max_tests=int(os.getenv("MAX_TESTCASES", 50)), seed=None
+        )
+    )
+    test_train(params[0])
diff --git a/tests/model/test_model_generation.py b/tests/model/test_model_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dd93f355920625eeeab8f39f4d2caa59482f2f0
--- /dev/null
+++ b/tests/model/test_model_generation.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+instantiate models and generate text from prompts, checking that valid output is produced for each prompt
+
+These tests contain a relatively large number of functions. They are not split into separate tests because a lot of boilerplate (e.g. instantiating a model) needs
+to run in order to perform the follow-up tests. Joining them into one test reduces runtime at the expense of decreased transparency of test results in case of failures.
+"""
+
+
+import os
+import pytest
+from tests.common import DistributedTest, model_setup, parametrize
+
+PARAMS_TO_TEST = {
+    "pipe_parallel_size,model_parallel_size,world_size": [
+        [0, 1, 1],
+        [0, 1, 2],
+        [1, 2, 2],
+        [0, 2, 2],
+        [2, 1, 2],
+    ],
+    "top_p,temperature,top_k": [[0.0, 0.5, 0], [0.5, 0.0, 100], [0.5, 0.5, 0]],
+    "prompt": ["", "hello world"],
+    "fp16,fp32_allreduce": [
+        [
+            {
+                "enabled": True,
+                "type": "bfloat16",
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            True,
+        ],
+        [
+            {
+                "enabled": True,
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            False,
+        ],
+    ],
+}
+
+parameters, names = parametrize(
+    PARAMS_TO_TEST, max_tests=int(os.getenv("MAX_TESTCASES", 50)), seed=None
+)
+
+
+@pytest.mark.skip
+@pytest.mark.parametrize("param_dict", parameters, ids=names)
+def test_train(param_dict):
+    t1 = run_generate_test_class()
+    t1.run_generate_test(param_dict, param_dict.pop("prompt"))
+
+
+class run_generate_test_class(DistributedTest):
+    world_size = 2
+
+    def run_generate_test(self, param_dict, prompt):
+        from megatron.text_generation_utils import generate_samples_from_prompt
+        from megatron.utils import is_mp_rank_0
+
+        fixed_params = {
+            "num_samples": 3,
+            "maximum_tokens": 50,
+            "make_vocab_size_divisible_by": 2,
+            "sample_output_file": "test_sample_output.txt",
+            "checkpoint_activations": False,
+            "partition_activations": False,
+            "no_load_optim": True,
+        }
+
+        param_dict.update(fixed_params)
+        # TODO: we don't need to reinstantiate the model every time if we're only changing sampling settings - there should be a workaround for this
+        model, _, _, args_loaded = model_setup(None, param_dict, clear_data=True)
+        model.eval()
+
+        prompts = [prompt for _ in range(args_loaded.num_samples)]
+        output = generate_samples_from_prompt(
+            neox_args=args_loaded,
+            model=model,
+            text=prompts,
+            maximum_tokens=args_loaded.maximum_tokens,
+            recompute=False,
+            temperature=args_loaded.temperature,
+            top_k=args_loaded.top_k,
+            top_p=args_loaded.top_p,
+        )
+
+        # outputs only get generated on mp rank 0
+        if is_mp_rank_0():
+            assert len(output) == len(prompts)
+            for prompt, out in zip(prompts, output):
+                assert prompt == out["context"]
+                assert len(out["text"]) > 0
diff --git a/tests/model/test_model_instantiation.py b/tests/model/test_model_instantiation.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c5cae4cf1e27bb262a3dcc8071ae18a03c2b53
--- /dev/null
+++ b/tests/model/test_model_instantiation.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+instantiate models with different configurations as a first possible point of failure
+"""
+
+import pytest
+
+import torch
+import os
+from tests.common import (
+    DistributedTest,
+    model_setup,
+    clear_test_dirs,
+    parametrize,
+    binary,
+)
+
+PARAMS_TO_TEST = {
+    "pipe_parallel_size,model_parallel_size,world_size": [
+        [0, 1, 1],
+        [1, 2, 2],
+        [0, 2, 2],
+    ],
+    "no_weight_tying": binary,  # `binary` from tests.common is assumed to expand to [True, False]
+    "attention_config": [
+        [[["global"], "all"]],
+        [[["local"], "all"]],
+        [[["sparse_variable"], "all"]],
+        [[["sparse_fixed"], "all"]],
+    ],
+    "scaled_upper_triang_masked_softmax_fusion,bias_gelu_fusion": [
+        [True, False],
+        [False, True],
+    ],
+    "fp16,fp32_allreduce": [
+        [
+            {
+                "enabled": True,
+                "type": "bfloat16",
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            True,
+        ],
+        [
+            {
+                "enabled": True,
+                "loss_scale": 0,
+                "loss_scale_window": 1000,
+                "hysteresis": 2,
+                "min_loss_scale": 1,
+            },
+            False,
+        ],
+    ],
+}
+
+parameters, names = parametrize(
+    PARAMS_TO_TEST, max_tests=int(os.getenv("MAX_TESTCASES", 50)), seed=None
+)
+
+
+@pytest.mark.xfail(
+    reason="Either fused kernels are not installed, or 'Cannot re-initialize CUDA in forked subprocess'"
+)
+@pytest.mark.parametrize("param_dict", parameters, ids=names)
+def test_instantiate(param_dict):
+    t1 = test_instantiate_optimizers_class()
+    t1.run_test_model_instantiation(param_dict=param_dict)
+
+
+OPTIMIZER_PARAMS = {
+    "optimizer": [
+        {"type": "adam", "params": {"lr": 0.0006}},
+        {"type": "onebitadam", "params": {"lr": 0.0006}},
+        {"type": "cpu_adam", "params": {"lr": 0.0006}},
+        {"type": "cpu_torch_adam", "params": {"lr": 0.0006}},
+        {"type": "sm3", "params": {"lr": 0.0006}},
+        {"type": "lion", "params": {"lr": 0.0006}},
+        {"type": "madgrad_wd", "params": {"lr": 0.0006}},
+    ]
+}
+opt_params, opt_name = parametrize(
+    OPTIMIZER_PARAMS, max_tests=int(os.getenv("MAX_TESTCASES", 50)), seed=None
+)
+
+
+@pytest.mark.xfail(
+    reason="Either fused kernels are not installed, or 'Cannot re-initialize CUDA in forked subprocess'"
+)
+@pytest.mark.parametrize("param_dict", opt_params, ids=opt_name)
+def test_instantiate_optimizers(param_dict):
+    t1 = test_instantiate_optimizers_class()
+    t1.run_test_model_instantiation(param_dict=param_dict)
+
+
+class test_instantiate_optimizers_class(DistributedTest):
+    world_size = 2
+
+    def run_test_model_instantiation(self, yaml_list=None, param_dict=None):
+        from deepspeed.runtime.pipe.engine import PipelineEngine, DeepSpeedEngine
+
+        model, optimizer, lr_scheduler, args_loaded = model_setup(yaml_list, param_dict)
+        if args_loaded.pipe_parallel_size < 2:
+            assert isinstance(
+                model, DeepSpeedEngine
+            ), "test model instantiation " + str(yaml_list)
+        else:
+            assert isinstance(model, PipelineEngine), "test model instantiation " + str(
+                yaml_list
+            )
+        if torch.distributed.get_world_size() == 1 or torch.distributed.get_rank() == 0:
+            clear_test_dirs()
diff --git a/tests/model/test_model_train.py b/tests/model/test_model_train.py
new file mode 100644
index 
0000000000000000000000000000000000000000..65adfcdeeab3374628e05a0f7a8c530ea426007a --- /dev/null +++ b/tests/model/test_model_train.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Instantiate models, run for a small number of iterations, and check that training loss improves. + +Performs testing using a linear grid search over important parameter values, so that each setting that differs from the base is tested in isolation. + +Potentially use fuzzing to test parameters in combination. +""" +import pytest +import train +from copy import deepcopy +from unittest.mock import patch +from megatron.neox_arguments import NeoXArgs +from tests.common import simulate_deepy_env, BASE_CONFIG + +PARAMS_TO_TEST = { + "gpt_j_residual": [True, False], + "pos_emb": ["learned", "rotary", "sinusoidal", "rpe", "alibi", "none"], + "attention_config": [ + "global", + "local", + "sparse_fixed", + "sparse_variable", + "bigbird", + "bslongformer", + "gmlp", + "flash", + ], + "hidden_dropout": [0, 0.1], + "weight_decay": [0, 0.1], + "use_bias_in_attn_linear": [True, False], + "use_bias_in_norms": [True, False], + "precision": ["fp16", "fp32", "bfloat16"], +} + +keys_to_test = PARAMS_TO_TEST.keys() + +# TODO: fix model training tests +@pytest.mark.skip( + reason="All model tests are skipped until we fix the CUDA + torch multiprocessing issue." 
+)
+@pytest.mark.parametrize(
+    "key, value",
+    [(key, value) for key in keys_to_test for value in PARAMS_TO_TEST[key]],
+)
+def test_model_training_options(monkeypatch, key, value):
+    # TODO: Possibly add testing over world_size=2 back in
+    neox_args = NeoXArgs.from_dict(BASE_CONFIG)
+    if getattr(neox_args, key) == value:
+        pytest.skip("Skipping to avoid redundancy as no change in base config")
+    if key == "precision" and value == "bfloat16":
+        pytest.xfail(
+            reason="Assumes that ZeRO optimization stage has been set in the YAML"
+        )
+    param_dict = {key: value}
+    run_train_test(monkeypatch, overwrite_values=param_dict)
+
+
+def run_train_test(monkeypatch, overwrite_values: dict):
+    max_train_iters = 32
+    checkpoint_args = {"train_iters": max_train_iters}
+    # merge the iteration cap into the caller's overrides instead of discarding them
+    overwrite_values.update(checkpoint_args)
+    input_args = ["train.py", "tests/config/test_setup.yml"]
+    deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args)
+
+    # train the model, patching collect_loss_for_unit_test to track the loss at each step
+    loss_per_iteration = []
+    with patch(
+        "megatron.training.collect_loss_for_unit_test",
+        side_effect=lambda x: loss_per_iteration.append(x),
+    ):
+        train.main(input_args=deepspeed_main_args, overwrite_values=overwrite_values)
+    assert (
+        len(loss_per_iteration) == max_train_iters
+    ), "patching should have collected loss values from each train step"
+
+    # loss should have decreased by now (otherwise, raising max_train_iters might let the test pass spuriously)
+    assert min(loss_per_iteration) < loss_per_iteration[0], (
+        "training loss should improve within " + str(max_train_iters) + " steps"
+    )
diff --git a/tests/neox_args/__init__.py b/tests/neox_args/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2ef7435cc559c47f6bdbd51ab873bb3eefe785e
--- /dev/null
+++ b/tests/neox_args/__init__.py
@@ -0,0 +1,3 @@
+"""
+testing of implementation of command line arguments and configuration (NeoXArgs)
+"""
diff --git a/tests/neox_args/test_neoxargs_commandline.py b/tests/neox_args/test_neoxargs_commandline.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d3c7e5fb15d42e022c9f0cbcdba5e34531ab8ce
--- /dev/null
+++ b/tests/neox_args/test_neoxargs_commandline.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
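+
+# Illustrative (assumed) CLI shapes exercised by the tests below; config file
+# names are examples from the configs directory, --hostfile uses a mock path:
+#
+#   python deepy.py train.py 125M.yml local_setup.yml
+#   python deepy.py train.py 125M local_setup              # .yml suffix optional
+#   python deepy.py train.py -d configs 125M.yml local_setup.yml
+#   python deepy.py train.py 125M.yml local_setup.yml --hostfile=/mock_path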
+
+"""
+verify parsing and handover of command line arguments
+"""
+import pytest
+import sys
+from unittest.mock import patch
+
+from ..common import get_root_directory, get_config_directory, get_configs_with_path
+
+
+@pytest.mark.cpu
+def test_neoxargs_consume_deepy_args_without_config_dir():
+    """
+    verify consume_deepy_args processes command line arguments without a config dir
+    """
+
+    from megatron.neox_arguments import NeoXArgs
+
+    # load neox args with command line
+    with patch(
+        "sys.argv",
+        [str(get_root_directory() / "deepy.py"), "train.py"]
+        + get_configs_with_path(["125M.yml", "local_setup.yml"]),
+    ):
+        args_loaded_consume = NeoXArgs.consume_deepy_args()
+
+    # load neox args directly from yaml files
+    args_loaded_yamls = NeoXArgs.from_ymls(
+        get_configs_with_path(["125M.yml", "local_setup.yml"])
+    )
+
+    # update values from yaml files that cannot otherwise be matched
+    args_loaded_yamls.update_value("user_script", "train.py")
+    args_loaded_yamls.wandb_group = args_loaded_consume.wandb_group
+
+    assert args_loaded_yamls == args_loaded_consume
+
+
+@pytest.mark.cpu
+def test_neoxargs_consume_deepy_args_without_yml_suffix():
+    """
+    verify consume_deepy_args processes command line arguments given without the .yml suffix
+    """
+
+    from megatron.neox_arguments import NeoXArgs
+
+    # load neox args with command line
+    with patch(
+        "sys.argv",
+        [str(get_root_directory() / "deepy.py"), "train.py"]
+        + get_configs_with_path(["125M", "local_setup", "cpu_mock_config.yml"]),
+    ):
+        args_loaded_consume = NeoXArgs.consume_deepy_args()
+
+    # load neox args directly from yaml files
+    args_loaded_yamls = NeoXArgs.from_ymls(
+        get_configs_with_path(["125M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+    )
+
+    # update values from yaml files that cannot otherwise be matched
+    args_loaded_yamls.update_value("user_script", "train.py")
+    args_loaded_yamls.wandb_group = args_loaded_consume.wandb_group
+
+    assert args_loaded_yamls == args_loaded_consume
+
+
+@pytest.mark.cpu
+def test_neoxargs_consume_deepy_args_with_hostfile_param():
+    """
+    Verify consume_deepy_args processes command line arguments given without the .yml suffix.
+    Also test the hostfile CLI arg
+    """
+
+    from megatron.neox_arguments import NeoXArgs
+
+    # load neox args with command line
+    with patch(
+        "sys.argv",
+        [str(get_root_directory() / "deepy.py"), "train.py"]
+        + get_configs_with_path(["125M", "local_setup", "cpu_mock_config.yml"])
+        + ["--hostfile=/mock_path"],
+    ):
+        args_loaded_consume = NeoXArgs.consume_deepy_args()
+
+    # load neox args directly from yaml files
+    args_loaded_yamls = NeoXArgs.from_ymls(
+        get_configs_with_path(["125M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+    )
+
+    # update values from yaml files that cannot otherwise be matched
+    args_loaded_yamls.update_value("user_script", "train.py")
+    args_loaded_yamls.wandb_group = args_loaded_consume.wandb_group
+
+    assert args_loaded_yamls == args_loaded_consume
+
+
+@pytest.mark.cpu
+def test_neoxargs_consume_deepy_args_with_config_dir():
+    """
+    verify consume_deepy_args processes command line arguments including a config dir
+    """
+
+    from megatron.neox_arguments import NeoXArgs
+
+    # load neox args with command line
+    with patch(
+        "sys.argv",
+        [
+            str(get_root_directory() / "deepy.py"),
+            "train.py",
+            "-d",
+            str(get_config_directory()),
+        ]
+        + ["125M.yml", "local_setup.yml", "cpu_mock_config.yml"],
+    ):
+        args_loaded_consume = NeoXArgs.consume_deepy_args()
+
+    # load neox args directly from yaml files
+    args_loaded_yamls = NeoXArgs.from_ymls(
+        get_configs_with_path(["125M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+    )
+
+    # update values from yaml files that cannot otherwise be matched
+    args_loaded_yamls.update_value("user_script", "train.py")
+    args_loaded_yamls.wandb_group = args_loaded_consume.wandb_group
+
+    assert args_loaded_yamls == args_loaded_consume
+
+
+@pytest.mark.cpu
+def test_neoxargs_consume_neox_args():
+    """
+    verify megatron args are correctly consumed after sending via deepspeed
+    """
+    from megatron.neox_arguments import NeoXArgs
+
+    # initially load config from files as would be the case in deepy.py
+    yaml_list = get_configs_with_path(
+        ["125M.yml", "local_setup.yml", "cpu_mock_config.yml"]
+    )
+    args_baseline = NeoXArgs.from_ymls(yaml_list)
+    args_baseline.update_value("user_script", str(get_root_directory() / "train.py"))
+    deepspeed_main_args = args_baseline.get_deepspeed_main_args()
+
+    # patch sys.argv so that args can be accessed by set_global_variables within initialize_megatron
+    with patch("sys.argv", deepspeed_main_args):
+        args_loaded = NeoXArgs.consume_neox_args()
+
+    # TODO: is the wandb group really supposed to be changed here?
+    args_loaded.wandb_group = args_baseline.wandb_group
+    assert args_baseline.megatron_config == args_loaded.megatron_config
diff --git a/tests/neox_args/test_neoxargs_implementation.py b/tests/neox_args/test_neoxargs_implementation.py
new file mode 100644
index 0000000000000000000000000000000000000000..176887c9c3a1b5fcea244b1e6c7d51e56c05c912
--- /dev/null
+++ b/tests/neox_args/test_neoxargs_implementation.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
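+
+# Background (hypothetical illustration, not part of the original file):
+# NeoXArgs is assumed to be assembled from several parent dataclasses. If two
+# parents both declared a field such as `seq_length`, one definition would
+# silently shadow the other, which is what validate_keys() below guards against.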
+
+"""
+check the implementation of NeoXArgs for duplication errors (fields that would overwrite each other)
+"""
+import pytest
+
+
+@pytest.mark.cpu
+def test_neoxargs_duplicates():
+    """
+    tests that there are no duplicates among parent classes of NeoXArgs
+    """
+    from megatron import NeoXArgs
+
+    assert NeoXArgs.validate_keys(), "test_neoxargs_duplicates"
diff --git a/tests/neox_args/test_neoxargs_load.py b/tests/neox_args/test_neoxargs_load.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5d278112c087fdc71c9f2fbbd83c7ca7966fb36
--- /dev/null
+++ b/tests/neox_args/test_neoxargs_load.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+load all configs in neox/configs in order to perform the validations implemented in NeoXArgs
+"""
+import pytest
+import yaml
+from ..common import get_configs_with_path
+
+
+def run_neox_args_load_test(yaml_files):
+    from megatron.neox_arguments import NeoXArgs
+
+    yaml_list = get_configs_with_path(yaml_files)
+    args_loaded = NeoXArgs.from_ymls(yaml_list)
+    assert isinstance(args_loaded, NeoXArgs)
+
+    # initialize an empty config dictionary to be filled by the yamls
+    config = dict()
+
+    # iterate over all yaml files to be loaded
+    for conf_file_name in yaml_list:
+
+        # load file
+        with open(conf_file_name) as conf_file:
+            conf = yaml.load(conf_file, Loader=yaml.FullLoader)
+
+        # check for key duplicates and load values
+        for conf_key, conf_value in conf.items():
+            if conf_key in config:
+                raise ValueError(
+                    f"Conf file {conf_file_name} has the following duplicate key, already set by a previously loaded file: {conf_key}"
+                )
+
+            conf_key_converted = conf_key.replace(
+                "-", "_"
+            )  # TODO remove replace and update configuration files?
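+            # e.g. a YAML key "pipe-parallel-size" would be looked up on
+            # NeoXArgs as "pipe_parallel_size" (hypothetical key, illustrating
+            # the dash-to-underscore conversion above)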
+            config[conf_key_converted] = conf_value
+
+    # validate that neox args has the same value as specified in the config (if specified in the config)
+    for k, v in config.items():
+        neox_args_value = getattr(args_loaded, k)
+        assert v == neox_args_value, (
+            "loaded neox args value "
+            + str(k)
+            + " == "
+            + str(neox_args_value)
+            + " differs from the config file value "
+            + str(v)
+        )
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_125M_local_setup():
+    """
+    verify 125M.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["125M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_125M_local_setup_text_generation():
+    """
+    verify 125M.yml can be loaded together with text generation without raising validation errors
+    """
+    run_neox_args_load_test(
+        ["125M.yml", "local_setup.yml", "text_generation.yml", "cpu_mock_config.yml"]
+    )
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_350M_local_setup():
+    """
+    verify 350M.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["350M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_760M_local_setup():
+    """
+    verify 760M.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["760M.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_2_7B_local_setup():
+    """
+    verify 2-7B.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["2-7B.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_6_7B_local_setup():
+    """
+    verify 6-7B.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["6-7B.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_13B_local_setup():
+    """
+    verify 13B.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["13B.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_1_3B_local_setup():
+    """
+    verify 1-3B.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["1-3B.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_load_arguments_175B_local_setup():
+    """
+    verify 175B.yml can be loaded without raising validation errors
+    """
+    run_neox_args_load_test(["175B.yml", "local_setup.yml", "cpu_mock_config.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_fail_instantiate_without_required_params():
+    """
+    verify that an error is raised if required arguments are not provided
+    """
+    with pytest.raises(Exception):
+        run_neox_args_load_test(["local_setup.yml"])
+
+
+@pytest.mark.cpu
+def test_neoxargs_fail_instantiate_without_any_params():
+    """
+    verify that an error is raised if no arguments at all are provided
+    """
+    from megatron.neox_arguments import NeoXArgs
+
+    with pytest.raises(Exception):
+        NeoXArgs()
diff --git a/tests/neox_args/test_neoxargs_usage.py b/tests/neox_args/test_neoxargs_usage.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f8ba7bd2f3cd1a380c892b8254e15ace1e35677
--- /dev/null
+++ b/tests/neox_args/test_neoxargs_usage.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+plausibility check for the usage of neox_args in the megatron codebase
+"""
+import pytest
+import re
+from ..common import get_root_directory
+
+
+@pytest.mark.cpu
+def test_neoxargs_usage():
+    """
+    checks for code pieces of the pattern "neox_args.*" and verifies that every argument used this way is defined in NeoXArgs
+    """
+    from megatron.neox_arguments import NeoXArgs
+
+    declared_all = True
+    neox_args_attributes = set(NeoXArgs.__dataclass_fields__.keys())
+
+    # we exclude a number of properties (implemented with the @property decorator) or functions that we know exist
+    exclude = set(
+        [
+            "params_dtype",
+            "deepspeed_config",
+            "get",
+            "pop",
+            "get_deepspeed_main_args",
+            'optimizer["params"]',
+            "attention_config[layer_number]",
+            "adlr_autoresume_object",
+            "update_value",
+            "all_config",
+            "tensorboard_writer",
+            "tokenizer",
+            "train_batch_size]",
+            "items",
+            "configure_distributed_args",
+            "build_tokenizer",
+            "attention_config[i]",
+            "print",
+            "update",
+        ]
+    )
+
+    # test file by file
+    for filename in (get_root_directory() / "megatron").glob("**/*.py"):
+        if filename.name in ["text_generation_utils.py", "train_tokenizer.py"]:
+            continue
+
+        # load file
+        with open(filename, "r") as f:
+            file_contents = f.read()
+
+        # find args matches
+        matches = list(
+            re.findall(
+                r"(?<=neox_args\.).{2,}?(?=[\s\n(){}+-/*;:,=,[,\]])", file_contents
+            )
+        )
+        if len(matches) == 0:
+            continue
+
+        # compare
+        for match in matches:
+            if match not in neox_args_attributes and match not in exclude:
+                print(
+                    f"(argument used but not found in neox args): {filename.name}: {match}",
+                    flush=True,
+                )
+                declared_all = False
+
+    assert declared_all, "all arguments used in the code should be defined in NeoXArgs"
diff --git a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
index 0000000000000000000000000000000000000000..6fd100ea2be8927d9ac7c235b0d1580c1452be43
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,20 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
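+
+# Example invocations (paths assumed; run from the repository root):
+#   pytest tests -m cpu              # run only the tests marked as CPU-safe
+#   MAX_TESTCASES=10 pytest tests    # cap the number of generated parametrizations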
+ +[pytest] +markers = + cpu: marks tests that can be run on cpu +filterwarnings = + ignore::DeprecationWarning:pkg_resources.* + ignore::DeprecationWarning:torch.* diff --git a/tests/test_configs/test_train_base.yml b/tests/test_configs/test_train_base.yml new file mode 100644 index 0000000000000000000000000000000000000000..bb66a5b9734d073002dcced57dd8a0c2e6bf8046 --- /dev/null +++ b/tests/test_configs/test_train_base.yml @@ -0,0 +1,119 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# GPT_2 pretraining setup +{ + # parallelism settings ( you will want to change these based on your cluster setup, ideally scheduling pipeline stages + # across the node boundaries ) + "pipe_parallel_size": 0, + "model_parallel_size": 1, + + # model settings + "num_layers": 2, + "hidden_size": 192, + "num_attention_heads": 6, + "seq_length": 1024, + "max_position_embeddings": 1024, + "norm": "layernorm", + "pos_emb": "rotary", + "no_weight_tying": true, + + # these should provide some speedup but takes a while to build, set to true if desired + "scaled_upper_triang_masked_softmax_fusion": false, + "bias_gelu_fusion": false, + "rope_fusion": false, + "layernorm_fusion": false, + + # optimizer settings + "optimizer": { + "type": "Adam", + "params": { + "lr": 0.0006, + "betas": [0.9, 0.999], + "eps": 1.0e-8, + } + }, + + # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training + "zero_optimization": { + "stage": 0, + "allgather_partitions": True, + "allgather_bucket_size": 500000000, + "overlap_comm": True, + "reduce_scatter": True, + "reduce_bucket_size": 500000000, + "contiguous_gradients": True, + }, + + # batch / data settings + "train_micro_batch_size_per_gpu": 4, + "data_impl": "mmap", + "split": "949,50,1", + + # activation checkpointing + "checkpoint_activations": true, + "checkpoint_num_layers": 1, + "partition_activations": true, + "synchronize_each_layer": true, + + # regularization + "gradient_clipping": 1.0, + "weight_decay": 0.0, + "hidden_dropout": 0.0, + "attention_dropout": 0.0, + + # precision settings + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + # misc. 
training settings + "train_iters": 320000, + "lr_decay_iters": 320000, + "distributed_backend": "nccl", + "lr_decay_style": "cosine", + "warmup": 0.01, + "checkpoint_factor": 10000, + "eval_interval": 1000, + "eval_iters": 10, + + # logging + "log_interval": 100, + "steps_per_print": 10, + "keep_last_n_checkpoints": 4, + "wall_clock_breakdown": true, + + # Suggested data paths when using GPT_NeoX locally + "data_path": "data/enwik8/enwik8_text_document", + + # or for weighted datasets: + # "train-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "test-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "valid-data-paths": ["data/enwik8/enwik8_text_document", "data/enwik8/enwik8_text_document"], + # "train-data-weights": [1., 2.], + # "test-data-weights": [2., 1.], + # "valid-data-weights": [0.5, 0.4], + + "vocab_file": "data/gpt2-vocab.json", + "merge_file": "data/gpt2-merges.txt", + "save": "test_checkpoint", + "load": "test_checkpoint", + "tensorboard_dir": "test_tensorboard", + "log_dir": "test_logs", + +} diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/unit/test_arguments.py b/tests/unit/test_arguments.py new file mode 100644 index 0000000000000000000000000000000000000000..b52a3b065fae97ff89b69181eb1900d6b4354146 --- /dev/null +++ b/tests/unit/test_arguments.py @@ -0,0 +1,49 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from megatron.neox_arguments import NeoXArgs +from tests.common import BASE_CONFIG, DistributedTest + + +def test_main_constructor(): + input_args = ["train.py", "tests/config/test_setup.yml"] + neox_args = NeoXArgs.consume_deepy_args(input_args) + deepspeed_main_args = neox_args.get_deepspeed_main_args() + neox_args = NeoXArgs.consume_neox_args(input_args=deepspeed_main_args) + neox_args.configure_distributed_args() + + +class test_constructor_from_ymls_class(DistributedTest): + world_size = 2 + + def test(self): + neox_args = NeoXArgs.from_ymls(["tests/config/test_setup.yml"]) + neox_args.configure_distributed_args() + + +def test_constructor_from_ymls(): + t1 = test_constructor_from_ymls_class() + t1.test() + + +class test_constructor_from_dict_class(DistributedTest): + world_size = 2 + + def test(self): + neox_args = NeoXArgs.from_dict(BASE_CONFIG) + + +def test_constructor_from_dict(): + t1 = test_constructor_from_dict_class() + t1.test() diff --git a/tests/unit/test_dependencies.py b/tests/unit/test_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..d870921e855e50f4df5e10b7967413ee0d46a7a0 --- /dev/null +++ b/tests/unit/test_dependencies.py @@ -0,0 +1,7 @@ +import pytest +from megatron import fused_kernels + + +def test_fused_kernels(): + pytest.xfail(reason="Fused kernels require manual intervention to install") + fused_kernels.load_fused_kernels() diff --git a/tests/unit/test_format_conversion_scripts.py b/tests/unit/test_format_conversion_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..6935e480a805f6b12a372ffa9af58bdbd10fecaa --- /dev/null +++ b/tests/unit/test_format_conversion_scripts.py @@ -0,0 +1,28 @@ +import pytest +from tools.ckpts import convert_neox_to_hf +from tests.common import simulate_deepy_env, save_random_model +from megatron.neox_arguments.neox_args import NeoXArgsTokenizer + + +@pytest.mark.skip( + reason="Conversion test is skipped until we fix the CUDA + torch multiprocessing issue." +) +def test_gpt_neox_to_huggingface(monkeypatch, tmpdir, tmp_path): + # Generate random GPT-NEOX model, check we can convert to hf format + + model_dir = str(tmpdir) + input_args = ["train.py", "tests/config/test_setup.yml"] + deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args) + save_random_model(deepspeed_main_args, model_dir, train_iters=1) + + # Generate output + script_args = [ + "--config_file", + "tests/config/test_setup.yml", + "--input_dir", + model_dir + "/global_step1", + "--output_dir", + model_dir, + ] + overwrite_values = {"tokenizer_type": NeoXArgsTokenizer.tokenizer_type} + convert_neox_to_hf.main(input_args=script_args, overwrite_values=overwrite_values) diff --git a/tests/unit/test_launcher_scripts.py b/tests/unit/test_launcher_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc38f11161f7043e09fc03313b4c8b376646f58 --- /dev/null +++ b/tests/unit/test_launcher_scripts.py @@ -0,0 +1,120 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ +import pytest + +import eval +import generate +import train +from megatron.neox_arguments import NeoXArgs +from tests.common import save_random_model, simulate_deepy_env +from tools.datasets import preprocess_data + + +@pytest.fixture( + params=[ + "HFGPT2Tokenizer", + "HFTokenizer", + "GPT2BPETokenizer", + "CharLevelTokenizer", + "TiktokenTokenizer", + "SPMTokenizer", + ] +) +def tokenizer_type(request): + return request.param + + +@pytest.fixture(params=[None, "tests/data/sample_prompt.txt"]) +def sample_input_file(request): + return request.param + + +@pytest.mark.cpu +def test_preprocess_data(tokenizer_type): + if tokenizer_type == "SPMTokenizer": + pytest.xfail( + reason="Expected easy resolution: Need to provide a valid model file from somewhere" + ) + vocab_file = { + "HFTokenizer": "tests/data/hf_cache/tokenizer/gpt2.json", + "TiktokenTokenizer": "cl100k_base", + "HFGPT2Tokenizer": "gpt2", + } + input_args = [ + "--input", + "./tests/data/enwik8_first100.txt", + "--output-prefix", + "./tests/data/enwik8_first100", + "--vocab", + vocab_file.get(tokenizer_type, "./data/gpt2-vocab.json"), + "--tokenizer-type", + tokenizer_type, + "--merge-file", + "./data/gpt2-merges.txt", + "--append-eod", + ] + preprocess_data.main(input_args) + + +@pytest.mark.skip( + reason="All model tests are skipped until we fix the CUDA + torch multiprocessing issue." +) +def test_generate(monkeypatch, tmpdir, tmp_path, sample_input_file): + model_dir = str(tmpdir) + sample_output_file = str(tmp_path) + ".txt" + input_args = ["generate.py", "tests/config/test_setup.yml"] + deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args) + save_random_model(deepspeed_main_args, model_dir) + + # Generate output + generate_args = { + "load": model_dir, + "sample_input_file": sample_input_file, + "sample_output_file": sample_output_file, + } + generate.main(input_args=deepspeed_main_args, overwrite_values=generate_args) + + +@pytest.mark.skip( + reason="All model tests are skipped until we fix the CUDA + torch multiprocessing issue." +) +def test_evaluate(monkeypatch, tmpdir, tmp_path): + model_dir = str(tmpdir) + sample_output_file = str(tmp_path) + input_args = ["generate.py", "tests/config/test_setup.yml"] + deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args) + save_random_model(deepspeed_main_args, model_dir) + + # Generate output + evaluate_args = { + "load": model_dir, + "eval_tasks": ["lambada"], # ["lambada", "hellaswag", "piqa", "sciq"], + "eval_results_prefix": sample_output_file, + } + eval.main(input_args=deepspeed_main_args, overwrite_values=evaluate_args) + + +@pytest.mark.skip( + reason="All model tests are skipped until we fix the CUDA + torch multiprocessing issue." +) +def test_finetuning(monkeypatch, tmpdir, tmp_path): + # Save random model, load random model, keep training + # TODO: add mocking to check that we're not ignoring the previously loaded model + model_dir = str(tmpdir) + sample_output_file = str(tmp_path) + input_args = ["generate.py", "tests/config/test_setup.yml"] + deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args) + save_random_model(deepspeed_main_args, model_dir) + + # Generate output + finetune_args = {"load": model_dir, "finetune": True} + train.main(input_args=deepspeed_main_args, overwrite_values=finetune_args) + + +@pytest.mark.skip( + reason="All model tests are skipped until we fix the CUDA + torch multiprocessing issue." 
+) +def test_train_launcher(monkeypatch): + input_args = ["train.py", "tests/config/test_setup.yml"] + deepspeed_main_args = simulate_deepy_env(monkeypatch, input_args) + train.main(input_args=deepspeed_main_args) diff --git a/tests/unit/test_tokenizer.py b/tests/unit/test_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..88e8f513cc3f5f8c14e9589c632f1d67b51ad738 --- /dev/null +++ b/tests/unit/test_tokenizer.py @@ -0,0 +1,14 @@ +import pytest +from megatron.tokenizer import train_tokenizer + + +@pytest.mark.cpu +def test_train_tokenizer(): + input_args = [ + "--json_input_dir", + "./tests/data/enwik8_first100.txt", + "--tokenizer_output_path", + "", + ] + args = train_tokenizer.parse_args(input_args) + train_tokenizer.main(args) diff --git a/tests/unit/test_url_accessibility.py b/tests/unit/test_url_accessibility.py new file mode 100644 index 0000000000000000000000000000000000000000..861723f95366966c8677f601440277a09dafa239 --- /dev/null +++ b/tests/unit/test_url_accessibility.py @@ -0,0 +1,25 @@ +import pytest +import requests + +from tools.datasets.corpora import DATA_DOWNLOADERS + + +def check_url_accessible(url): + try: + response = requests.head(url, timeout=5) + response.raise_for_status() + return True + except requests.exceptions.RequestException as e: + print(f"Error: Unable to access URL - {e}") + return False + + +@pytest.mark.cpu +@pytest.mark.parametrize("dataset_name", list(DATA_DOWNLOADERS.keys())) +def test_url_accessibility(dataset_name): + if dataset_name == "pass": + return + elif not dataset_name == "enwik8": + pytest.xfail() + for url in DATA_DOWNLOADERS[dataset_name].urls: + assert check_url_accessible(url) diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ccfb1c5f88851d3669d36fe2588c2add368a9aad --- /dev/null +++ b/tools/README.md @@ -0,0 +1,15 @@ +# GPT-NeoX Auxiliary Tools + +This directory contains a number of auxiliary tools that are useful for working with GPT-NeoX but not part of the main training code. + +## Bash + +This directory contains some simple, frequently used bash commands to make working on multiple machines easier. + +## Checkpoints + +This directory contains tools for manipulating and converting checkpoints including changing the parallelism settings of a pretrained model, converting between GPT-NeoX and the transformers library, and updating checkpoints trained with Version 1.x of this library to be compatible with Version 2.x. + +## Datasets + +This directory contains tools for downloading and preprocessing datasets to the format expected by the GPT-NeoX library. diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tools/bash/README.md b/tools/bash/README.md new file mode 100644 index 0000000000000000000000000000000000000000..95a307240e4a18f95ce9384490b780eecade5b44 --- /dev/null +++ b/tools/bash/README.md @@ -0,0 +1,8 @@ +# Bash Scripts +Useful for running distributed per-node scripts on e.g. 
Kubernetes
+
+* `kill.sh` kills all python processes
+* `killall.sh` uses pdsh to kill all `train.py` processes on the nodes listed in `/job/hosts/`
+* `sync_cmd.sh` uses pdsh to run a command on all the nodes listed in `/job/hosts/`
+* `sync.sh` uses pdcp to copy each file given as an argument to the same path on all of the nodes listed in `/job/hosts/`
+* `syncdir.sh` uses pdcp to recursively copy each directory given as an argument into its parent directory on all of the nodes listed in `/job/hosts/`
diff --git a/tools/bash/kill.sh b/tools/bash/kill.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bccd46d7e75fa78ea6558d806a9d9692f7ad34be
--- /dev/null
+++ b/tools/bash/kill.sh
@@ -0,0 +1 @@
+pkill -9 python
diff --git a/tools/bash/killall.sh b/tools/bash/killall.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d2f3528bd3b9310f1da759ec3be16dd5e88a5645
--- /dev/null
+++ b/tools/bash/killall.sh
@@ -0,0 +1 @@
+pdsh -f 1024 -R ssh -w ^/job/hosts 'pkill -f train.py'
diff --git a/tools/bash/sync.sh b/tools/bash/sync.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fd9377dbb69981c93d7ff111d1a4128337bcc6e8
--- /dev/null
+++ b/tools/bash/sync.sh
@@ -0,0 +1,28 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env bash
+
+# Push files to all nodes
+# Usage
+# sync.sh file [file2..]
+
+echo Number of files to upload: $#
+
+for file in "$@"
+do
+    full_path=$(realpath $file)
+    echo Uploading $full_path
+    pdcp -f 1024 -R ssh -w ^/job/hosts $full_path $full_path
+done
diff --git a/tools/bash/sync_cmd.sh b/tools/bash/sync_cmd.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2d8a617caf378930ba25c829f20f590c12ba8ab6
--- /dev/null
+++ b/tools/bash/sync_cmd.sh
@@ -0,0 +1,22 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env bash
+
+# Runs a command in parallel across all nodes
+# Usage
+# sync_cmd.sh 'echo "hello world"'
+
+echo "Command: $1";
+pdsh -R ssh -w ^/job/hosts $1
diff --git a/tools/bash/syncdir.sh b/tools/bash/syncdir.sh
new file mode 100644
index 0000000000000000000000000000000000000000..229c9af9a69995506ee965dc86a0cdd495c3ca44
--- /dev/null
+++ b/tools/bash/syncdir.sh
@@ -0,0 +1,29 @@
+# Copyright (c) 2024, EleutherAI
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +# Push files to all nodes +# Usage +# syncdir.sh file [file2..] + +echo Number of files to upload: $# + +for file in "$@" +do + full_path=$(realpath $file) + parentdir="$(dirname "$full_path")" + echo Uploading $full_path to $parentdir + pdcp -f 1024 -R ssh -w ^/job/hosts -r $full_path $parentdir +done diff --git a/tools/ckpts/README.md b/tools/ckpts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..770cfb9c637721ab4384ce12de5c2859133d53ed --- /dev/null +++ b/tools/ckpts/README.md @@ -0,0 +1,150 @@ +# Checkpoint Scripts + + +## Utilities + +### `inspect_checkpoints.py` +Reports information about a saved checkpoint. +``` +usage: inspect_checkpoints.py [-h] [--attributes [ATTRIBUTES ...]] [--interactive] [--compare] [--diff] dir + +positional arguments: + dir The checkpoint dir to inspect. Must be either: - a directory containing pickle binaries saved with 'torch.save' ending in .pt or .ckpt - a single path to a .pt or .ckpt file - two comma separated directories - + in which case the script will *compare* the two checkpoints + +options: + -h, --help show this help message and exit + --attributes [ATTRIBUTES ...] + Name of one or several attributes to query. To access an attribute within a nested structure, use '/' as separator. + --interactive, -i Drops into interactive shell after printing the summary. + --compare, -c If true, script will compare two directories separated by commas + --diff, -d In compare mode, only print diffs +``` + +## HuggingFace Scripts + +### `convert_hf_to_sequential.py` +A script for converting publicly available Huggingface (HF) checkpoints to NeoX format. + +Note that this script requires access to corresponding config files for equivalent NeoX models to those found in Hugging face. + +``` +Example usage: (Converts the 70M Pythia model to NeoX format) +================================================================ +OMPI_COMM_WORLD_RANK=0 CUDA_VISIBLE_DEVICES=0 python tools/ckpts/convert_hf_to_sequential.py \ + --hf-model-name pythia-70m-v0 \ + --revision 143000 \ + --output-dir checkpoints/neox_converted/pythia/70m \ + --cache-dir checkpoints/HF \ + --config configs/pythia/70M.yml configs/local_setup.yml \ + --test + + +For multi-gpu support we must initialize deepspeed: +NOTE: This requires manually changing the arguments below. +================================================================ +CUDA_VISIBLE_DEVICES=0,1,2,3 python ./deepy.py tools/ckpts/convert_hf_to_sequential.py \ + -d configs pythia/70M.yml local_setup.yml +``` +### `convert_module_to_hf.py` +Converts a NeoX model with pipeline parallelism greater than 1 to a HuggingFace transformers `GPTNeoXForCausalLM` model + +Note that this script does not support all NeoX features. +Please investigate carefully whether your model is compatible with all architectures supported by the GPTNeoXForCausalLM class in HF. + +(e.g. 
position embeddings such as AliBi may not be supported by Huggingface's GPT-NeoX architecture) + +``` +usage: convert_module_to_hf.py [-h] [--input_dir INPUT_DIR] [--config_file CONFIG_FILE] [--output_dir OUTPUT_DIR] [--upload] + +Merge MP partitions and convert to HF Model. + +options: + -h, --help show this help message and exit + --input_dir INPUT_DIR + Path to NeoX checkpoint, e.g. /path/to/model/global_step143000 + --config_file CONFIG_FILE + Path to config file for the input NeoX checkpoint. + --output_dir OUTPUT_DIR + Output dir, where to save the HF Model, tokenizer, and configs + --upload Set to true in order to upload to the HF Hub directly. +``` + +### `convert_sequential_to_hf.py` +Converts a NeoX model without pipeline parallelism to a HuggingFace transformers `GPTNeoXForCausalLM` model. + +``` +usage: convert_sequential_to_hf.py [-h] [--input_dir INPUT_DIR] [--config_file CONFIG_FILE] [--output_dir OUTPUT_DIR] [--upload] + +Merge MP partitions and convert to HF Model. + +options: + -h, --help show this help message and exit + --input_dir INPUT_DIR + Path to NeoX checkpoint, e.g. /path/to/model/global_step143000 + --config_file CONFIG_FILE + Path to config file for the input NeoX checkpoint. + --output_dir OUTPUT_DIR + Output dir, where to save the HF Model, tokenizer, and configs + --upload Set to true in order to upload to the HF Hub directly. +``` +### `upload.py` +Uploads a _converted_ checkpoint to the HuggingFace hub. + +``` +python upload.py +``` +## NeoX-20B Scripts + +### `merge20b.py` +Reduces model and pipeline parallelism of a 20B checkpoint to 1 and 1. + +``` +usage: merge20b.py [-h] [--input_dir INPUT_DIR] [--output_dir OUTPUT_DIR] + +Merge 20B checkpoint. + +options: + -h, --help show this help message and exit + --input_dir INPUT_DIR + Checkpoint dir, which should contain (e.g. a folder named "global_step150000") + --output_dir OUTPUT_DIR + Output dir, to save the 1-GPU weights configs +``` +## Llama Scripts + +### `convert_raw_llama_weights_to_neox.py` +Takes a Llama checkpoint and puts it into a NeoX-compatible format. + +``` +usage: convert_raw_llama_weights_to_neox.py [-h] [--input_dir INPUT_DIR] [--model_size {7B,13B,30B,65B,tokenizer_only}] [--output_dir OUTPUT_DIR] [--num_output_shards NUM_OUTPUT_SHARDS] [--pipeline_parallel] + +Convert raw LLaMA checkpoints to GPT-NeoX format. + +options: + -h, --help show this help message and exit + --input_dir INPUT_DIR + Location of LLaMA weights, which contains tokenizer.model and model folders + --model_size {7B,13B,30B,65B,tokenizer_only} + --output_dir OUTPUT_DIR + Location to write GPT-NeoX mode + --num_output_shards NUM_OUTPUT_SHARDS + --pipeline_parallel Only use if PP>1 +``` + +### `convert_hf_llama_to_neox.py` +Takes an HF Llama checkpoint and puts it into a NeoX-compatible format. + +Note that this does not support pipeline parallelism! 
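+
+As context for the usage below: the converter shards each Llama weight across tensor-parallel ranks in one of two ways. Column-parallel weights (the fused QKV projection, the MLP gate/up projections, the embedding, and the LM head) are chunked along dim 0, while row-parallel weights (`o_proj`, `down_proj`) are chunked along dim 1. A minimal sketch of that rule follows; the shapes and `tp` value are illustrative, not taken from the script:
+
+```
+import torch
+
+tp = 2  # number of tensor parallelism ranks (the --tp argument)
+w_col = torch.randn(11008, 4096)  # column-parallel weight, e.g. mlp.up_proj
+w_row = torch.randn(4096, 11008)  # row-parallel weight, e.g. mlp.down_proj
+
+col_shards = torch.chunk(w_col, tp, dim=0)  # each rank holds a slice of the output dim
+row_shards = torch.chunk(w_row, tp, dim=1)  # each rank holds a slice of the input dim
+```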
+ +``` +usage: convert_hf_llama_to_neox.py [-h] [--tp TP] [--pp PP] [--model MODEL] [--model_path MODEL_PATH] + +options: + -h, --help show this help message and exit + --tp TP Number of tensor parallelism ranks + --pp PP Number of pipeline parallelism stages + --model MODEL HF model name + --model_path MODEL_PATH + Path to save model +``` diff --git a/tools/ckpts/convert_hf_llama_to_neox.py b/tools/ckpts/convert_hf_llama_to_neox.py new file mode 100644 index 0000000000000000000000000000000000000000..21249995ba3411ff0d799c2df4356688cdde73d5 --- /dev/null +++ b/tools/ckpts/convert_hf_llama_to_neox.py @@ -0,0 +1,211 @@ +import torch +import argparse +from transformers import AutoTokenizer, AutoModelForCausalLM +import os +import tqdm + + +def convert_model(hf_state_dict, hf_config, tp_ranks): + conv_state_dicts = [{} for _ in range(tp_ranks)] + # get embeddings... + for i, chunk in enumerate( + torch.chunk(hf_state_dict["model.embed_tokens.weight"], tp_ranks, dim=0) + ): + conv_state_dicts[i][ + "sequential.0.word_embeddings.weight" + ] = chunk.clone().detach() + print( + "model.embed_tokens.weight", + hf_state_dict["model.embed_tokens.weight"].shape, + "sequential.0.word_embeddings.weight", + conv_state_dicts[0]["sequential.0.word_embeddings.weight"].shape, + ) + # Get config data... + num_kv_heads = hf_config.num_key_value_heads + num_q_heads = hf_config.num_attention_heads + head_dim = hf_config.hidden_size // num_q_heads + # do layers... + for layer_num in tqdm.tqdm(range(model.model.config.num_hidden_layers)): + # --- attention --- + # Output first since it's a simple row parallel... + for i, chunk in enumerate( + torch.chunk( + hf_state_dict[f"model.layers.{layer_num}.self_attn.o_proj.weight"], + tp_ranks, + dim=1, + ) + ): + conv_state_dicts[i][ + f"sequential.{layer_num+2}.attention.dense.weight" + ] = chunk.clone().detach() + print( + f"model.layers.{layer_num}.self_attn.o_proj.weight", + hf_state_dict[f"model.layers.{layer_num}.self_attn.o_proj.weight"].shape, + f"sequential.{layer_num+2}.attention.dense.weight", + conv_state_dicts[0][ + f"sequential.{layer_num+2}.attention.dense.weight" + ].shape, + ) + # Now for attention... + # Split into heads... + q = hf_state_dict[f"model.layers.{layer_num}.self_attn.q_proj.weight"] + k = hf_state_dict[f"model.layers.{layer_num}.self_attn.k_proj.weight"] + v = hf_state_dict[f"model.layers.{layer_num}.self_attn.v_proj.weight"] + # The GQA code splits the heads by the num_q_heads so we also do that + # here to ensure it matches... + q = q.view(num_q_heads, -1, q.shape[-1]) + k = k.view(num_q_heads, -1, q.shape[-1]) + v = v.view(num_q_heads, -1, q.shape[-1]) + # Chunk for tensor parallelism... + for i, q_chunk, k_chunk, v_chunk in zip( + range(tp_ranks), + torch.chunk(q, tp_ranks, dim=0), + torch.chunk(k, tp_ranks, dim=0), + torch.chunk(v, tp_ranks, dim=0), + ): + # Need to join the heads across q, k, v... 
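+            # q_chunk/k_chunk/v_chunk are [heads_per_rank, rows_per_head, hidden];
+            # concatenating on dim=1 groups each query head with its K/V rows, and
+            # view(-1, q.shape[-1]) flattens back to the fused 2-D layout that
+            # NeoX's query_key_value projection expects.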
+ conv_state_dicts[i][ + f"sequential.{layer_num+2}.attention.query_key_value.weight" + ] = ( + torch.cat([q_chunk, k_chunk, v_chunk], dim=1) + .view(-1, q.shape[-1]) + .clone() + .detach() + ) + print( + f"model.layers.{layer_num}.self_attn.(q/k/v)_proj.weight", + hf_state_dict[f"model.layers.{layer_num}.self_attn.q_proj.weight"].shape, + hf_state_dict[f"model.layers.{layer_num}.self_attn.k_proj.weight"].shape, + hf_state_dict[f"model.layers.{layer_num}.self_attn.v_proj.weight"].shape, + f"sequential.{layer_num+2}.attention.query_key_value.weight", + conv_state_dicts[0][ + f"sequential.{layer_num+2}.attention.query_key_value.weight" + ].shape, + ) + # --- mlp --- + # Do SwiGLU weights... + # w1... + for i, (w1, w3) in enumerate( + zip( + torch.chunk( + hf_state_dict[f"model.layers.{layer_num}.mlp.gate_proj.weight"], + tp_ranks, + dim=0, + ), + torch.chunk( + hf_state_dict[f"model.layers.{layer_num}.mlp.up_proj.weight"], + tp_ranks, + dim=0, + ), + ) + ): + conv_state_dicts[i][ + f"sequential.{layer_num+2}.mlp.linear1.weight" + ] = torch.cat([w3.clone().detach(), w1.clone().detach()], dim=0) + print( + f"model.layers.{layer_num}.mlp.gate_proj.weight", + hf_state_dict[f"model.layers.{layer_num}.mlp.gate_proj.weight"].shape, + f"model.layers.{layer_num}.mlp.up_proj.weight", + hf_state_dict[f"model.layers.{layer_num}.mlp.up_proj.weight"].shape, + f"sequential.{layer_num+2}.mlp.w3.weight", + conv_state_dicts[0][f"sequential.{layer_num+2}.mlp.linear1.weight"].shape, + ) + # w2 (output)... + for i, chunk in enumerate( + torch.chunk( + hf_state_dict[f"model.layers.{layer_num}.mlp.down_proj.weight"], + tp_ranks, + dim=1, + ) + ): + conv_state_dicts[i][ + f"sequential.{layer_num+2}.mlp.linear2.weight" + ] = chunk.clone().detach() + print( + f"model.layers.{layer_num}.mlp.down_proj.weight", + hf_state_dict[f"model.layers.{layer_num}.mlp.down_proj.weight"].shape, + f"sequential.{layer_num+2}.mlp.linear2.weight", + conv_state_dicts[0][f"sequential.{layer_num+2}.mlp.linear2.weight"].shape, + ) + # --- norm --- + for i in range(tp_ranks): + conv_state_dicts[i][f"sequential.{layer_num+2}.input_layernorm.scale"] = ( + hf_state_dict[f"model.layers.{layer_num}.input_layernorm.weight"] + .clone() + .detach() + ) + conv_state_dicts[i][ + f"sequential.{layer_num+2}.post_attention_layernorm.scale" + ] = ( + hf_state_dict[ + f"model.layers.{layer_num}.post_attention_layernorm.weight" + ] + .clone() + .detach() + ) + + # Get final ln/linear.... + index = model.model.config.num_hidden_layers + 3 + for i in range(tp_ranks): + conv_state_dicts[i][f"sequential.{index}.norm.scale"] = ( + hf_state_dict["model.norm.weight"].clone().detach() + ) + index += 1 + # do output... 
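+    # Like the embedding, the LM head is column-parallel: lm_head.weight is
+    # chunked along dim 0 (the vocab dimension), one slice per tensor rank.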
+ for i, chunk in enumerate( + torch.chunk(hf_state_dict["lm_head.weight"], tp_ranks, dim=0) + ): + conv_state_dicts[i][ + f"sequential.{index}.final_linear.weight" + ] = chunk.clone().detach() + print( + "lm_head.weight", + hf_state_dict["lm_head.weight"].shape, + f"sequential.{index}.final_linear.weight", + conv_state_dicts[0][f"sequential.{index}.final_linear.weight"].shape, + ) + return conv_state_dicts + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--tp", type=int, default=1, help="Number of tensor parallelism ranks" + ) + parser.add_argument( + "--pp", type=int, default=0, help="Number of pipeline parallelism stages" + ) + parser.add_argument("--model", type=str, default="gpt2", help="HF model name") + parser.add_argument( + "--model_path", type=str, default=None, help="Path to save model" + ) + args = parser.parse_args() + assert args.pp == 0, "Pipeline parallelism not supported yet" + tokenizer = AutoTokenizer.from_pretrained(args.model).save_pretrained( + args.model_path + "/tokenizer" + ) + model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype="auto") + state_dict = model.state_dict() + for key in state_dict.keys(): + print(key, state_dict[key].shape) + os.makedirs(args.model_path, exist_ok=True) + # Setup model directory... + os.makedirs(f"{args.model_path}/0", exist_ok=True) + # Save the latest file so neox can figure out where to grab the weights... + with open(f"{args.model_path}/latest", "w") as f: + f.write("0") + # Convert the model... + tp_state_dicts = convert_model(state_dict, model.model.config, args.tp) + for i in range(args.tp): + torch.save( + { + "dp_world_size": 1, + "mp_world_size": args.tp, + "optimizer": {}, + "global_steps": 1, + "skipped_steps": 1, + "iteration": 1, + "module": tp_state_dicts[i], + }, + f"{args.model_path}/0/mp_rank_{i:02d}_model_states.pt", + ) diff --git a/tools/ckpts/convert_hf_to_sequential.py b/tools/ckpts/convert_hf_to_sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..5e0ada3343a4cbad04b6d1b9788b869e270fdf0f --- /dev/null +++ b/tools/ckpts/convert_hf_to_sequential.py @@ -0,0 +1,658 @@ +import sys +import os +import copy +import deepspeed + +# import time + +import argparse +import torch + +import numpy as np + +from functools import reduce +from transformers import GPTNeoXForCausalLM, GPTNeoXConfig + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) +from megatron.neox_arguments import NeoXArgs +from megatron.training import get_model, get_optimizer, get_learning_rate_scheduler +from megatron.initialize import initialize_megatron +from megatron import mpu +from megatron.checkpointing import load_checkpoint, save_checkpoint + +# from megatron.utils import ( +# Timers, +# init_wandb, +# ) + +""" +A script for converting publicly available Huggingface (HF) checkpoints NeoX format. + +Note that this script requires access to corresponding config files for equivalent NeoX models to those found in Hugging face. 
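+
+For example, the pythia-70m-v0 checkpoint below is paired with its equivalent
+NeoX config, configs/pythia/70M.yml.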
+ +Example usage: (Converts the 70M Pythia model to NeoX format) +================================================================ +OMPI_COMM_WORLD_RANK=0 CUDA_VISIBLE_DEVICES=0 python tools/ckpts/convert_hf_to_sequential.py \ + --hf-model-name pythia-70m-v0 \ + --revision 143000 \ + --output-dir checkpoints/neox_converted/pythia/70m \ + --cache-dir checkpoints/HF \ + --config configs/pythia/70M.yml configs/local_setup.yml \ + --test + + +For multi-gpu support we must initialize deepspeed: +NOTE: This requires manually changing the arguments below. +================================================================ +CUDA_VISIBLE_DEVICES=0,1,2,3 python ./deepy.py tools/ckpts/convert_hf_to_sequential.py \ + -d configs pythia/70M.yml local_setup.yml +""" + +MULTI_GPU_ARGS = " ".join( + [ + "--hf-model-name pythia-70m-v0", + "--revision 143000", + "--output-dir checkpoints/neox_converted/pythia/70m", + "--cache-dir checkpoints/HF", + "--config configs/pythia/70M.yml configs/local_setup.yml", + "--test", + ] +) + + +def convert_hf_to_sequential(hf_model, seq_state_dict): + """Converts the weights of a HuggingFace model to neox 2.0 format. + + :param hf_model: the huggingface model + :param seq_state_dict: the state dict of the equivalent neox model + + returns the updated sequential state dict + """ + num_layers = hf_model.config.num_hidden_layers + # Embedding is layer idx 0 + seq_state_dict[ + "sequential.0.word_embeddings.weight" + ] = hf_model.gpt_neox.embed_in.state_dict()["weight"] + + for layer_hf in range(num_layers): + # offset by 2 + layer_seq = layer_hf + 2 + + # get layer from hf model + hf_layer = hf_model.gpt_neox.layers[layer_hf] + hf_layer_sd = hf_layer.state_dict() + + for key in hf_model.gpt_neox.layers[0].state_dict().keys(): + + if key in ["attention.bias", "attention.masked_bias"]: + continue + seq_state_dict[f"sequential.{layer_seq}.{key}"] = hf_layer_sd[key] + + # Load final layer norm + layer_seq = num_layers + 3 + seq_state_dict[ + f"sequential.{layer_seq}.norm.weight" + ] = hf_model.gpt_neox.final_layer_norm.state_dict()["weight"] + seq_state_dict[ + f"sequential.{layer_seq}.norm.bias" + ] = hf_model.gpt_neox.final_layer_norm.state_dict()["bias"] + + # output embedding / LM head + layer_seq += 1 + seq_state_dict[ + f"sequential.{layer_seq}.final_linear.weight" + ] = hf_model.embed_out.state_dict()["weight"] + + +def shard_sequential_mp(num_mp_ranks, sequential): + """Shards the sequential model into model parallel ranks. 
+
+    :param num_mp_ranks: the number of model parallel ranks
+    :param sequential: the state dict of the sequential model at mp=1
+
+    returns a dict of state dicts for each mp rank
+    """
+    ranks = {x: dict() for x in range(num_mp_ranks)}
+    for k, v in sequential.items():
+        if reduce(
+            np.logical_or,
+            [
+                x in k
+                for x in [
+                    "dense_4h_to_h.bias",
+                    "attention.dense.bias",
+                ]
+            ],
+        ):
+            # Divide by tp_size since they get added together
+            for x in range(num_mp_ranks):
+                ranks[x][k] = v / num_mp_ranks
+        elif reduce(
+            np.logical_or,
+            [
+                x in k
+                for x in [
+                    "layernorm",
+                    "rotary_emb",
+                    "norm.weight",
+                    "norm.bias",
+                ]
+            ],
+        ):
+            # no splitting
+            for x in range(num_mp_ranks):
+                ranks[x][k] = v
+        else:
+            if len(v.shape) == 1:
+                # 1-D params are padded and sharded along their only dim (dim 0)
+                size_per_rank = v.shape[0] / num_mp_ranks
+                if size_per_rank % 128 != 0.0:
+                    padded_size = (128 - (size_per_rank % 128)) + size_per_rank
+                    size_diff = int((padded_size * num_mp_ranks) - v.shape[0])
+                    zero_pad = torch.zeros((size_diff))
+                    v = torch.cat([v, zero_pad], dim=0)
+                else:
+                    padded_size = size_per_rank
+
+                assert size_per_rank % 1.0 == 0.0
+                assert padded_size % 1.0 == 0.0
+
+                padded_size = int(padded_size)
+                size_per_rank = int(size_per_rank)
+
+                for x in range(num_mp_ranks):
+                    if size_per_rank != padded_size:
+                        # need to pad
+                        ranks[x][k] = v[padded_size * x : padded_size * (x + 1)]
+                    else:
+                        ranks[x][k] = v[size_per_rank * x : size_per_rank * (x + 1)]
+
+            elif len(v.shape) == 2:
+
+                if reduce(
+                    np.logical_or,
+                    [
+                        x in k
+                        for x in [
+                            "attention.dense.weight",
+                            "mlp.dense_4h_to_h.weight",
+                        ]
+                    ],
+                ):  # row parallel: shard along the input dim
+                    max_, min_ = 1, 0
+                elif reduce(
+                    np.logical_or,
+                    [
+                        x in k
+                        for x in [
+                            "mlp.dense_h_to_4h.weight",
+                            "mlp.dense_h_to_4h.bias",
+                            "attention.query_key_value.weight",
+                            "attention.query_key_value.bias",
+                            "word_embeddings.weight",
+                            "final_linear.weight",
+                        ]
+                    ],
+                ):
+                    # column parallel: shard along the output dim
+                    max_, min_ = 0, 1
+                else:
+                    raise Exception("Unknown weight to shard: {}".format(k))
+
+                size_per_rank = v.shape[max_] / num_mp_ranks
+                if size_per_rank % 128 != 0.0:
+                    padded_size = (128 - (size_per_rank % 128)) + size_per_rank
+                    size_diff = int((padded_size * num_mp_ranks) - v.shape[max_])
+
+                    assert (
+                        size_diff > 0
+                    ), "[ERROR] size diff is negative: {} for size_per_rank: {}, k:{}, shape:{}, padded_size:{}".format(
+                        size_diff, size_per_rank, k, v.shape, padded_size
+                    )
+
+                    zero_pad = (
+                        torch.zeros((size_diff, v.shape[min_]))
+                        if max_ == 0
+                        else torch.zeros((v.shape[min_], size_diff))
+                    )
+
+                    v = torch.cat([v, zero_pad], dim=max_)
+                else:
+                    padded_size = size_per_rank
+
+                assert size_per_rank % 1.0 == 0.0
+                assert padded_size % 1.0 == 0.0
+
+                padded_size = int(padded_size)
+                size_per_rank = int(size_per_rank)
+
+                for x in range(num_mp_ranks):
+                    if size_per_rank != padded_size:
+                        # need to pad
+                        ranks[x][k] = (
+                            v[padded_size * x : padded_size * (x + 1), :]
+                            if max_ == 0
+                            else v[:, padded_size * x : padded_size * (x + 1)]
+                        )
+                    else:
+                        ranks[x][k] = (
+                            v[size_per_rank * x : size_per_rank * (x + 1), ...]
+ if max_ == 0 + else v[:, size_per_rank * x : size_per_rank * (x + 1)] + ) + + else: + raise NotImplementedError() + + return ranks + + +def replace_sharded_seq(mp_checkpoints, mp_sharded_seq): + """replaces the values within checkpointed configs with those + from the sharded sequential object.""" + + for mp_idx, shard in mp_sharded_seq.items(): + mp_key = f"mp_rank_{mp_idx:02}_model_states.pt" + + # use for loop instead of direct assignment + # to check for compatibility + for k, v in mp_checkpoints[mp_key]["module"].items(): + try: + mp_checkpoints[mp_key]["module"][k] = shard[k] + except KeyError: + print("ERROR key:{} not found in shard.".format(k)) + + +def shard_pp(sequential, mp_rank, num_layers): + """Shards the model into layers. + + :param sequential: the state dict of the sequential model at mp=1 + :param mp_rank: the model parallel rank of the layers + + returns a dict of state dicts for each layer + """ + suffix = f"-model_{mp_rank:02}-model_states.pt" + + layers_seq = dict() + layers_seq[f"layer_00" + suffix] = { + "word_embeddings.weight": sequential[f"sequential.0.word_embeddings.weight"] + } + layers_seq[f"layer_{num_layers+3:02}" + suffix] = { + "norm.weight": sequential[f"sequential.{num_layers+3}.norm.weight"], + "norm.bias": sequential[f"sequential.{num_layers+3}.norm.bias"], + } + + layers_seq[f"layer_{num_layers+4:02}" + suffix] = { + "final_linear.weight": sequential[ + f"sequential.{num_layers+4}.final_linear.weight" + ] + } + + for layer in range(2, num_layers + 2): + layer_keys = [x for x in sequential if ".{}.".format(layer) in x] + layers_seq[f"layer_{layer:02}" + suffix] = { + k.split(".{}.".format(layer))[1]: sequential[k] for k in layer_keys + } + + return layers_seq + + +def shard_pp_mp(num_mp_ranks, sequential, num_layers): + """Shards the model into layers and model parallel ranks. + + :param num_mp_ranks: the number of model parallel ranks + :param sequential: the state dict of the sequential model at mp=1 + :param num_layers: the number of layers in the model + + returns a dict of state dicts for each layer for each model parallel rank + """ + mp_sharded = shard_sequential_mp(num_mp_ranks=num_mp_ranks, sequential=sequential) + + layers_pp_mp = {} + for mp_rank, d in mp_sharded.items(): + layers_pp_mp.update( + shard_pp(sequential=d, mp_rank=mp_rank, num_layers=num_layers) + ) + return layers_pp_mp + + +def convert(hf_model, ckpt_dir, output_dir): + """Converts a huggingface model to a NeoX checkpoint for different + model parallel and pipeline parallel settings degrees. 
+ + :param hf_model: the huggingface model + :param ckpt_dir: the directory containing the NeoX checkpoint + :param output_dir: the directory to save the converted checkpoint + returns None + """ + + os.listdir(ckpt_dir) + + ckpts, layers = {}, {} + for x in os.listdir(ckpt_dir): + if x.startswith("mp_rank"): + ckpts[x] = torch.load(os.path.join(ckpt_dir, x)) + elif x.startswith("layer"): + layers[x] = torch.load(os.path.join(ckpt_dir, x)) + + assert len(layers) + len(ckpts) > 0, "No checkpoints found in {}".format(ckpt_dir) + + os.makedirs(output_dir, exist_ok=True) + seq_state_dict = dict() + convert_hf_to_sequential(hf_model, seq_state_dict) + + if len(ckpts) == 1 and len(layers) == 0: + # pp=0, mp=1 + key = list(ckpts.keys())[0] + ckpts[key]["module"] = seq_state_dict + to_save = ckpts + + elif len(ckpts) > 1 and len(layers) == 0: + # pp=0, mp>1 + sharded_seq = shard_sequential_mp( + num_mp_ranks=len(ckpts), sequential=seq_state_dict + ) + replace_sharded_seq(mp_checkpoints=ckpts, mp_sharded_seq=sharded_seq) + to_save = ckpts + + elif len(ckpts) == 1 and len(layers) > 1: + # pp>0, mp==1 + to_save = shard_pp( + sequential=seq_state_dict, + mp_rank=0, + num_layers=hf_model.config.num_hidden_layers, + ) + + elif len(ckpts) > 1 and len(layers) > 1: + # pp>0, mp>1 + to_save = shard_pp_mp( + num_mp_ranks=len(ckpts), + sequential=seq_state_dict, + num_layers=hf_model.config.num_hidden_layers, + ) + + else: + raise NotImplementedError( + "Not implemented for len(ckpts)={} and len(layers)={}".format( + len(ckpts), len(layers) + ) + ) + + for k, v in to_save.items(): + print("saving {}...".format(os.path.join(output_dir, k))) + torch.save(v, os.path.join(ckpt_dir, k)) + + # copy the checkpoint to the output_dir + print("rm {}/*".format(output_dir)) + os.system("rm {}/*".format(output_dir)) + os.makedirs(output_dir, exist_ok=True) + print("cp {} {}".format(os.path.join(ckpt_dir, "*"), output_dir)) + os.system("cp {} {}".format(os.path.join(ckpt_dir, "*"), output_dir)) + + # set latest file within the output_dir + latest_file = os.path.join("/".join(output_dir.split("/")[:-1]), "latest") + os.system("rm " + latest_file) + with open(latest_file, "w") as f: + f.write(output_dir.split("/")[-1]) + + +def consume_neox_args2(args_parsed, overwrite_values=None): + """ + Deepspeed launcher needs to pass the arguments for `pretrain_gpt2.py` across to all machines. + + In order not to have any problems with different configs being mismatched across machines, we instead read the .yaml configuration file from the main rank, + then serialize the arguments to a dictionary, which the deepspeed launcher broadcasts to all machines (`--megatron_config`). + + We then instantiate a new NeoXArgs from the dictionary (`.from_dict`). This should ensure args are never inconsistent across machines. + """ + + with open(args_parsed.megatron_config) as jsonfile: + megatron_config = json.load(jsonfile) + if args_parsed.deepspeed_config is not None: + overwrite_values = NeoXArgs.set_up_autotuning( + args_parsed.deepspeed_config, overwrite_values + ) + if overwrite_values is not None: + megatron_config.update(overwrite_values) + return NeoXArgs.from_dict(args_dict=megatron_config) + + +def get_non_existing_dir(tmp_dir): + while os.path.exists(tmp_dir): + tmp_dir = os.path.join(tmp_dir, "tmp_dir") + return tmp_dir + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Convert a Hugging Face GPT-NeoX model back to a sequential model compatible with GPT-NeoX training." 
+    )
+    parser.add_argument(
+        "--revision",
+        type=int,
+        default=143000,
+        help="Revision or step of the Pythia model to convert.",
+    )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Path to save the converted GPT-NeoX model checkpoint.",
+    )
+    parser.add_argument(
+        "--config",
+        nargs="*",
+        default=[],
+        help="Path to the config file for the equivalent NeoX model.",
+    )
+    parser.add_argument(
+        "--test",
+        action="store_true",
+        help="If set, will run a test to ensure the conversion was successful.",
+    )
+    parser.add_argument(
+        "--download-only",
+        action="store_true",
+        help="If set, script will only download the model and not convert it.",
+    )
+
+    parser.add_argument(
+        "--ckpt-tmp-dir",
+        default="/tmp/ckpt_tmp_dir",
+        help="Directory to store cached Hugging Face checkpoints. [WARNING: MUST BE VISIBLE TO ALL RANKS]",
+    )
+    parser.add_argument(
+        "--hf-model-name",
+        type=str,
+        help="Name of the Hugging Face model to download from EleutherAI/{hf-model-name}.",
+    )
+
+    parser.add_argument(
+        "--cache-dir",
+        default="/gpfs/alpine/csc499/proj-shared/hf_checkpoints",
+        help="Directory to store cached Hugging Face checkpoints.",
+    )
+    try:
+        if int(os.environ["WORLD_SIZE"]) > 1:
+            args = parser.parse_args(MULTI_GPU_ARGS.split(" "))
+        else:
+            args = parser.parse_args()
+    except KeyError:
+        args = parser.parse_args()
+
+    tmp_cache_dir = get_non_existing_dir(args.ckpt_tmp_dir)
+
+    if args.download_only:
+        hf_model = GPTNeoXForCausalLM.from_pretrained(
+            f"EleutherAI/{args.hf_model_name}",
+            revision=f"step{args.revision}",
+            cache_dir=os.path.join(
+                args.cache_dir, f"{args.hf_model_name}/step{args.revision}"
+            ),
+        ).half()
+        exit(0)
+    else:
+        print("======================================================================")
+        print(
+            "Warning: the following script will delete files within {}".format(
+                args.output_dir
+            )
+        )
+        print(
+            "Warning: the following script will delete this directory {}".format(
+                tmp_cache_dir
+            )
+        )
+        print("======================================================================")
+        # time.sleep(5)
+
+    if int(os.environ.get("OMPI_COMM_WORLD_SIZE", 1)) > 1:
+        neox_args = consume_neox_args2(args)
+    else:
+        neox_args = NeoXArgs.from_ymls(args.config)
+    neox_args.configure_distributed_args()
+    neox_args.build_tokenizer()
+    neox_args.initialize_tensorboard_writer()
+    neox_args.comet()
+
+    # setup logging and timers
+    # init_wandb(neox_args=neox_args)
+    # timers = Timers(
+    #     use_wandb=neox_args.use_wandb, tensorboard_writer=neox_args.tensorboard_writer
+    # )
+    initialize_megatron(neox_args=neox_args)
+
+    torch.distributed.barrier()
+
+    model = get_model(neox_args=neox_args, use_cache=True)
+    optimizer, param_groups = get_optimizer(model=model, neox_args=neox_args)
+    lr_scheduler = get_learning_rate_scheduler(optimizer=optimizer, neox_args=neox_args)
+
+    model, optimizer, _, lr_scheduler = deepspeed.initialize(
+        model=model,
+        optimizer=optimizer,
+        # args=neox_args,
+        lr_scheduler=lr_scheduler,
+        dist_init_required=False,
+        model_parameters=None,
+        config_params=neox_args.deepspeed_config,
+        mpu=mpu,
+    )
+
+    if os.environ.get("OMPI_COMM_WORLD_RANK", "1") == "0":
+        os.makedirs(f"{tmp_cache_dir}", exist_ok=True)
+
+    torch.distributed.barrier()
+    neox_args.save = tmp_cache_dir
+
+    save_checkpoint(
+        neox_args=neox_args,
+        iteration=0,
+        model=model,
+        optimizer=optimizer,
+        lr_scheduler=lr_scheduler,
+    )
+    print(os.listdir(f"{tmp_cache_dir}"))
+    ckpt_dir = os.path.join(tmp_cache_dir, "global_step0")
+
+    if torch.distributed.get_rank() == 0:
+        config = 
GPTNeoXConfig.from_pretrained( + f"EleutherAI/{args.hf_model_name}", + revision=f"step{args.revision}", + cache_dir=os.path.join( + args.cache_dir, f"{args.hf_model_name}/step{args.revision}" + ), + ) + # does not change the weights, but is needed to align logits + config.update({"hidden_act": "gelu_fast"}) + hf_model = GPTNeoXForCausalLM.from_pretrained( + f"EleutherAI/{args.hf_model_name}", + revision=f"step{args.revision}", + config=config, + cache_dir=os.path.join( + args.cache_dir, f"{args.hf_model_name}/step{args.revision}" + ), + ).half() + print("==========================================") + print("Loaded Hugging Face model successfully!") + print("==========================================") + convert(hf_model, ckpt_dir=ckpt_dir, output_dir=args.output_dir) + + if os.environ.get("OMPI_COMM_WORLD_RANK", "1") == "0": + # cleanup temp dir + os.system(f"rm -r {tmp_cache_dir}") + + torch.distributed.barrier() + + # verify the conversion can be loaded + neox_args.load = "/".join(args.output_dir.split("/")[:-1]) + print(neox_args.load) + neox_args.finetune = True + load_checkpoint( + neox_args=neox_args, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + iteration=None, + ) + print("==========================================") + print("Converted checkpoint successfully loaded!") + print("==========================================") + + if args.test and torch.distributed.get_world_size() == 1: + # only implemented for world size 1 + + with torch.no_grad(): + # torch.backends.cudnn.benchmark = False + # torch.use_deterministic_algorithms(True) #setting the CUBLAS_WORKSPACE_CONFIG=:4096:8 environment variable is required for this to work (tested for A6000) + model.eval() + hf_model.eval() + + b = 10 + seq_len = 32 + inputs = torch.randint(0, 50304, (b, seq_len), dtype=torch.long).cuda() + mask = ( + (torch.triu(torch.ones(seq_len, seq_len)) != 1).transpose(0, 1).cuda() + ) + pos_ids = torch.arange(0, seq_len).unsqueeze(0).cuda() + + torch.manual_seed(0) + outputs_neox = model.cuda()( + (inputs, pos_ids, mask.unsqueeze(0).unsqueeze(0)), neox_args=neox_args + ) + + torch.manual_seed(0) + outputs = hf_model.cuda()(input_ids=inputs) + + print("HF logits .sum(): ", outputs.logits.to(torch.float32).sum()) + print("NeoX logits .sum(): ", outputs_neox.to(torch.float32).sum()) + + print( + "\nLogit comparison summary for {} sequences of length {}:".format( + b, seq_len + ) + ) + print("=============================================================") + for i in range(b): + abs_diff = ( + outputs.logits[i, ...].to(torch.float32) + - outputs_neox[i, ...].to(torch.float32) + ).abs() + print( + "[Random sequence {}] (hflogits - neoxlogits).abs() -- mean: {:.5f}\tmax: {:.5f}\tmin: {:.5f}\tmedian: {:.5f}".format( + i, + abs_diff.mean(), + abs_diff.max(), + abs_diff.min(), + abs_diff.median(), + ) + ) + + elif args.test: + print( + "[INFO] Checkpoint conversion logit test not implemented for distributed world_size > 1. Current world_size: {}".format( + torch.distributed.get_world_size() + ) + ) diff --git a/tools/ckpts/convert_neox_to_hf.py b/tools/ckpts/convert_neox_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..8dfe02d54581e64c918b83162b5a10271b4933f2 --- /dev/null +++ b/tools/ckpts/convert_neox_to_hf.py @@ -0,0 +1,906 @@ +# Copyright (c) 2023, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +import yaml +import argparse +from tqdm import tqdm + +import torch +from transformers import ( + MistralConfig, + LlamaConfig, + GPTNeoXConfig, + AutoModelForCausalLM, + AutoConfig, + AutoModelForSequenceClassification, +) + +from typing import List, Literal + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) +from megatron.tokenizer import build_tokenizer + + +""" +A script for converting saved NeoX Checkpoints to Huggingface (HF) compatible GPT-NeoX type models. + +Note that this script does not support all NeoX features. +Please investigate carefully whether your model is compatible with all architectures supported by the GPTNeoXForCausalLM class in HF. + +(e.g. position embeddings such as AliBi may not be supported by Huggingface's GPT-NeoX architecture). +""" + + +# Model definitions: a list of keys, and where they fall in terms of handling them in the presence of TP. +# in format : {model arch: {param type: {param in neox: param in HF}}} +MODEL_KEYS = { + "neox": { + "new": { + "COLUMN_PARALLEL_LINEAR_KEYS": { + "mlp.linear1.weight": "mlp.dense_h_to_4h.weight", + "mlp.linear1.bias": "mlp.dense_h_to_4h.bias", + "attention.query_key_value.weight": "attention.query_key_value.weight", + "attention.query_key_value.bias": "attention.query_key_value.bias", # TODO: handle GQA separately? + }, + "ROW_PARALLEL_LINEAR_KEYS": { + "attention.dense.weight": "attention.dense.weight", + "mlp.linear2.weight": "mlp.dense_4h_to_h.weight", + }, + "ROW_PARALLEL_BIAS_KEYS": { + "mlp.linear2.bias": "mlp.dense_4h_to_h.bias", + "attention.dense.bias": "attention.dense.bias", + }, + "NORM_KEYS": { + "input_layernorm.weight": "input_layernorm.weight", + "input_layernorm.bias": "input_layernorm.bias", + "post_attention_layernorm.weight": "post_attention_layernorm.weight", + "post_attention_layernorm.bias": "post_attention_layernorm.bias", + }, + "FINAL_NORM_KEYS": { + "norm.weight": "weight", + "norm.bias": "bias", + }, + }, + "legacy": { + "COLUMN_PARALLEL_LINEAR_KEYS": { + "mlp.dense_h_to_4h.weight": "mlp.dense_h_to_4h.weight", + "mlp.dense_h_to_4h.bias": "mlp.dense_h_to_4h.bias", + "attention.query_key_value.weight": "attention.query_key_value.weight", + "attention.query_key_value.bias": "attention.query_key_value.bias", # TODO: handle GQA separately? 
+ }, + "ROW_PARALLEL_LINEAR_KEYS": { + "attention.dense.weight": "attention.dense.weight", + "mlp.dense_4h_to_h.weight": "mlp.dense_4h_to_h.weight", + }, + "ROW_PARALLEL_BIAS_KEYS": { + "mlp.dense_4h_to_h.bias": "mlp.dense_4h_to_h.bias", + "attention.dense.bias": "attention.dense.bias", + }, + "NORM_KEYS": { + "input_layernorm.weight": "input_layernorm.weight", + "input_layernorm.bias": "input_layernorm.bias", + "post_attention_layernorm.weight": "post_attention_layernorm.weight", + "post_attention_layernorm.bias": "post_attention_layernorm.bias", + }, + "FINAL_NORM_KEYS": { + "norm.weight": "weight", + "norm.bias": "bias", + }, + }, + }, + "llama": { + "new": { + "COLUMN_PARALLEL_LINEAR_KEYS": { + "mlp.linear1.weight": ["mlp.up_proj.weight", "mlp.gate_proj.weight"] + }, + "ROW_PARALLEL_LINEAR_KEYS": { + "attention.dense.weight": "self_attn.o_proj.weight", + "mlp.linear2.weight": "mlp.down_proj.weight", + }, + "ROW_PARALLEL_BIAS_KEYS": {}, # No biases in RowParallelLinear layers + "NORM_KEYS": { + "input_layernorm.scale": "input_layernorm.weight", + "post_attention_layernorm.scale": "post_attention_layernorm.weight", + }, + "FINAL_NORM_KEYS": { + "norm.scale": "weight", + }, + "GQA_QKV_KEYS": { # because Llama can have Grouped Query Attention and has separate Q, K, and V linear proj params, handle them separately. + "attention.query_key_value.weight": [ + "self_attn.q_proj.weight", + "self_attn.k_proj.weight", + "self_attn.v_proj.weight", + ], + }, + }, + "legacy": { + "COLUMN_PARALLEL_LINEAR_KEYS": { + "mlp.w1.weight": "mlp.gate_proj.weight", + "mlp.w3.weight": "mlp.up_proj.weight", + }, + "ROW_PARALLEL_LINEAR_KEYS": { + "attention.dense.weight": "self_attn.o_proj.weight", + "mlp.w2.weight": "mlp.down_proj.weight", + }, + "ROW_PARALLEL_BIAS_KEYS": {}, # No biases in RowParallelLinear layers + "NORM_KEYS": { + "input_layernorm.scale": "input_layernorm.weight", + "post_attention_layernorm.scale": "post_attention_layernorm.weight", + }, + "FINAL_NORM_KEYS": { + "norm.scale": "weight", + }, + "GQA_QKV_KEYS": { # because Llama can have Grouped Query Attention and has separate Q, K, and V linear proj params, handle them separately. + "attention.query_key_value.weight": [ + "self_attn.q_proj.weight", + "self_attn.k_proj.weight", + "self_attn.v_proj.weight", + ], + }, + }, + }, +} + +MODEL_KEYS["mistral"] = MODEL_KEYS["llama"] + + +def load_partitions( + input_checkpoint_path: str, mp_partitions: int, layer_idx: int, sequential: bool +) -> List[torch.Tensor]: + """Returns a list containing all states from a model (across MP partitions)""" + + if sequential: + filename_format = f"mp_rank_{{i:02}}_model_states.pt" + else: + filename_format = f"layer_{layer_idx:02}-model_{{i:02}}-model_states.pt" + + loaded_tp_ranks = [ + torch.load( + os.path.join( + input_checkpoint_path, + filename_format.format(i=i), + ), + map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"), + ) + for i in range(mp_partitions) + ] + + return loaded_tp_ranks + + +def get_state( + state_dicts: List[torch.Tensor], key: str, layer_idx: int, sequential: bool +) -> torch.Tensor: + """Helper that returns a list containing a given weight's state from each MP partition, for a given layer in the model.""" + + if sequential: + # use the correct key into the sequential dict for given weight/provided key + key = f"sequential.{layer_idx}.{key}" + + return [state_dict["module"][key] for state_dict in state_dicts] + else: + # For the PipelineModule case, we don't need any key / module prefix. just grab this weight value. 
+ # layer_idx is also ignored because we've loaded only this layer's weights, ahead of time. + key = key + + return [state_dict[key] for state_dict in state_dicts] + + +def get_key(loaded_config, key, default=None): + """ + Search for a given key in a NeoX yaml. normalizes underscores -> hyphens + """ + key = key.replace("_", "-") + try: + return loaded_config[key] + except KeyError: + key = key.replace("-", "_") + try: + return loaded_config[key] + except KeyError: + return default + + +def create_config(neox_config, architecture="neox", is_rm=False, pad_token_id=-1): + """take in a loaded yaml from NeoX and assign relevant values to HF config. + Returns: GPTNeoXConfig() object + """ + + def gated_size(hidden_dim): + # takes in a hidden dim and calculates intermediate dim of a LLaMAParallelMLP. + # (only used if intermediate_size not specified in config) + # hidden-size * 8 / 3 , rounded up to nearest multiple of 256 + ff_dim = int(2 * hidden_dim * 4 / 3) + ff_dim = 256 * ((ff_dim + 256 - 1) // 256) + return ff_dim + + class TokenizerArgs: + # kinda hacky. + # this is to get something with the same interface as is used in build_tokenizer() + # without diving into loading a neox_args object or using argparse etc. + def __init__(self, neox_config): + self.make_vocab_size_divisible_by = get_key( + neox_config, "make-vocab-size-divisible-by", default=128 + ) + self.model_parallel_size = get_key(neox_config, "model-parallel-size") + self.vocab_file = get_key(neox_config, "vocab-file") + self.merge_file = get_key(neox_config, "merge-file") + self.tokenizer_type = get_key(neox_config, "tokenizer-type") + + self.rank = 0 + + args = TokenizerArgs(neox_config) + tokenizer = build_tokenizer(args) + try: # GPT2TokenizerFast raises NotImplementedError + pad_token = tokenizer.pad + except: + pad_token = ( + 1 # pad defaulting to 1. follows convention from GPT-NeoX-20b tokenizer + ) + + # TODO: change the default value here based on discussion regarding `gpt_j_tied` config parameter's default + use_tied_lns = get_key(neox_config, "gpt-j-tied", False) + + if use_tied_lns: + raise NotImplementedError( + """ERROR: Huggingface Transformers does not yet support a single shared layernorm + per transformer block for GPT-NeoX models trained w/ GPT-J parallel residuals. + See https://github.com/EleutherAI/gpt-neox/pull/481 for further details.""" + ) + + # set all config values. + + # shared config parameters. 
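+    # All values below are read from the NeoX YAML via get_key(), which
+    # normalizes hyphen/underscore spellings, so either form works in the config.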
+ args = { + "vocab_size": args.padded_vocab_size, + "hidden_size": get_key(neox_config, "hidden-size"), + "num_hidden_layers": get_key(neox_config, "num-layers"), + "num_attention_heads": get_key(neox_config, "num-attention-heads"), + "max_position_embeddings": get_key(neox_config, "max-position-embeddings"), + "initializer_range": get_key(neox_config, "init-method-std", 0.02), + "tie_word_embeddings": (not get_key(neox_config, "no-weight-tying", False)), + "use_cache": True, + } + if architecture == "mistral" or architecture == "llama": + args.update( + { + "intermediate_size": get_key( + neox_config, + "intermediate-size", + gated_size(get_key(neox_config, "hidden-size")), + ), + "num_key_value_heads": get_key( + neox_config, + "num-kv-heads", + get_key(neox_config, "num-attention-heads"), + ), + "hidden_act": get_key( + neox_config, "activation", default="silu" + ).replace("swiglu", "silu"), + "rms_norm_eps": get_key(neox_config, "rms-norm-epsilon", 1.0e-6), + "bos_token_id": tokenizer.eod, + "eos_token_id": tokenizer.eod, + "rope_theta": get_key(neox_config, "rotary-emb-base", 10000.0), + } + ) + + if architecture == "mistral": + # mistral-specific options + args.update( + { + "sliding_window": get_key( + neox_config, "sliding-window-width", 4096 + ), + } + ) + hf_config = MistralConfig(**args) + elif architecture == "llama": + # llama-specific options + args.update( + { + # NeoX library defaults to using bias in attention + "attention_bias": get_key( + neox_config, "use_bias_in_attn_linear", True + ), + } + ) + hf_config = LlamaConfig(**args) + else: + # GPT-NeoX HF model class-specific options + args.update( + { + "rotary_pct": get_key(neox_config, "rotary-pct", default=1.0), + "rotary_emb_base": get_key( + neox_config, "rotary-emb-base", default=1000.0 + ), + "use_parallel_residual": get_key(neox_config, "gpt-j-residual", False), + "layer_norm_eps": get_key(neox_config, "layernorm-epsilon", 1e-5), + "intermediate_size": get_key( + neox_config, + "intermediate-size", + 4 * get_key(neox_config, "hidden-size"), + ), + } + ) + hf_config = GPTNeoXConfig(**args) + if is_rm: + hf_config.num_labels = 1 + hf_config.pad_token_id = pad_token_id + + return hf_config + + +def reshard_and_split_qkv( + param_mapping: dict, # a dictionary mapping the QKV weight keys in GPT-NeoX -> a list of keys representing the Q, K, and V weight keys the HF model will use + hf_config: AutoConfig, # a HF model config for the model + loaded_tp_ranks: List[torch.Tensor], + layer_idx: int, + sequential: bool, +): + """ + A helper function which performs reshaping and sharding to make the QKV projection from NeoX compatible with HF Llama models, + even when grouped-query attention is required. + """ + for key, hf_keys in param_mapping.items(): + assert ( + isinstance(hf_keys, list) and len(hf_keys) == 3 + ), "Must map QKV to precisely 3 resulting weight matrices." + + for key, hf_keys in param_mapping.items(): + # we first merge the QKV proj. across TP ranks + sharded_qkv = torch.stack( + get_state(loaded_tp_ranks, key, layer_idx, sequential), dim=0 + ) + # should now have shape [TP_SIZE, (hidden_size + 2 * kv_hidden_size) / TP_SIZE, hidden_size]. 
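+        # Illustrative numbers (not from any real config): with hidden_size=4096,
+        # num_attention_heads=32, num_key_value_heads=8, and TP_SIZE=2, head_dim is
+        # 128 and each fused per-head slice is 128 * (1 + 2 * 8 / 32) = 192 rows,
+        # so the view below produces shape [2, 16, 192, 4096].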
+ + sharded_qkv = sharded_qkv.view( + len(loaded_tp_ranks), + hf_config.num_attention_heads // len(loaded_tp_ranks), + int( + hf_config.hidden_size + // hf_config.num_attention_heads + * ( + 1 + + 2 * hf_config.num_key_value_heads / hf_config.num_attention_heads + ) + ), + hf_config.hidden_size, + ) # is meant to convert to shape [TP_SIZE, NUM_QUERY_HEADS_PER_SHARD, dims_per_head * (1 + 2 * kv-to-q head ratio), hidden_size] + + q, k, v = torch.split( + sharded_qkv, + [ + hf_config.hidden_size // hf_config.num_attention_heads, + int( + (hf_config.num_key_value_heads / hf_config.num_attention_heads) + * hf_config.hidden_size + // hf_config.num_attention_heads + ), + int( + (hf_config.num_key_value_heads / hf_config.num_attention_heads) + * hf_config.hidden_size + // hf_config.num_attention_heads + ), + ], + dim=2, + ) + # splits along the (dims_per_head * (1 + 2 * kv-to-q head ratio)_ dim to get 3 tensors: + # 1 x [TP_SIZE, NUM_Q_HEADS_PER_SHARD, dims_per_head, hidden_size] and 2 x [TP_SIZE, NUM_Q_HEADS_PER_SHARD, (dims_per_head / kv-to-q head ratio), hidden_size] + # these are the Q, and K, V tensors respectively. + + # we have to do additional reshape for each individual tensor now, + # into the expected square (or smaller than square, for K/V tensors) shape + q, k, v = q.squeeze(dim=2), k.squeeze(dim=2), v.squeeze(dim=2) + q = q.view( + hf_config.num_attention_heads, + hf_config.hidden_size // hf_config.num_attention_heads, + hf_config.hidden_size, + ).reshape(hf_config.hidden_size, hf_config.hidden_size) + k = k.reshape( + hf_config.num_key_value_heads, + hf_config.hidden_size // hf_config.num_attention_heads, + hf_config.hidden_size, + ).reshape( + hf_config.hidden_size + // hf_config.num_attention_heads + * hf_config.num_key_value_heads, + hf_config.hidden_size, + ) + v = v.reshape( + hf_config.num_key_value_heads, + hf_config.hidden_size // hf_config.num_attention_heads, + hf_config.hidden_size, + ).reshape( + hf_config.hidden_size + // hf_config.num_attention_heads + * hf_config.num_key_value_heads, + hf_config.hidden_size, + ) + + # return these + state_dict = {} + for hf_key, proj in zip(hf_keys, [q, k, v]): + state_dict[hf_key] = proj.clone() + return state_dict + + +def get_mlp_naming_convention(loaded_tp_ranks, layer_idx, sequential): + """Determine whether the checkpoint uses the legacy or new MLP naming convention.""" + print(list(loaded_tp_ranks[0]["module"].keys())) + if any( + [ + ["mlp.linear1.weight" in key for key in list(state_dict["module"].keys())] + for state_dict in loaded_tp_ranks + ] + ): + return "new" + elif any( + [ + [ + "mlp.dense_h_to_4h.weight" in key + for key in list(state_dict["module"].keys()) + ] + for state_dict in loaded_tp_ranks + ] + ): + return "legacy" + else: + raise ValueError("Unable to determine MLP naming convention in checkpoint") + + +def convert( + input_checkpoint_path, + loaded_config, + output_checkpoint_path, + sequential: bool = True, + precision: Literal["auto", "fp16", "bf16", "fp32"] = "auto", + architecture: Literal["neox", "llama", "mistral"] = "neox", + is_rm: bool = False, + pad_token_id: int = -1, +): + """convert a NeoX checkpoint to a HF model format. + should perform model-parallel merging correctly + but only supports features allowed by HF GPT-NeoX implementation (e.g. 
rotary embeddings) + """ + + ARCH = MODEL_KEYS[architecture] + + hf_config = create_config( + loaded_config, architecture=architecture, is_rm=is_rm, pad_token_id=pad_token_id + ) + + if not is_rm: + hf_model = AutoModelForCausalLM.from_config(hf_config) + else: + hf_model = AutoModelForSequenceClassification.from_config(hf_config) + + if architecture == "neox": + hf_transformer = hf_model.gpt_neox + else: + hf_transformer = hf_model.model + + if precision == "auto": + print("Auto-detecting precision to save model into...") + # save model in FP16 if Deepspeed fp16 was used in config, else 32 bit + fp16 = get_key(loaded_config, "fp16") + + if fp16: + try: + # current behavior is to pass "fp16": {"enabled": true}, when using upstream Deepspeed + if fp16["enabled"]: + hf_model.half() + print("Saving weights in fp16 precision...") + except: + try: + # attempt to access bf16 dict in yaml file, if fp16 not enabled + bf16 = get_key(loaded_config, "bf16") + if bf16: + hf_model.to(dtype=torch.bfloat16) + print("Saving weights in bf16 precision...") + except: + hf_model.to(dtype=torch.float) + print( + "Model not trained in fp16 / bf16 mixed precision, saving weights in fp32..." + ) + else: + name_to_dtype = { + "bf16": torch.bfloat16, + "fp16": torch.float16, + "fp32": torch.float, + } + print(f"Saving model into specified {precision} precision...") + hf_model.to(dtype=name_to_dtype[precision]) + + mp_partitions = get_key(loaded_config, "model-parallel-size") + + # Sequential saves all model states from an MP rank in one file. + # so we only load the MP ranks only once and index into them with get_state(). + # for the pipeline-parallel case (pipeline-parallel-size >= 1), + # we must load the correct layer's states at each step. + # (this does mean that less memory is required for PP conversion.) + loaded_tp_ranks = load_partitions( + input_checkpoint_path, mp_partitions, layer_idx=0, sequential=sequential + ) + + ### Embedding layer ### + # Embedding is layer idx 0 + if architecture == "neox": + embed_in = hf_transformer.embed_in + else: + embed_in = hf_transformer.embed_tokens + embed_in.load_state_dict( # TODO: embed_in is not always model's name for embedding + { + "weight": torch.cat( + get_state( + loaded_tp_ranks, + "word_embeddings.weight", + layer_idx=0, + sequential=sequential, + ), + dim=0, + ) + } + ) + assert ( + hf_config.vocab_size == embed_in.weight.shape[0] + ), f"ERROR: calculated vocab size {hf_config.vocab_size} != embed param size {embed_in.shape[0]}" + ### End Embedding Layer ### + + # grab from 3rd layer to pass embeddings + mlp_naming = get_mlp_naming_convention( + load_partitions( + input_checkpoint_path, + mp_partitions, + layer_idx=3, + sequential=sequential, + ), + 0, + sequential, + ) + print(f"Detected MLP naming convention: {mlp_naming}") + ARCH = ARCH[mlp_naming] + + for layer_i in tqdm(range(get_key(loaded_config, "num-layers"))): + + # get layer from hf model + hf_layer = hf_transformer.layers[layer_i] # TODO: model module names + + if not sequential: + # in the non-sequential case, must load from each layer individually. 
+ # use layer index + 2 bc of embed layer and a dummy _pre_transformer_block, which are "layers 0 and 1" + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + layer_idx=layer_i + 2, + sequential=sequential, + ) + + # + 2 bc of embed layer and a dummy _pre_transformer_block + state_dict = {} + for key, hf_key in ARCH["ROW_PARALLEL_LINEAR_KEYS"].items(): + state_dict[hf_key] = torch.cat( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ), + dim=1, + ) + + # average layernorm stats over mp ranks + for key, hf_key in ARCH["NORM_KEYS"].items(): + state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ) + ) / len(loaded_tp_ranks) + + # LinearWithTPMerge + for key, hf_key in ARCH["COLUMN_PARALLEL_LINEAR_KEYS"].items(): + if type(hf_key) == list: + # Llama magic - split the weight into two parts for the gate and up proj + states = [ + torch.chunk(state, chunks=2, dim=0) + for state in get_state( + loaded_tp_ranks, + key, + layer_idx=layer_i + 2, + sequential=sequential, + ) + ] + # Set up proj... + state_dict[hf_key[0]] = torch.cat([state[0] for state in states], dim=0) + # Set gate proj... + state_dict[hf_key[1]] = torch.cat([state[1] for state in states], dim=0) + else: + state_dict[hf_key] = torch.cat( + get_state( + loaded_tp_ranks, + key, + layer_idx=layer_i + 2, + sequential=sequential, + ), + dim=0, + ) + + # LinearWithTPSplitBias + for key, hf_key in ARCH["ROW_PARALLEL_BIAS_KEYS"].items(): + state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ) + ) + + # Just take one + if "attention.bias" in hf_layer.state_dict(): + state_dict["attention.bias"] = hf_layer.state_dict()["attention.bias"] + if "attention.masked_bias" in hf_layer.state_dict(): + state_dict["attention.masked_bias"] = hf_layer.state_dict()[ + "attention.masked_bias" + ] + + # some architectures, like Mistral and Llama, have the following which must be handled specially: + # - Q, K, V projections are performed separately, so we must split apart GPT-NeoX library's single QKV proj + # - Support for Grouped-Query Attention, meaning the Q and the K, V projections may not be the same size + if "GQA_QKV_KEYS" in ARCH: + state_dict.update( + reshard_and_split_qkv( + param_mapping=ARCH["GQA_QKV_KEYS"], + hf_config=hf_config, + loaded_tp_ranks=loaded_tp_ranks, + layer_idx=layer_i + 2, + sequential=sequential, + ) + ) + # load state_dict into layer + hf_layer.load_state_dict(state_dict) + + if not sequential: + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + get_key(loaded_config, "num-layers") + 3, + sequential=sequential, + ) + # Load final layer norm + norm_state_dict = {} + for key, hf_key in ARCH["FINAL_NORM_KEYS"].items(): + norm_state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, + key, + layer_idx=get_key(loaded_config, "num-layers") + 3, + sequential=sequential, + ) + ) / len(loaded_tp_ranks) + + if architecture == "neox": + final_layer_norm = hf_transformer.final_layer_norm + else: + final_layer_norm = hf_transformer.norm + + final_layer_norm.load_state_dict(norm_state_dict) + + # Load output embedding + if not sequential: + if get_key(loaded_config, "no-weight-tying", False): + # if we have trained input + output embedding layers without tied weights + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + get_key(loaded_config, "num-layers") + 4, + sequential=sequential, + ) + else: + # in this 
case, output embedding layer and input embedding layer are tied. + # load + save the input embed weights into the output embedding layer's place. + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + layer_idx=0, + sequential=sequential, + ) + # output embedding / LM head + if not is_rm: + if architecture == "neox": # name of lm head / final linear proj varies + lm_head = hf_model.embed_out + else: + lm_head = hf_model.lm_head + else: + lm_head = hf_model.score + + if get_key(loaded_config, "no-weight-tying", False): + # save the (untied) final linear into LM head for HF + lm_head.load_state_dict( + { + "weight": torch.cat( + get_state( + loaded_tp_ranks, + "final_linear.weight" if not is_rm else "rm_linear.weight", + layer_idx=get_key(loaded_config, "num-layers") + 4, + sequential=sequential, + ), + dim=0 if not is_rm else 1, + ), + } + ) + else: + # don't need to worry about rm here since you can't really tie them... + + # embedding layers are tied. transpose input layer and save + lm_head.load_state_dict( + { + "weight": torch.cat( + get_state( + loaded_tp_ranks, + "word_embeddings.weight", + layer_idx=0, + sequential=sequential, + ), + dim=0, + ), + } + ) + + del loaded_tp_ranks + + return hf_model + + +def main(input_args=None, overwrite_values=None): + from huggingface_hub import create_repo, HfApi + + parser = argparse.ArgumentParser( + description="Merge MP partitions and convert to HF Model." + ) + parser.add_argument( + "--input_dir", + type=str, + help="Path to NeoX checkpoint, e.g. /path/to/model/global_step143000", + ) + parser.add_argument( + "--config_file", + type=str, + help="Path to config file for the input NeoX checkpoint.", + ) + parser.add_argument( + "--output_dir", + type=str, + help="Output dir, where to save the HF Model, tokenizer, and configs", + ) + parser.add_argument( + "--precision", + type=str, + default="auto", + help="What precision to save the model into. Defaults to auto, which auto-detects which 16-bit dtype to save into, or falls back to fp32.", + ) + parser.add_argument( + "--no_save_tokenizer", + action="store_true", + help="Whether to skip saving the tokenizer alongside a model.", + ) + parser.add_argument( + "--vocab-is-hf-tokenizer", + action="store_true", + help="Whether the vocab file is in a Huggingface tokenizer path.", + ) + parser.add_argument( + "--pad-token-id", + type=int, + default=-1, + help="Pad token id to set in tokenizer. Required for RM style models.", + ) + parser.add_argument( + "--architecture", + type=str, + default="neox", + help="What HF model class type to export into.", + ) + args = parser.parse_args(input_args) + + # validate arguments + assert args.precision in [ + "auto", + "fp16", + "bf16", + "fp32", + ], f"expected --precision to be one of 'auto', 'fp16', 'bf16', 'fp32' but got '{args.precision}' !" + assert args.architecture in [ + "neox", + "llama", + "mistral", + ], f"expected --architecture to be one of 'neox', 'mistral', 'llama', but got '{args.architecture}' !" + + with open(args.config_file) as f: + loaded_config = yaml.full_load(f) + if overwrite_values: + loaded_config.update(overwrite_values) + + # Determine the checkpoint format of the model. + # DeepSpeed saves models wrapped in a PipelineModule differently from those not. + # PipelineModule models are saved as per-layer state dicts per TP shard, + # while Sequential model state dicts are saved all together in one mp_rank_xx_model_states.pt + # file per tensor/model parallel shard. 
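+    # As an illustration (the step number and file names here are merely
+    # representative of the two layouts described above):
+    #   PipelineModule: global_step143000/layer_00-model_00-model_states.pt,
+    #                   global_step143000/layer_02-model_00-model_states.pt, ...
+    #   Sequential:     global_step143000/mp_rank_00_model_states.pt,
+    #                   global_step143000/mp_rank_01_model_states.pt, ...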
+ pipeline_world_size = get_key(loaded_config, "pipe-parallel-size", 1) + is_rm = get_key(loaded_config, "train_impl", "normal") == "rm" + if is_rm and args.pad_token_id == -1: + raise ValueError("RM models require a pad token id to be set.") + if pipeline_world_size == 0: + sequential = True + print( + f"Detected 'pipe-parallel-size' of {pipeline_world_size}, assuming model is saved as Sequential..." + ) + else: + sequential = False + print( + f"Detected 'pipe-parallel-size' of {pipeline_world_size}, assuming model is saved as PipelineModule..." + ) + + # convert the model to HF. + hf_model = convert( + args.input_dir, + loaded_config, + args.output_dir, + sequential=sequential, + architecture=args.architecture, + is_rm=is_rm, + pad_token_id=args.pad_token_id, + ) + + # Save to disk. + hf_model.save_pretrained(args.output_dir) + + if not args.no_save_tokenizer: + # save tokenizer to directory as well, for easy loading of model as a HF model. + tokenizer_type = get_key(loaded_config, "tokenizer-type") + if args.vocab_is_hf_tokenizer: + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained( + os.path.dirname(get_key(loaded_config, "vocab-file")) + ) + if args.pad_token_id != -1: + tokenizer.pad_token_id = args.pad_token_id + print("loaded tokenizer: ", tokenizer) + tokenizer.save_pretrained(args.output_dir) + print("tokenizer saved!") + elif tokenizer_type == "HFTokenizer": # TODO: handle sentencepiece tokenizers? + print(f"saving tokenizer from file {get_key(loaded_config, 'vocab-file')}") + print( + "Warning: please check that your model config and tokenizer end with the correct special tokens (EOS, BOS)." + ) + from transformers import PreTrainedTokenizerFast + + tokenizer = PreTrainedTokenizerFast( + tokenizer_file=get_key(loaded_config, "vocab-file") + ) + if args.pad_token_id != -1: + tokenizer.pad_token_id = args.pad_token_id + print("loaded tokenizer: ", tokenizer) + tokenizer.save_pretrained(args.output_dir) + print("tokenizer saved!") + + +if __name__ == "__main__": + + # before running script: + # `pip install --upgrade transformers` + # `huggingface-cli login` + # + main() diff --git a/tools/ckpts/convert_neox_to_mamba_ssm.py b/tools/ckpts/convert_neox_to_mamba_ssm.py new file mode 100644 index 0000000000000000000000000000000000000000..f87b369540c65008e14f6977728a353c830aba86 --- /dev/null +++ b/tools/ckpts/convert_neox_to_mamba_ssm.py @@ -0,0 +1,361 @@ +import torch + +from convert_neox_to_hf import load_partitions, get_key, get_state + +from mamba_ssm.models.config_mamba import MambaConfig +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + + +import argparse +from typing import Literal +import yaml +from tqdm import tqdm + +import os +import sys + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) +from megatron.tokenizer import build_tokenizer + +""" +Conversion utility for converting a Mamba model +trained in GPT-NeoX into the mamba_ssm package ckpt format. 
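+
+A rough usage sketch (the flags are defined in main() below; all paths here
+are illustrative):
+
+    python tools/ckpts/convert_neox_to_mamba_ssm.py \
+        --input_dir /path/to/model/global_step143000 \
+        --config_file /path/to/config.yml \
+        --output_dir /path/to/mamba_output \
+        --precision auto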
+""" +ARCH = { + "COLUMN_PARALLEL_LINEAR_KEYS": { + # these require concat across dim=0 + "mixer.in_proj.weight": "mixer.in_proj.weight", + # "mixer.in_proj.bias": "mixer.in_proj.bias", + "mixer.A_log": "mixer.A_log", + "mixer.D": "mixer.D", + "mixer.conv1d.weight": "mixer.conv1d.weight", + "mixer.conv1d.bias": "mixer.conv1d.bias", + "mixer.dt_proj.weight": "mixer.dt_proj.weight", + "mixer.dt_proj.bias": "mixer.dt_proj.bias", + }, + "ROW_PARALLEL_LINEAR_KEYS": { + # these require concat across dim=1 + "mixer.out_proj.weight": "mixer.out_proj.weight", + "mixer.x_proj.weight": "mixer.x_proj.weight", + }, + "ROW_PARALLEL_BIAS_KEYS": { + # these require summing across ranks + # "mixer.x_proj.bias": "mixer.x_proj.bias", + # "mixer.out_proj.bias": "mixer.out_proj.bias", + }, + "NORM_KEYS": { + "norm.scale": "norm.weight", + # "norm.bias": "norm.bias", + }, + "FINAL_NORM_KEYS": { + "norm.scale": "weight", + # "norm.bias": "bias", + }, +} + + +def create_config(neox_config): + class TokenizerArgs: + # kinda hacky. + # this is to get something with the same interface as is used in build_tokenizer() + # without diving into loading a neox_args object or using argparse etc. + def __init__(self, neox_config): + self.make_vocab_size_divisible_by = get_key( + neox_config, "make-vocab-size-divisible-by", default=128 + ) + self.model_parallel_size = get_key(neox_config, "model-parallel-size") + self.vocab_file = get_key(neox_config, "vocab-file") + self.merge_file = get_key(neox_config, "merge-file") + self.tokenizer_type = get_key(neox_config, "tokenizer-type") + + self.rank = 0 + + args = TokenizerArgs(neox_config) + tokenizer = build_tokenizer(args) + try: # GPT2TokenizerFast raises NotImplementedError + pad_token = tokenizer.pad + except: + pad_token = ( + 1 # pad defaulting to 1. follows convention from GPT-NeoX-20b tokenizer + ) + norm_type = get_key(neox_config, "norm", "layernorm") + if norm_type == "rmsnorm": + use_rms_norm = True + else: + assert ( + norm_type == "layernorm" + ), "only layernorm or rmsnorm supported by mamba_ssm!" + use_rms_norm = False + return MambaConfig( + d_model=get_key(neox_config, "hidden_size"), + n_layer=get_key(neox_config, "num_layers"), + vocab_size=args.padded_vocab_size, + rms_norm=use_rms_norm, + residual_in_fp32=False, + fused_add_norm=True, + # shouldn't really matter? we didn't train with it but should be equiv. 
+        # it's faster though
+        # pad_vocab_size_multiple_of=get_key(neox_config, "make_vocab_size_divisible_by", 128),
+        tie_embeddings=not get_key(
+            neox_config, "no_weight_tying", False
+        ),  # requires newer mamba_ssm>=1.2.0.post1
+    )
+
+
+def convert(
+    input_checkpoint_path,
+    loaded_config,
+    output_checkpoint_path,
+    sequential: bool = True,
+    precision: Literal["auto", "fp16", "bf16", "fp32"] = "auto",
+):
+
+    mamba_config = create_config(loaded_config)
+
+    # default to fp32, so `dtype` is always defined even if the auto-detection
+    # below finds no fp16 / bf16 settings in the training config
+    dtype = torch.float
+
+    if precision == "auto":
+        print("Auto-detecting precision to save model into...")
+        # save model in FP16 if Deepspeed fp16 was used in config, else 32 bit
+        fp16 = get_key(loaded_config, "fp16")
+
+        if fp16:
+            try:
+                # current behavior is to pass "fp16": {"enabled": true}, when using upstream Deepspeed
+                if fp16["enabled"]:
+                    dtype = torch.float16
+                    print("Saving weights in fp16 precision...")
+            except:
+                try:
+                    # attempt to access bf16 dict in yaml file, if fp16 not enabled
+                    bf16 = get_key(loaded_config, "bf16")
+                    if bf16:
+                        dtype = torch.bfloat16
+                        print("Saving weights in bf16 precision...")
+                except:
+                    dtype = torch.float
+                    print(
+                        "Model not trained in fp16 / bf16 mixed precision, saving weights in fp32..."
+                    )
+    else:
+        name_to_dtype = {
+            "bf16": torch.bfloat16,
+            "fp16": torch.float16,
+            "fp32": torch.float,
+        }
+        print(f"Saving model into specified {precision} precision...")
+        dtype = name_to_dtype[precision]
+
+    mamba_model = MambaLMHeadModel(
+        config=mamba_config,
+        device="cuda" if torch.cuda.is_available() else "cpu",
+        dtype=dtype,
+    )
+
+    mp_partitions = get_key(loaded_config, "model-parallel-size")
+
+    # Sequential saves all model states from an MP rank in one file,
+    # so we load the MP ranks only once and index into them with get_state().
+    # for the pipeline-parallel case (pipeline-parallel-size >= 1),
+    # we must load the correct layer's states at each step.
+    # (this does mean that less memory is required for PP conversion.)
+    loaded_tp_ranks = load_partitions(
+        input_checkpoint_path, mp_partitions, layer_idx=0, sequential=sequential
+    )
+
+    mamba_model.backbone.embedding.load_state_dict(
+        {
+            "weight": torch.cat(
+                get_state(
+                    loaded_tp_ranks,
+                    "word_embeddings.weight",
+                    layer_idx=0,
+                    sequential=sequential,
+                ),
+                dim=0,
+            )
+        }
+    )
+
+    for layer_i in tqdm(range(get_key(loaded_config, "num-layers"))):
+
+        layer = mamba_model.backbone.layers[layer_i]
+
+        if not sequential:
+            # in the non-sequential case, must load from each layer individually.
+ # use layer index + 2 bc of embed layer and a dummy _pre_transformer_block, which are "layers 0 and 1" + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + layer_idx=layer_i + 2, + sequential=sequential, + ) + + state_dict = {} + + for key, hf_key in ARCH["ROW_PARALLEL_LINEAR_KEYS"].items(): # ROW_PARALLEL + state_dict[hf_key] = torch.cat( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ), + dim=1, + ) + + # average layernorm stats over mp ranks + for key, hf_key in ARCH["NORM_KEYS"].items(): + state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ) + ) / len(loaded_tp_ranks) + + # LinearWithTPMerge + for key, hf_key in ARCH["COLUMN_PARALLEL_LINEAR_KEYS"].items(): + state_dict[hf_key] = torch.cat( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ), + dim=0, + ) + + # LinearWithTPSplitBias + for key, hf_key in ARCH["ROW_PARALLEL_BIAS_KEYS"].items(): + state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, key, layer_idx=layer_i + 2, sequential=sequential + ) + ) + + layer.load_state_dict(state_dict) + + if not sequential: + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + get_key(loaded_config, "num-layers") + 3, + sequential=sequential, + ) + + norm_state_dict = {} + for key, hf_key in ARCH["FINAL_NORM_KEYS"].items(): + norm_state_dict[hf_key] = sum( + get_state( + loaded_tp_ranks, + key, + layer_idx=get_key(loaded_config, "num-layers") + 3, + sequential=sequential, + ) + ) / len(loaded_tp_ranks) + + final_layer_norm = mamba_model.backbone.norm_f + + final_layer_norm.load_state_dict(norm_state_dict) + + if not sequential: + loaded_tp_ranks = load_partitions( + input_checkpoint_path, + mp_partitions, + get_key(loaded_config, "num-layers") + 4, + sequential=sequential, + ) + + lm_head = mamba_model.lm_head + + lm_head.load_state_dict( + { + "weight": torch.cat( + get_state( + loaded_tp_ranks, + "final_linear.weight", + layer_idx=get_key(loaded_config, "num-layers") + 4, + sequential=sequential, + ), + dim=0, + ), + } + ) + + del loaded_tp_ranks + + return mamba_model + + +def main(input_args=None, overwrite_values=None): + + parser = argparse.ArgumentParser( + description="Merge MP partitions and convert to HF Model." + ) + parser.add_argument( + "--input_dir", + type=str, + help="Path to NeoX checkpoint, e.g. /path/to/model/global_step143000", + ) + parser.add_argument( + "--config_file", + type=str, + help="Path to config file for the input NeoX checkpoint.", + ) + parser.add_argument( + "--output_dir", + type=str, + help="Output dir, where to save the HF Model, tokenizer, and configs", + ) + parser.add_argument( + "--precision", + type=str, + default="auto", + help="What precision to save the model into. Defaults to auto, which auto-detects which 16-bit dtype to save into, or falls back to fp32.", + ) + parser.add_argument( + "--no_save_tokenizer", + action="store_true", + help="Whether to skip saving the tokenizer alongside a model.", + ) + args = parser.parse_args(input_args) + + # validate arguments + assert args.precision in [ + "auto", + "fp16", + "bf16", + "fp32", + ], f"expected --precision to be one of 'auto', 'fp16', 'bf16', 'fp32' but got '{args.precision}' !" + + with open(args.config_file) as f: + loaded_config = yaml.full_load(f) + if overwrite_values: + loaded_config.update(overwrite_values) + + # Determine the checkpoint format of the model. 
+ # DeepSpeed saves models wrapped in a PipelineModule differently from those not. + # PipelineModule models are saved as per-layer state dicts per TP shard, + # while Sequential model state dicts are saved all together in one mp_rank_xx_model_states.pt + # file per tensor/model parallel shard. + pipeline_world_size = get_key(loaded_config, "pipe-parallel-size", 1) + if pipeline_world_size == 0: + sequential = True + print( + f"Detected 'pipe-parallel-size' of {pipeline_world_size}, assuming model is saved as Sequential..." + ) + else: + sequential = False + print( + f"Detected 'pipe-parallel-size' of {pipeline_world_size}, assuming model is saved as PipelineModule..." + ) + + model = convert( + args.input_dir, + loaded_config, + args.output_dir, + sequential=sequential, + precision=args.precision, + ) + + model.save_pretrained(args.output_dir) + + +if __name__ == "__main__": + + main() diff --git a/tools/ckpts/convert_raw_llama_weights_to_neox.py b/tools/ckpts/convert_raw_llama_weights_to_neox.py new file mode 100644 index 0000000000000000000000000000000000000000..b7435f010e5fab7f67d4e0c7883ead60017a1733 --- /dev/null +++ b/tools/ckpts/convert_raw_llama_weights_to_neox.py @@ -0,0 +1,661 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
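+
+# A rough usage sketch (the arguments are defined in main() below; all paths
+# here are illustrative; add --pipeline_parallel when converting for
+# pipe-parallel-size > 1):
+#
+#   python tools/ckpts/convert_raw_llama_weights_to_neox.py \
+#       --input_dir /path/to/llama/weights \
+#       --model_size 7B \
+#       --output_dir /path/to/neox_checkpoint \
+#       --num_output_shards 2
+#
+# As a sanity check on compute_intermediate_size() below: for a hidden size of
+# 4096 (the LLaMA-7B `dim`), ceil(4096 * 8 / 3) = 10923, and
+# (10923 + 255) // 256 * 256 = 11008, matching the 7B entry in
+# INTERMEDIATE_SIZE_MAP.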
+ +import argparse +import os +import torch +import json +import math +import tqdm.auto as tqdm + + +INTERMEDIATE_SIZE_MAP = { + "7B": 11008, + "13B": 13824, + "30B": 17920, + "34B": 22016, + "65B": 22016, + "70B": 28672, + "mistral-7B-v0.1": 14336, +} +NUM_SHARDS = { + "7B": 1, + "13B": 2, + "30B": 4, + "34B": 4, + "65B": 8, + "70B": 8, + "mistral-7B-v0.1": 1, +} + + +def compute_intermediate_size(n): + return int(math.ceil(n * 8 / 3) + 255) // 256 * 256 + + +def read_json(path): + with open(path, "r") as f: + return json.load(f) + + +def write_json(text, path): + with open(path, "w") as f: + json.dump(text, f) + + +def write_file(text, path): + with open(path, "w") as f: + f.write(text) + + +def convert_model_pipeline( + output_base_path, input_base_path, model_size: str, num_output_shards: int +): + assert model_size in NUM_SHARDS + + model_path = os.path.join(output_base_path, "global_step0") + os.makedirs(model_path, exist_ok=True) + write_file("global_step0", os.path.join(output_base_path, "latest")) + + params = read_json(os.path.join(input_base_path, "params.json")) + num_input_shards = NUM_SHARDS[model_size] + num_layers = params["n_layers"] + num_heads = params["n_heads"] + if "n_kv_heads" in params: + num_kv_heads = params["n_kv_heads"] + else: + num_kv_heads = num_heads + num_kv_heads_per_input_shard = num_kv_heads // num_input_shards + num_heads_per_input_shard = num_heads // num_input_shards + num_heads_per_output_shard = num_heads // num_output_shards + num_kv_heads_per_output_shard = num_kv_heads // num_output_shards + hidden_size = params["dim"] + dims_per_head = hidden_size // num_heads + # base = 10000.0 + # inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + + def permute_rotary(w): + if w.shape == (num_heads, dims_per_head, hidden_size): + N_HEADS = num_heads + elif w.shape == (num_kv_heads, dims_per_head, hidden_size): + N_HEADS = num_kv_heads + else: + assert False + return ( + w.view(N_HEADS, dims_per_head // 2, 2, hidden_size) + .transpose(1, 2) + .reshape(N_HEADS, dims_per_head, hidden_size) + ) + + pbar = tqdm.tqdm(total=num_input_shards + num_layers + 3) + + pbar.set_description(f"Loading shard") + loaded = [] + for i in range(num_input_shards): + loaded.append( + torch.load( + os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), + map_location="cpu", + ) + ) + pbar.set_description(f"Loaded shard {i}/{num_input_shards}") + pbar.update(1) + helper = Helper( + loaded=loaded, + model_path=model_path, + num_output_shards=num_output_shards, + model_size=model_size, + pipeline_parallel=False, + ) + + sequential_cache = [{} for _ in range(num_output_shards)] + + # Embedding in + embeddings_in = torch.cat( + [ + loaded[rank]["tok_embeddings.weight"].cpu() + for rank in range(num_input_shards) + ], + dim=1, + ) + print(embeddings_in.shape) + helper.save_shards( + {"word_embeddings.weight": helper.shard(embeddings_in, dim=0)}, layer_i=0 + ) + helper.del_loaded("tok_embeddings.weight") + pbar.set_description(f"Saved embeddings") + pbar.update(1) + + # Norms + helper.save_duplicates( + {"norm.scale": loaded[0]["norm.weight"]}, layer_i=num_layers + 3 + ) + helper.del_loaded("norm.weight") + pbar.set_description(f"Saved final norm") + pbar.update(1) + + # Embedding out + embeddings_out = torch.cat( + [loaded[rank]["output.weight"].cpu() for rank in range(num_input_shards)], dim=0 + ) + helper.save_shards( + {"final_linear.weight": helper.shard(embeddings_out, dim=0)}, + layer_i=num_layers + 4, + ) + 
helper.del_loaded("output.weight") + pbar.set_description(f"Saved out embeddings") + pbar.update(1) + + # Layers + for layer_i in range(num_layers): + + # Linear + attn_wo = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wo.weight"] + for rank in range(num_input_shards) + ], + dim=1, + ), + dim=1, + ) + mlp_w1 = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w1.weight"] + for rank in range(num_input_shards) + ], + dim=0, + ), + dim=0, + ) + mlp_w2 = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w2.weight"] + for rank in range(num_input_shards) + ], + dim=1, + ), + dim=1, + ) + mlp_w3 = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w3.weight"] + for rank in range(num_input_shards) + ], + dim=0, + ), + dim=0, + ) + helper.del_loaded(f"layers.{layer_i}.attention.wo.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w1.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w2.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w3.weight") + + # Attention + w_q = permute_rotary( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wq.weight"].view( + num_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ) + ) + w_k = permute_rotary( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wk.weight"].view( + num_kv_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ) + ).view(num_heads, int(dims_per_head * (num_kv_heads / num_heads)), hidden_size) + + w_v = torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wv.weight"].view( + num_kv_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ).view(num_heads, int(dims_per_head * (num_kv_heads / num_heads)), hidden_size) + + sharded_qkv = torch.cat( + [ + helper.shard( + w_q, dim=0 + ), # num_output_shards, num_heads_per_output_shard, dims_per_head, hidden_size + helper.shard(w_k, dim=0), + helper.shard(w_v, dim=0), + ], + dim=2, + ) # num_output_shards, num_heads_per_output_shard, QKV=3, dims_per_head, hidden_size + + sharded_qkv = sharded_qkv.view( + num_output_shards, + num_heads_per_output_shard * dims_per_head + + 2 * num_kv_heads_per_output_shard * dims_per_head, + hidden_size, + ) + helper.del_loaded(f"layers.{layer_i}.attention.wq.weight") + helper.del_loaded(f"layers.{layer_i}.attention.wk.weight") + helper.del_loaded(f"layers.{layer_i}.attention.wv.weight") + + # Duplicated + input_layernorm = loaded[0][f"layers.{layer_i}.attention_norm.weight"] + post_attention_layernorm = loaded[0][f"layers.{layer_i}.ffn_norm.weight"] + helper.del_loaded(f"layers.{layer_i}.attention_norm.weight") + helper.del_loaded(f"layers.{layer_i}.ffn_norm.weight") + + for out_rank in range(num_output_shards): + helper.save( + { + "attention.query_key_value.weight": sharded_qkv[out_rank], + # Sharded layers + "attention.dense.weight": attn_wo[out_rank].clone(), + "mlp.w1.weight": mlp_w1[out_rank].clone(), + "mlp.w2.weight": mlp_w2[out_rank].clone(), + "mlp.w3.weight": mlp_w3[out_rank].clone(), + # Duplicated layers + "input_layernorm.scale": input_layernorm, + "post_attention_layernorm.scale": post_attention_layernorm, + }, + layer_i=layer_i + 2, + rank=out_rank, + ) + + pbar.set_description(f"Saved layer {layer_i} / {num_layers}") + pbar.update(1) + + model_state = { + "dp_world_size": 1, + "mp_world_size": num_output_shards, + 
"module": {}, + "optimizer": {}, + "global_steps": 1, + "skipped_steps": 1, + "iteration": 1, + } + for rank in range(num_output_shards): + torch.save( + model_state, os.path.join(model_path, f"mp_rank_{rank:02d}_model_states.pt") + ) + pbar.set_description("Done.") + + +def convert_model_sequential( + output_base_path, input_base_path, model_size: str, num_output_shards: int +): + assert model_size in NUM_SHARDS + + model_path = os.path.join(output_base_path, "global_step0") + os.makedirs(model_path, exist_ok=True) + write_file("global_step0", os.path.join(output_base_path, "latest")) + + params = read_json(os.path.join(input_base_path, "params.json")) + num_input_shards = NUM_SHARDS[model_size] + num_layers = params["n_layers"] + num_heads = params["n_heads"] + if "n_kv_heads" in params: + num_kv_heads = params["n_kv_heads"] + else: + num_kv_heads = num_heads + num_kv_heads_per_input_shard = num_kv_heads // num_input_shards + num_heads_per_input_shard = num_heads // num_input_shards + num_heads_per_output_shard = num_heads // num_output_shards + num_kv_heads_per_output_shard = num_kv_heads // num_output_shards + hidden_size = params["dim"] + dims_per_head = hidden_size // num_heads + # base = 10000.0 + # inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + + def permute_rotary(w): + if w.shape == (num_heads, dims_per_head, hidden_size): + N_HEADS = num_heads + elif w.shape == (num_kv_heads, dims_per_head, hidden_size): + N_HEADS = num_kv_heads + else: + assert False + return ( + w.view(N_HEADS, dims_per_head // 2, 2, hidden_size) + .transpose(1, 2) + .reshape(N_HEADS, dims_per_head, hidden_size) + ) + + pbar = tqdm.tqdm(total=num_input_shards + num_output_shards) + + pbar.set_description(f"Loading shard") + loaded = [] + for i in range(num_input_shards): + loaded.append( + torch.load( + os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), + map_location="cpu", + ) + ) + pbar.set_description(f"Loaded shard {i}/{num_input_shards}") + pbar.update(1) + helper = Helper( + loaded=loaded, + model_path=model_path, + num_output_shards=num_output_shards, + model_size=model_size, + pipeline_parallel=False, + ) + + # Embedding in + embeddings_in = torch.cat( + [ + loaded[rank]["tok_embeddings.weight"].cpu() + for rank in range(num_input_shards) + ], + dim=1, + ) + + helper.add_sequential_shard( + {"word_embeddings.weight": helper.shard(embeddings_in, dim=0)}, layer_i=0 + ) + helper.del_loaded("tok_embeddings.weight") + + # Norms + helper.add_sequential_duplicates( + {"norm.scale": loaded[0]["norm.weight"]}, layer_i=num_layers + 3 + ) + helper.del_loaded("norm.weight") + + # Embedding out + embeddings_out = torch.cat( + [loaded[rank]["output.weight"].cpu() for rank in range(num_input_shards)], dim=0 + ) + helper.add_sequential_shard( + {"final_linear.weight": helper.shard(embeddings_out, dim=0)}, + layer_i=num_layers + 4, + ) + helper.del_loaded("output.weight") + + # Layers + for layer_i in range(num_layers): + + # Linear + attn_wo = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wo.weight"] + for rank in range(num_input_shards) + ], + dim=1, + ), + dim=1, + ) + mlp_w1 = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w1.weight"] + for rank in range(num_input_shards) + ], + dim=0, + ), + dim=0, + ) + mlp_w2 = helper.shard( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w2.weight"] + for rank in range(num_input_shards) + ], + dim=1, + ), + dim=1, + ) + mlp_w3 = helper.shard( + 
torch.cat( + [ + loaded[rank][f"layers.{layer_i}.feed_forward.w3.weight"] + for rank in range(num_input_shards) + ], + dim=0, + ), + dim=0, + ) + helper.del_loaded(f"layers.{layer_i}.attention.wo.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w1.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w2.weight") + helper.del_loaded(f"layers.{layer_i}.feed_forward.w3.weight") + + # Attention + w_q = permute_rotary( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wq.weight"].view( + num_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ) + ) + + w_k = permute_rotary( + torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wk.weight"].view( + num_kv_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ) + ).view(num_heads, int(dims_per_head * (num_kv_heads / num_heads)), hidden_size) + + w_v = torch.cat( + [ + loaded[rank][f"layers.{layer_i}.attention.wv.weight"].view( + num_kv_heads_per_input_shard, dims_per_head, hidden_size + ) + for rank in range(num_input_shards) + ], + dim=0, + ).view(num_heads, int(dims_per_head * (num_kv_heads / num_heads)), hidden_size) + + sharded_qkv = torch.cat( + [ + helper.shard( + w_q, dim=0 + ), # num_output_shards, num_heads_per_output_shard, dims_per_head, hidden_size + helper.shard(w_k, dim=0), + helper.shard(w_v, dim=0), + ], + dim=2, + ) # num_output_shards, num_heads_per_output_shard, QKV=3, dims_per_head, hidden_size + + sharded_qkv = sharded_qkv.view( + num_output_shards, + num_heads_per_output_shard * dims_per_head + + 2 * num_kv_heads_per_output_shard * dims_per_head, + hidden_size, + ) + + helper.del_loaded(f"layers.{layer_i}.attention.wq.weight") + helper.del_loaded(f"layers.{layer_i}.attention.wk.weight") + helper.del_loaded(f"layers.{layer_i}.attention.wv.weight") + + # Duplicated + input_layernorm = loaded[0][f"layers.{layer_i}.attention_norm.weight"] + post_attention_layernorm = loaded[0][f"layers.{layer_i}.ffn_norm.weight"] + helper.del_loaded(f"layers.{layer_i}.attention_norm.weight") + helper.del_loaded(f"layers.{layer_i}.ffn_norm.weight") + + for out_rank in range(num_output_shards): + helper.add_sequential( + { + "attention.query_key_value.weight": sharded_qkv[out_rank], + # Sharded layers + "attention.dense.weight": attn_wo[out_rank].clone(), + "mlp.w1.weight": mlp_w1[out_rank].clone(), + "mlp.w2.weight": mlp_w2[out_rank].clone(), + "mlp.w3.weight": mlp_w3[out_rank].clone(), + # Duplicated layers + "input_layernorm.scale": input_layernorm, + "post_attention_layernorm.scale": post_attention_layernorm, + }, + layer_i=layer_i + 2, + rank=out_rank, + ) + + for rank in range(num_output_shards): + model_state = { + "dp_world_size": 1, + "mp_world_size": num_output_shards, + "module": helper.sequential_cache[rank], + "optimizer": {}, + "global_steps": 1, + "skipped_steps": 1, + "iteration": 1, + } + torch.save( + model_state, os.path.join(model_path, f"mp_rank_{rank:02d}_model_states.pt") + ) + pbar.set_description(f"Saved shard {rank}") + pbar.update(1) + pbar.set_description("Done.") + + +class Helper: + def __init__( + self, loaded, model_size, num_output_shards, model_path, pipeline_parallel + ): + self.loaded = loaded + self.model_size = model_size + self.num_output_shards = num_output_shards + self.model_path = model_path + + self.pipeline_parallel = pipeline_parallel + self.sequential_cache = [{} for _ in range(num_output_shards)] + + def del_loaded(self, key: str): + # Remove from memory as 
we go along + for loaded_shared in self.loaded: + del loaded_shared[key] + + def save_shards(self, dictionary, layer_i: int): + for k, v in dictionary.items(): + assert v.shape[0] == self.num_output_shards + for rank in range(self.num_output_shards): + torch.save( + {k: v[rank].clone() for k, v in dictionary.items()}, + self.save_path(layer_i=layer_i, rank=rank), + ) + + def save_duplicates(self, dictionary, layer_i: int): + for rank in range(self.num_output_shards): + torch.save( + {k: v.clone() for k, v in dictionary.items()}, + self.save_path(layer_i=layer_i, rank=rank), + ) + + def save(self, obj, layer_i, rank): + torch.save(obj, self.save_path(layer_i=layer_i, rank=rank)) + + def shard(self, x, dim): + x_shape = list(x.shape) + assert x_shape[dim] % self.num_output_shards == 0 + new_x_shape = ( + x_shape[:dim] + + [self.num_output_shards, x_shape[dim] // self.num_output_shards] + + x_shape[dim + 1 :] + ) + x = x.view(*new_x_shape) + return torch.movedim(x, 0, dim) + + def save_path(self, layer_i, rank): + return os.path.join( + self.model_path, f"layer_{layer_i:02d}-model_{rank:02d}-model_states.pt" + ) + + def add_sequential_shard(self, dictionary, layer_i): + assert not self.pipeline_parallel + for k, v in dictionary.items(): + for rank in range(self.num_output_shards): + self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v[ + rank + ].clone() + + def add_sequential_duplicates(self, dictionary, layer_i): + assert not self.pipeline_parallel + for k, v in dictionary.items(): + for rank in range(self.num_output_shards): + self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v.clone() + + def add_sequential(self, dictionary, layer_i, rank): + assert not self.pipeline_parallel + for k, v in dictionary.items(): + self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v.clone() + + +def main(): + parser = argparse.ArgumentParser( + description="Convert raw LLaMA or Mistral checkpoints to GPT-NeoX format." + ) + parser.add_argument( + "--input_dir", + help="Location of parent directory, which contains tokenizer.model and model weights subfolders", + ) + parser.add_argument( + "--model_size", + choices=["7B", "mistral-7B-v0.1", "13B", "30B", "34B", "65B", "tokenizer_only"], + ) + parser.add_argument( + "--output_dir", + help="Location to write GPT-NeoX model", + ) + parser.add_argument( + "--num_output_shards", + type=int, + default=1, + ) + parser.add_argument( + "--pipeline_parallel", + action="store_true", + help="Only use if PP>1", + ) + args = parser.parse_args() + if args.pipeline_parallel: + print("parallel") + convert_model_pipeline( + output_base_path=args.output_dir, + input_base_path=os.path.join(args.input_dir, args.model_size), + model_size=args.model_size, + num_output_shards=args.num_output_shards, + ) + else: + print("sequential") + convert_model_sequential( + output_base_path=args.output_dir, + input_base_path=os.path.join(args.input_dir, args.model_size), + model_size=args.model_size, + num_output_shards=args.num_output_shards, + ) + + +if __name__ == "__main__": + main() diff --git a/tools/ckpts/inspect_checkpoints.py b/tools/ckpts/inspect_checkpoints.py new file mode 100644 index 0000000000000000000000000000000000000000..a23d456cab0d5d1af48947aa818ebd04b412caa3 --- /dev/null +++ b/tools/ckpts/inspect_checkpoints.py @@ -0,0 +1,335 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Adapted from https://github.com/awaelchli/pytorch-lightning-snippets/blob/master/checkpoint/peek.py + +import code +import os +import re +from argparse import ArgumentParser, Namespace +from collections.abc import Mapping, Sequence +from pathlib import Path + +import torch + + +class COLORS: + BLUE = "\033[94m" + CYAN = "\033[96m" + GREEN = "\033[92m" + RED = "\033[31m" + YELLOW = "\033[33m" + MAGENTA = "\033[35m" + WHITE = "\033[37m" + UNDERLINE = "\033[4m" + END = "\033[0m" + + +PRIMITIVE_TYPES = (int, float, bool, str, type) + + +def natural_sort(l): + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [convert(c) for c in re.split("([0-9]+)", str(key))] + return sorted(l, key=alphanum_key) + + +def sizeof_fmt(num, suffix="B"): + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, "Yi", suffix) + + +def pretty_print(contents: dict): + """Prints a nice summary of the top-level contents in a checkpoint dictionary.""" + col_size = max(len(str(k)) for k in contents) + for k, v in sorted(contents.items()): + key_length = len(str(k)) + line = " " * (col_size - key_length) + line += f"{k}: {COLORS.BLUE}{type(v).__name__}{COLORS.END}" + if isinstance(v, dict): + pretty_print(v) + elif isinstance(v, PRIMITIVE_TYPES): + line += f" = " + line += f"{COLORS.CYAN}{repr(v)}{COLORS.END}" + elif isinstance(v, Sequence): + line += ", " + line += f"{COLORS.CYAN}len={len(v)}{COLORS.END}" + elif isinstance(v, torch.Tensor): + if v.ndimension() in (0, 1) and v.numel() == 1: + line += f" = " + line += f"{COLORS.CYAN}{v.item()}{COLORS.END}" + else: + line += ", " + line += f"{COLORS.CYAN}shape={list(v.shape)}{COLORS.END}" + line += ", " + line += f"{COLORS.CYAN}dtype={v.dtype}{COLORS.END}" + line += ( + ", " + + f"{COLORS.CYAN}size={sizeof_fmt(v.nelement() * v.element_size())}{COLORS.END}" + ) + print(line) + + +def common_entries(*dcts): + if not dcts: + return + for i in set(dcts[0]).intersection(*dcts[1:]): + yield (i,) + tuple(d[i] for d in dcts) + + +def pretty_print_double(contents1: dict, contents2: dict, args): + """Prints a nice summary of the top-level contents in a checkpoint dictionary.""" + col_size = max( + max(len(str(k)) for k in contents1), max(len(str(k)) for k in contents2) + ) + common_keys = list(contents1.keys() & contents2.keys()) + uncommon_keys_1 = [i for i in contents2.keys() if i not in common_keys] + uncommon_keys_2 = [i for i in contents1.keys() if i not in common_keys] + diffs_found = False + if uncommon_keys_1 + uncommon_keys_2: + diffs_found = True + if uncommon_keys_1: + print( + f"{COLORS.RED}{len(uncommon_keys_1)} key(s) found in ckpt 1 that isn't present in ckpt 2:{COLORS.END} \n\t{COLORS.BLUE}{' '.join(uncommon_keys_1)}{COLORS.END}" + ) + if uncommon_keys_2: + print( + f"{COLORS.RED}{len(uncommon_keys_2)} key(s) found in ckpt 2 that isn't present in ckpt 1:{COLORS.END} \n\t{COLORS.BLUE}{' '.join(uncommon_keys_2)}{COLORS.END}" + ) + for k, v1, v2 in sorted(common_entries(contents1, contents2)): + 
key_length = len(str(k))
+        line = " " * (col_size - key_length)
+        if type(v1) != type(v2):
+            print(
+                f"{COLORS.RED}{k} is a different type between ckpt1 and ckpt2: ({type(v1).__name__} vs. {type(v2).__name__}){COLORS.END}"
+            )
+            continue
+        else:
+            prefix = f"{k}: {COLORS.BLUE}{type(v1).__name__} | {type(v2).__name__}{COLORS.END}"
+        if isinstance(v1, dict):
+            pretty_print_double(v1, v2, args)
+        elif isinstance(v1, PRIMITIVE_TYPES):
+            if repr(v1) != repr(v2):
+                c = COLORS.RED
+                line += f" = "
+                line += f"{c}{repr(v1)} | {repr(v2)}{COLORS.END}"
+            else:
+                c = COLORS.CYAN
+                if not args.diff:
+                    line += f" = "
+                    line += f"{c}{repr(v1)} | {repr(v2)}{COLORS.END}"
+        elif isinstance(v1, Sequence):
+            if len(v1) != len(v2):
+                c = COLORS.RED
+                line += ", "
+                line += f"{c}len={len(v1)} | len={len(v2)}{COLORS.END}"
+            else:
+                c = COLORS.CYAN
+                if not args.diff:
+                    line += ", "
+                    line += f"{c}len={len(v1)} | len={len(v2)}{COLORS.END}"
+        elif isinstance(v1, torch.Tensor):
+            if v1.ndimension() != v2.ndimension():
+                c = COLORS.RED
+            else:
+                c = COLORS.CYAN
+
+            if (v1.ndimension() in (0, 1) and v1.numel() == 1) and (
+                v2.ndimension() in (0, 1) and v2.numel() == 1
+            ):
+                if not args.diff:
+                    line += f" = "
+                    line += f"{c}{v1.item()} | {c}{v2.item()}{COLORS.END}"
+            else:
+                if list(v1.shape) != list(v2.shape):
+                    c = COLORS.RED
+                    line += ", "
+                    line += f"{c}shape={list(v1.shape)} | shape={list(v2.shape)}{COLORS.END}"
+                else:
+                    c = COLORS.CYAN
+                    if not args.diff:
+                        line += ", "
+                        line += f"{c}shape={list(v1.shape)} | shape={list(v2.shape)}{COLORS.END}"
+                if v1.dtype != v2.dtype:
+                    c = COLORS.RED
+                    line += ", "
+                    line += f"{c}dtype={v1.dtype} | dtype={v2.dtype}{COLORS.END}"
+
+                else:
+                    c = COLORS.CYAN
+                    if not args.diff:
+                        line += ", "
+                        line += f"{c}dtype={v1.dtype} | dtype={v2.dtype}{COLORS.END}"
+                if list(v1.shape) == list(v2.shape):
+                    if torch.allclose(v1, v2):
+                        if not args.diff:
+                            line += f", {COLORS.CYAN}VALUES EQUAL{COLORS.END}"
+                    else:
+                        line += f", {COLORS.RED}VALUES DIFFER{COLORS.END}"
+
+        if line.replace(" ", "") != "":
+            line = prefix + line
+            print(line)
+            diffs_found = True
+    if args.diff and not diffs_found:
+        pass
+    else:
+        if not args.diff:
+            print("\n")
+
+    return diffs_found
+
+
+def get_attribute(obj: object, name: str) -> object:
+    if isinstance(obj, Mapping):
+        return obj[name]
+    # works for Namespace objects and anything else with named attributes
+    return getattr(obj, name)
+
+
+def get_files(pth):
+    if os.path.isdir(pth):
+        files = list(Path(pth).glob("*.pt")) + list(Path(pth).glob("*.ckpt"))
+    elif os.path.isfile(pth):
+        assert pth.endswith(".pt") or pth.endswith(".ckpt")
+        files = [Path(pth)]
+    else:
+        raise ValueError("Dir / File not found.")
+    return natural_sort(files)
+
+
+def peek(args: Namespace):
+
+    files = get_files(args.dir)
+
+    for file in files:
+        file = Path(file).absolute()
+        print(f"{COLORS.GREEN}{file.name}:{COLORS.END}")
+        ckpt = torch.load(file, map_location=torch.device("cpu"))
+        selection = dict()
+        attribute_names = args.attributes or list(ckpt.keys())
+        for name in attribute_names:
+            parts = name.split("/")
+            current = ckpt
+            for part in parts:
+                current = get_attribute(current, part)
+            selection.update({name: current})
+        pretty_print(selection)
+        print("\n")
+
+    if args.interactive:
+        code.interact(
+            banner="Entering interactive shell.
You can access the checkpoint contents through the local variable 'checkpoint'.", + local={"checkpoint": ckpt, "torch": torch}, + ) + + +def get_shared_fnames(files_1, files_2): + names_1 = [Path(i).name for i in files_1] + names_1_parent = Path(files_1[0]).parent + names_2 = [Path(i).name for i in files_2] + names_2_parent = Path(files_2[0]).parent + shared_names = list(set.intersection(*map(set, [names_1, names_2]))) + return [names_1_parent / i for i in shared_names], [ + names_2_parent / i for i in shared_names + ] + + +def get_selection(filename, args): + ckpt = torch.load(filename, map_location=torch.device("cpu")) + selection = dict() + attribute_names = args.attributes or list(ckpt.keys()) + for name in attribute_names: + parts = name.split("/") + current = ckpt + for part in parts: + current = get_attribute(current, part) + selection.update({name: current}) + return selection + + +def compare(args: Namespace): + dirs = [i.strip() for i in args.dir.split(",")] + assert len(dirs) == 2, "Only works with 2 directories / files" + files_1 = get_files(dirs[0]) + files_2 = get_files(dirs[1]) + files_1, files_2 = get_shared_fnames(files_1, files_2) + + for file1, file2 in zip(files_1, files_2): + file1 = Path(file1).absolute() + file2 = Path(file2).absolute() + print(f"COMPARING {COLORS.GREEN}{file1.name} & {file2.name}:{COLORS.END}") + selection_1 = get_selection(file1, args) + selection_2 = get_selection(file2, args) + diffs_found = pretty_print_double(selection_1, selection_2, args) + if args.diff and diffs_found: + print( + f"{COLORS.RED}THE ABOVE DIFFS WERE FOUND IN {file1.name} & {file2.name} ^{COLORS.END}\n" + ) + + if args.interactive: + code.interact( + banner="Entering interactive shell. You can access the checkpoint contents through the local variable 'selection_1' / 'selection_2'.\nPress Ctrl-D to exit.", + local={ + "selection_1": selection_1, + "selection_2": selection_2, + "torch": torch, + }, + ) + + +def main(): + parser = ArgumentParser() + parser.add_argument( + "dir", + type=str, + help="The checkpoint dir to inspect. Must be either: \ + - a directory containing pickle binaries saved with 'torch.save' ending in .pt or .ckpt \ + - a single path to a .pt or .ckpt file \ + - two comma separated directories - in which case the script will *compare* the two checkpoints", + ) + parser.add_argument( + "--attributes", + nargs="*", + help="Name of one or several attributes to query. To access an attribute within a nested structure, use '/' as separator.", + default=None, + ) + parser.add_argument( + "--interactive", + "-i", + action="store_true", + help="Drops into interactive shell after printing the summary.", + ) + parser.add_argument( + "--compare", + "-c", + action="store_true", + help="If true, script will compare two directories separated by commas", + ) + parser.add_argument( + "--diff", "-d", action="store_true", help="In compare mode, only print diffs" + ) + + args = parser.parse_args() + if args.compare: + compare(args) + else: + peek(args) + + +if __name__ == "__main__": + main() diff --git a/tools/ckpts/merge20b.py b/tools/ckpts/merge20b.py new file mode 100644 index 0000000000000000000000000000000000000000..09029bb5bd8721731858d9e7d03b4ac4b3bd8fac --- /dev/null +++ b/tools/ckpts/merge20b.py @@ -0,0 +1,282 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import torch +import yaml +import shutil +from tqdm import auto as tqdm_lib + + +VOCAB_SIZE = 50432 +IGNORED_MODEL_STATE_KEYS = [ + "optimizer", + "random_rng_state", + "np_rng_state", + "torch_rng_state", + "cuda_rng_state", + "rng_tracker_states", +] + + +def modify_config(input_config_path, output_config_path, output_dir): + with open(input_config_path) as f: + loaded_config = yaml.full_load(f) + + # replace model/pipeline parallel + loaded_config["model_parallel_size"] = 1 + loaded_config["pipe_parallel_size"] = 1 + + # replace load / save directories: + loaded_config["load"] = output_dir + loaded_config["save"] = output_dir + + # replace some other paths + loaded_config["vocab_file"] = os.path.join(output_dir, "20B_tokenizer.json") + loaded_config["log_dir"] = "./logs" + + # we need to make sure the resulting vocab size is correct + # do this by modifying the 'make_vocab_size_divisible_by' argument to be + # orig * (orig_mp / mp_out) + loaded_config["make_vocab_size_divisible_by"] = VOCAB_SIZE + + # remove zero optimizer + loaded_config["zero_optimization"]["stage"] = 0 + + with open(output_config_path, "w") as f: + yaml.dump(loaded_config, f) + + +def modify_model_states(input_model_state_path, output_model_state_path): + model_state = torch.load(input_model_state_path) + for key in IGNORED_MODEL_STATE_KEYS: + del model_state[key] + model_state["mp_world_size"] = 1 + model_state["dp_world_size"] = 1 # could make this configurable? 
+ model_state["args"]["model_parallel_size"] = 1 + model_state["args"]["make_vocab_size_divisible_by"] = VOCAB_SIZE + torch.save(model_state, output_model_state_path) + + +def merge_model_weights(input_checkpoint_path, output_checkpoint_path): + pbar = tqdm_lib.tqdm(total=47) + + # Load transformer layers + for layer_i in range(44): + pbar.set_description(f"Merging layer {layer_i}") + filename_tp1 = f"layer_{layer_i + 2:02d}-model_00-model_states.pt" + filename_tp2 = f"layer_{layer_i + 2:02d}-model_01-model_states.pt" + loaded_tp1 = torch.load(os.path.join(input_checkpoint_path, filename_tp1)) + loaded_tp2 = torch.load(os.path.join(input_checkpoint_path, filename_tp2)) + # noinspection PyDictCreation + merged = {} + + # RowParallelLinear + merged["mlp.dense_4h_to_h.weight"] = torch.cat( + [ + loaded_tp1["mlp.dense_4h_to_h.weight"], + loaded_tp2["mlp.dense_4h_to_h.weight"], + ], + dim=1, + ) + merged["attention.dense.weight"] = torch.cat( + [ + loaded_tp1["attention.dense.weight"], + loaded_tp2["attention.dense.weight"], + ], + dim=1, + ) + merged["mlp.dense_4h_to_h.bias"] = ( + loaded_tp1["mlp.dense_4h_to_h.bias"] + loaded_tp2["mlp.dense_4h_to_h.bias"] + ) + merged["attention.dense.bias"] = ( + loaded_tp1["attention.dense.bias"] + loaded_tp2["attention.dense.bias"] + ) + + # Layer Norms + merged["input_layernorm.weight"] = ( + loaded_tp1["input_layernorm.weight"] + loaded_tp2["input_layernorm.weight"] + ) / 2 + merged["input_layernorm.bias"] = ( + loaded_tp1["input_layernorm.bias"] + loaded_tp2["input_layernorm.bias"] + ) / 2 + merged["post_attention_layernorm.weight"] = ( + loaded_tp1["post_attention_layernorm.weight"] + + loaded_tp2["post_attention_layernorm.weight"] + ) / 2 + merged["post_attention_layernorm.bias"] = ( + loaded_tp1["post_attention_layernorm.bias"] + + loaded_tp2["post_attention_layernorm.bias"] + ) / 2 + + # ColumnParallelLinear + merged["mlp.dense_h_to_4h.weight"] = torch.cat( + [ + loaded_tp1["mlp.dense_h_to_4h.weight"], + loaded_tp2["mlp.dense_h_to_4h.weight"], + ], + dim=0, + ) + merged["mlp.dense_h_to_4h.bias"] = torch.cat( + [ + loaded_tp1["mlp.dense_h_to_4h.bias"], + loaded_tp2["mlp.dense_h_to_4h.bias"], + ], + dim=0, + ) + merged["attention.query_key_value.weight"] = torch.cat( + [ + loaded_tp1["attention.query_key_value.weight"], + loaded_tp2["attention.query_key_value.weight"], + ], + dim=0, + ) + merged["attention.query_key_value.bias"] = torch.cat( + [ + loaded_tp1["attention.query_key_value.bias"], + loaded_tp2["attention.query_key_value.bias"], + ], + dim=0, + ) + + # Just take one + merged["attention.rotary_emb.inv_freq"] = loaded_tp1[ + "attention.rotary_emb.inv_freq" + ] + + torch.save(merged, os.path.join(output_checkpoint_path, filename_tp1)) + del loaded_tp1 + del loaded_tp2 + pbar.update(1) + + # Load input embedding + pbar.set_description(f"Merging input embedding") + loaded_tp1 = torch.load( + os.path.join(input_checkpoint_path, "layer_00-model_00-model_states.pt") + ) + loaded_tp2 = torch.load( + os.path.join(input_checkpoint_path, "layer_00-model_01-model_states.pt") + ) + merged = { + "word_embeddings.weight": torch.cat( + [ + loaded_tp1["word_embeddings.weight"], + loaded_tp2["word_embeddings.weight"], + ], + dim=0, + ) + } + torch.save( + merged, + os.path.join(output_checkpoint_path, "layer_00-model_00-model_states.pt"), + ) + del loaded_tp1 + del loaded_tp2 + pbar.update(1) + + # Load final layer norm + pbar.set_description(f"Merging final layer norm") + loaded_tp1 = torch.load( + os.path.join(input_checkpoint_path, 
"layer_47-model_00-model_states.pt") + ) + loaded_tp2 = torch.load( + os.path.join(input_checkpoint_path, "layer_47-model_01-model_states.pt") + ) + merged = { + "norm.weight": (loaded_tp1["norm.weight"] + loaded_tp2["norm.weight"]) / 2, + "norm.bias": (loaded_tp1["norm.bias"] + loaded_tp2["norm.bias"]) / 2, + } + torch.save( + merged, + os.path.join(output_checkpoint_path, "layer_47-model_00-model_states.pt"), + ) + del loaded_tp1 + del loaded_tp2 + pbar.update(1) + + # Load output embedding + pbar.set_description(f"Merging output embedding") + loaded_tp1 = torch.load( + os.path.join(input_checkpoint_path, "layer_48-model_00-model_states.pt") + ) + loaded_tp2 = torch.load( + os.path.join(input_checkpoint_path, "layer_48-model_01-model_states.pt") + ) + merged = { + "final_linear.weight": torch.cat( + [ + loaded_tp1["final_linear.weight"], + loaded_tp2["final_linear.weight"], + ], + dim=0, + ), + } + torch.save( + merged, + os.path.join(output_checkpoint_path, "layer_48-model_00-model_states.pt"), + ) + del loaded_tp1 + del loaded_tp2 + pbar.update(1) + pbar.set_description("Done.") + + +def merge(input_dir, output_dir): + input_checkpoint_path = os.path.join(input_dir, "global_step150000") + output_checkpoint_path = os.path.join(output_dir, "global_step150000") + os.makedirs(output_checkpoint_path, exist_ok=True) + os.makedirs(os.path.join(output_dir, "configs"), exist_ok=True) + for i in range(8): + modify_model_states( + input_model_state_path=os.path.join( + input_checkpoint_path, f"mp_rank_{i:02d}_model_states.pt" + ), + output_model_state_path=os.path.join( + output_checkpoint_path, f"mp_rank_{i:02d}_model_states.pt" + ), + ) + modify_config( + input_config_path=os.path.join(input_dir, "configs", "20B.yml"), + output_config_path=os.path.join(output_dir, "configs", "20B.yml"), + output_dir=output_dir, + ) + merge_model_weights( + input_checkpoint_path=input_checkpoint_path, + output_checkpoint_path=output_checkpoint_path, + ) + shutil.copyfile( + os.path.join(input_dir, "20B_tokenizer.json"), + os.path.join(output_dir, "20B_tokenizer.json"), + ) + with open(os.path.join(output_dir, "latest"), "w") as f: + f.write("global_step150000") + + +def main(): + parser = argparse.ArgumentParser(description="Merge 20B checkpoint.") + parser.add_argument( + "--input_dir", + type=str, + help='Checkpoint dir, which should contain (e.g. a folder named "global_step150000")', + ) + parser.add_argument( + "--output_dir", type=str, help="Output dir, to save the 1-GPU weights configs" + ) + args = parser.parse_args() + merge(args.input_dir, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/tools/ckpts/upload.py b/tools/ckpts/upload.py new file mode 100644 index 0000000000000000000000000000000000000000..01d585be86e99fddf1a0ea84f02c8bd06bd6a0a9 --- /dev/null +++ b/tools/ckpts/upload.py @@ -0,0 +1,52 @@ +# Copyright (c) 2024, EleutherAI +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import sys
+
+from huggingface_hub import HfApi, create_repo
+
+converted_ckpt = sys.argv[1]
+repo_name = sys.argv[2]
+branch_name = sys.argv[3]
+try:
+    create_repo(repo_name, repo_type="model", private=False)
+except Exception:
+    print(f"repo {repo_name} already exists!")
+    pass
+
+files = os.listdir(converted_ckpt)
+
+api = HfApi()
+if branch_name != "main":
+    try:
+        api.create_branch(
+            repo_id=repo_name,
+            repo_type="model",
+            branch=branch_name,
+        )
+    except Exception:
+        print(f"branch {branch_name} already exists, try again...")
+print(f"to upload: {files}")
+for file in files:
+    print(f"Uploading {file} to branch {branch_name}...")
+    api.upload_file(
+        path_or_fileobj=os.path.join(converted_ckpt, file),
+        path_in_repo=file,
+        repo_id=repo_name,
+        repo_type="model",
+        commit_message=f"Upload {file}",
+        revision=branch_name,
+    )
+    print(f"Successfully uploaded {file}!")
diff --git a/tools/datasets/README.md b/tools/datasets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..af3009a23907bbee358dcd9e62ffe26fda0dc6f0
--- /dev/null
+++ b/tools/datasets/README.md
@@ -0,0 +1,169 @@
+# Data Scripts
+
+## `preprocess_data.py`
+Takes a raw dataset, splits it up, tokenizes it, and saves it as numpy files that can be memmapped and used efficiently by the training code.
+
+```
+usage: preprocess_data.py [-h] --input INPUT [--jsonl-keys JSONL_KEYS [JSONL_KEYS ...]] [--num-docs NUM_DOCS]
+                          --tokenizer-type
+                          {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer,TiktokenTokenizer,SPMTokenizer}
+                          [--vocab-file VOCAB_FILE] [--merge-file MERGE_FILE] [--append-eod] [--ftfy] --output-prefix
+                          OUTPUT_PREFIX [--dataset-impl {lazy,cached,mmap}] [--workers WORKERS]
+                          [--log-interval LOG_INTERVAL]
+
+options:
+  -h, --help            show this help message and exit
+
+input data:
+  --input INPUT         Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma
+                        separated list
+  --jsonl-keys JSONL_KEYS [JSONL_KEYS ...]
+                        space-separated list of keys to extract from jsonl. Default: text
+  --num-docs NUM_DOCS   Optional: Number of documents in the input data (if known) for an accurate progress bar.
+
+tokenizer:
+  --tokenizer-type {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer,TiktokenTokenizer,SPMTokenizer}
+                        What type of tokenizer to use.
+  --vocab-file VOCAB_FILE
+                        Path to the vocab file
+  --merge-file MERGE_FILE
+                        Path to the BPE merge file (if necessary).
+  --append-eod          Append an <eod> token to the end of a document.
+  --ftfy                Use ftfy to clean text
+
+output data:
+  --output-prefix OUTPUT_PREFIX
+                        Path to binary output file without suffix
+  --dataset-impl {lazy,cached,mmap}
+                        Dataset implementation to use. Default: mmap
+
+runtime:
+  --workers WORKERS     Number of worker processes to launch
+  --log-interval LOG_INTERVAL
+                        Interval between progress updates
+```
+## `preprocess_data_with_mask.py`
+Does the same, but also creates `label` tensors if the dataset has labels.
+
+N.B. If using this, you **must** specify your data when training/finetuning with the following configs
+```json
+"train_data_paths": ["train_documents"],
+"test_data_paths": ["test_documents"],
+"valid_data_paths": ["test_documents"],
+"label_data_paths": ["label_documents"]
+```
+
+The `"data_path"` option will not work with `"label_data_paths"`.
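+
+As an illustrative example (all paths here are hypothetical, and the token ids
+are taken from the `--mask-before-token` help text below), a run that applies
+loss masking before the token pattern `[0,1,1270]` might look like:
+
+```bash
+python tools/datasets/preprocess_data_with_mask.py \
+    --input ./data/mydataset.jsonl \
+    --output-prefix ./data/mydataset \
+    --vocab-file gpt2-vocab.json \
+    --merge-file gpt2-merges.txt \
+    --tokenizer-type GPT2BPETokenizer \
+    --mask-before-token 0,1,1270 \
+    --append-eod \
+    --workers 8
+```
+
+The full option reference follows.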
+
+
+```
+usage: preprocess_data_with_mask.py [-h] --input INPUT [--jsonl-keys JSONL_KEYS [JSONL_KEYS ...]]
+                                    [--mask-before-token MASK_BEFORE_TOKEN] [--num-docs NUM_DOCS] --tokenizer-type
+                                    {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer}
+                                    [--vocab-file VOCAB_FILE] [--merge-file MERGE_FILE] [--append-eod] [--ftfy]
+                                    --output-prefix OUTPUT_PREFIX [--dataset-impl {lazy,cached,mmap}]
+                                    [--workers WORKERS] [--log-interval LOG_INTERVAL]
+
+options:
+  -h, --help            show this help message and exit
+
+input data:
+  --input INPUT         Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma
+                        separated list
+  --jsonl-keys JSONL_KEYS [JSONL_KEYS ...]
+                        space-separated list of keys to extract from jsonl. Default: text
+  --mask-before-token MASK_BEFORE_TOKEN
+                        apply loss masks before certain token(s). If multi-token pattern, separate by commas without
+                        space, e.g. --mask-before-token 0,1,1270 to use the token pattern [0,1,1270].
+  --num-docs NUM_DOCS   Optional: Number of documents in the input data (if known) for an accurate progress bar.
+
+tokenizer:
+  --tokenizer-type {HFGPT2Tokenizer,HFTokenizer,GPT2BPETokenizer,CharLevelTokenizer}
+                        What type of tokenizer to use.
+  --vocab-file VOCAB_FILE
+                        Path to the vocab file
+  --merge-file MERGE_FILE
+                        Path to the BPE merge file (if necessary).
+  --append-eod          Append an <eod> token to the end of a document.
+  --ftfy                Use ftfy to clean text
+
+output data:
+  --output-prefix OUTPUT_PREFIX
+                        Path to binary output file without suffix
+  --dataset-impl {lazy,cached,mmap}
+                        Dataset implementation to use. Default: mmap
+
+runtime:
+  --workers WORKERS     Number of worker processes to launch
+  --log-interval LOG_INTERVAL
+                        Interval between progress updates
+```
+## `preprocess_data_with_chat_template.py`
+Similar, but uses Hugging Face's [chat templates](https://huggingface.co/docs/transformers/main/en/chat_templating) to
+tokenize the data to support multiturn and more complicated use cases.
+
+N.B. If using this, you **must** specify your data when training/finetuning with the following configs
+```json
+"train_data_paths": ["train_documents"],
+"test_data_paths": ["test_documents"],
+"valid_data_paths": ["test_documents"],
+"label_data_paths": ["label_documents"]
+```
+
+The `"data_path"` option will not work with `"label_data_paths"`.
+
+
+```
+usage: preprocess_data_with_chat_template.py [-h] --input INPUT [--jsonl-keys JSONL_KEYS [JSONL_KEYS ...]] [--no-mask]
+                                             [--generation-role GENERATION_ROLE] [--only-last] [--num-docs NUM_DOCS]
+                                             --tokenizer-path TOKENIZER_PATH [--ftfy] --output-prefix OUTPUT_PREFIX
+                                             [--dataset-impl {lazy,cached,mmap}] [--workers WORKERS]
+                                             [--log-interval LOG_INTERVAL]
+
+options:
+  -h, --help            show this help message and exit
+
+input data:
+  --input INPUT         Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated list
+  --jsonl-keys JSONL_KEYS [JSONL_KEYS ...]
+                        space-separated list of keys to extract from jsonl. Default: text
+  --no-mask             If set, this will not mask any tokens in the input data.
+  --generation-role GENERATION_ROLE
+                        The role of the model generating the chat, usually 'assistant'. Default: assistant
+  --only-last           If set, this will mask everything except the last turn in the chat.
+  --num-docs NUM_DOCS   Optional: Number of documents in the input data (if known) for an accurate progress bar.
+
+tokenizer:
+  --tokenizer-path TOKENIZER_PATH
+                        Path to HF Tokenizer.
+  --ftfy                Use ftfy to clean text
+
+output data:
+  --output-prefix OUTPUT_PREFIX
+                        Path to binary output file without suffix
+  --dataset-impl {lazy,cached,mmap}
+                        Dataset implementation to use. Default: mmap
+
+runtime:
+  --workers WORKERS     Number of worker processes to launch
+  --log-interval LOG_INTERVAL
+                        Interval between progress updates
+```
+## `multinode_prepare_data.sh`
+Does the same, but distributed over multiple nodes.
+
+```
+# USAGE:
+# This script allows you to prepare your dataset using multiple nodes by chunking the individual files and
+# distributing the chunks over the processes.
+# This bash script takes a single text file as its input argument.
+# The text file contains a valid filepath on each line, each leading to a jsonl file.
+# Furthermore, environment variables for the rank and the world size need to be set.
+# These default to the SLURM and OMPI variables in this order of priority, but they can be set manually as well
+# using the variables $RANK and $WORLD_SIZE, which will overwrite the cluster-specific variables.
+# You can also add all arguments of the preprocess_data.py script to this script and it will simply pass them through.
+```
+
+
+## `corpora.py`
+Has information for common datasets. Primarily meant for use in the top-level `prepare_data.py` script.
diff --git a/tools/datasets/corpora.py b/tools/datasets/corpora.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c440dc0a0af309eb356e144290a70fd22cc11c2
--- /dev/null
+++ b/tools/datasets/corpora.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from abc import ABC, abstractmethod
+from multiprocessing import cpu_count
+
+"""
+This registry is for automatically downloading and extracting datasets.
+
+To register a dataset you need to inherit from the DataDownloader class and provide `name` and `urls` attributes, and
+(optionally) the number of documents.
+
+When done, add it to the DATA_DOWNLOADERS dict. The function `prepare_dataset` runs the pre-processing for the selected
+dataset.
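+
+As an illustrative (hypothetical) example, a new dataset would be registered like so:
+
+    class MyDataset(DataDownloader):
+        name = "my_dataset"
+        urls = ["https://example.com/my_dataset.jsonl.zst"]
+        num_docs = 12345  # optional
+
+Then add `"my_dataset": MyDataset` to the DATA_DOWNLOADERS dict below.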
+""" + +GPT2_VOCAB_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json" +GPT2_MERGE_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt" + + +class DataDownloader(ABC): + """Dataset registry class to automatically download / extract datasets""" + + def __init__( + self, + tokenizer_type=None, + merge_file=None, + vocab_file=None, + data_dir=None, + force_redownload=None, + num_workers=None, + ): + if tokenizer_type is None: + tokenizer_type = "GPT2BPETokenizer" + if data_dir is None: + data_dir = os.environ.get("DATA_DIR", "./data") + if merge_file is None: + merge_file = f"{data_dir}/gpt2-merges.txt" + if force_redownload is None: + force_redownload = False + if vocab_file is None: + if tokenizer_type == "GPT2BPETokenizer": + vocab_file = f"{data_dir}/gpt2-vocab.json" + elif tokenizer_type == "HFGPT2Tokenizer": + vocab_file = "gpt2" + elif tokenizer_type == "CharLevelTokenizer": + pass + else: + assert vocab_file is not None, "No vocab file provided" + if num_workers is None: + num_workers = cpu_count() + self._tokenizer_type = tokenizer_type + self._merge_file = merge_file + self._vocab_file = vocab_file + self._data_dir = data_dir + self._force_redownload = force_redownload + self._num_workers = num_workers + + @property + def base_dir(self): + """base data directory""" + return self._data_dir + + @property + @abstractmethod + def name(self): + """name of dataset""" + pass + + @property + @abstractmethod + def urls(self): + """URLs from which to download dataset""" + pass + + @property + def tokenizer_type(self): + """tokenizer type to use when tokenizing data""" + return self._tokenizer_type + + @property + def merge_file(self): + """Merge file for tokenizer""" + return self._merge_file + + @property + def vocab_file(self): + """Vocab file for tokenizer""" + return self._vocab_file + + @property + def num_workers(self): + """Number of workers to use in preprocessing""" + return self._num_workers + + @property + def num_docs(self): + """Number of documents in the dataset (if known)""" + return None + + @property + def ftfy(self): + """Use ftfy (https://github.com/LuminosoInsight/python-ftfy) to fix text encodings""" + return False + + def exists(self): + """Checks if the dataset is present""" + return os.path.isdir(f"{self.base_dir}/{self.name}") + + def download(self): + """downloads dataset""" + os.makedirs(os.path.join(self.base_dir, self.name), exist_ok=True) + for url in self.urls: + try: + os_cmd = f"wget {url} -O {os.path.join(self.base_dir, self.name, os.path.basename(url))}" + if os.system(os_cmd) != 0: + raise Exception( + f"Cannot download file at URL {url}: server may be down" + ) + except Exception as e: + raise Exception(f"Download error: {e}") + + def tokenize(self): + """tokenizes dataset""" + parent_folder = os.path.join(self.base_dir, self.name) + jsonl_filepath = ",".join( + [os.path.join(parent_folder, os.path.basename(url)) for url in self.urls] + ) + + cmd = f"python tools/datasets/preprocess_data.py \ + --input {jsonl_filepath} \ + --output-prefix {parent_folder}/{self.name} \ + --vocab {self.vocab_file} \ + --dataset-impl mmap \ + --tokenizer-type {self.tokenizer_type} \ + --merge-file {self.merge_file} \ + --append-eod \ + --workers {self.num_workers} " + + if self.num_docs is not None: + cmd += f"--num-docs {self.num_docs} " + + if self.ftfy: + cmd += f"--ftfy " + + os.system(cmd) + + def prepare(self): + if self._force_redownload: + self.download() + else: + if not self.exists(): + self.download() + + 
self.tokenize() + + +class Enron(DataDownloader): + name = "enron" + urls = ["http://eaidata.bmk.sh/data/enron_emails.jsonl.zst"] + num_docs = 517401 + + +class PileSubset(DataDownloader): + name = "pile_00" + urls = ["https://the-eye.eu/public/AI/pile/train/00.jsonl.zst"] + + +class Pile(DataDownloader): + name = "pile" + urls = [ + f"https://the-eye.eu/public/AI/pile/train/{i:02}.jsonl.zst" for i in range(30) + ] + + +class Github(DataDownloader): + name = "github" + urls = ["http://eaidata.bmk.sh/data/github_small.jsonl.zst"] + + +class ArXiv(DataDownloader): + name = "arxiv" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/2020-09-08-arxiv-extracts-nofallback-until-2007-068.tar.gz" + ] + + +class EuroParl(DataDownloader): + name = "europarl" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/EuroParliamentProceedings_1996_2011.jsonl.zst" + ] + + +class FreeLaw(DataDownloader): + name = "freelaw" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst" + ] + + +class NiH(DataDownloader): + name = "nih" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/NIH_ExPORTER_awarded_grant_text.jsonl.zst" + ] + + +class PubMed(DataDownloader): + name = "pubmed" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/PMC_extracts.tar.gz" + ] + + +class Books1(DataDownloader): + name = "books1" + urls = ["https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz"] + + +class Books3(DataDownloader): + name = "books3" + urls = ["https://the-eye.eu/public/AI/pile_preliminary_components/books3.tar.gz"] + + +class HackerNews(DataDownloader): + name = "hackernews" + urls = ["https://the-eye.eu/public/AI/pile_preliminary_components/hn.tar.gz"] + num_docs = 373000 + + +class OpenWebText2(DataDownloader): + name = "openwebtext2" + urls = [ + "https://huggingface.co/datasets/segyges/OpenWebText2/resolve/main/openwebtext2.jsonl.zst.tar" + ] + num_docs = 17103000 + + +class StackExchange(DataDownloader): + name = "stackexchange" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/stackexchange_dataset.tar" + ] + + +class UbuntuIRC(DataDownloader): + name = "ubuntu_irc" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/ubuntu_irc_until_2020_9_1.jsonl.zst" + ] + + +class YoutubeSubtitles(DataDownloader): + name = "youtube_subtitles" + urls = [ + "https://the-eye.eu/public/AI/pile_preliminary_components/yt_subs.jsonl.zst" + ] + + +class C4(DataDownloader): + name = "c4" + urls = [ + f"https://the-eye.eu/eleuther_staging/c4/en/c4-train.{i:05}-of-01024.json.gz" + for i in range(1024) + ] + + +class C4OpenWebText(DataDownloader): + name = "c4_openwebtext" + urls = [ + f"https://the-eye.eu/eleuther_staging/c4/realnewslike/c4-train.{i:05}-of-00512.json.gz" + for i in range(512) + ] + + +class Enwik8(DataDownloader): + name = "enwik8" + urls = ["http://mattmahoney.net/dc/enwik8.zip"] + + +def maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir): + if tokenizer_type is None or tokenizer_type == "GPT2BPETokenizer": + GPT2_VOCAB_FP = f"{data_dir}//gpt2-vocab.json" + GPT2_MERGE_FP = f"{data_dir}/gpt2-merges.txt" + if not os.path.isfile(GPT2_VOCAB_FP): + os.system(f"wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}") + if not os.path.isfile(GPT2_MERGE_FP): + os.system(f"wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}") + + +DATA_DOWNLOADERS = { + "pass": "pass", + "enron": Enron, + "pile_subset": PileSubset, + "pile": Pile, + "github": Github, + "arxiv": ArXiv, + 
"europarl": EuroParl, + "freelaw": FreeLaw, + "nih": NiH, + "pubmed": PubMed, + "books1": Books1, + "books3": Books3, + "hackernews": HackerNews, + "openwebtext2": OpenWebText2, + "stackexchange": StackExchange, + "ubuntu_irc": UbuntuIRC, + "youtube_subtitles": YoutubeSubtitles, + "c4": C4, + "c4_openwebtext": C4OpenWebText, + "enwik8": Enwik8, +} + + +def prepare_dataset( + dataset_name: str, + tokenizer_type: str = None, + data_dir: str = None, + vocab_file: str = None, + merge_file: str = None, + force_redownload: bool = None, + num_workers: int = None, +): + """ + Downloads + tokenizes a dataset in the registry (dataset_name) and saves output .npy files to data_dir. + """ + if data_dir is None: + data_dir = os.environ.get("DATA_DIR", "./data") + os.makedirs(data_dir, exist_ok=True) + maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir) + DownloaderClass = DATA_DOWNLOADERS.get(dataset_name.lower(), None) + if DownloaderClass is None: + raise NotImplementedError( + f'Dataset "{dataset_name}" not recognized - please choose from {list(DATA_DOWNLOADERS.keys())}' + ) + elif DownloaderClass == "pass": + # pass on building dataset (for unit tests) + pass + else: + num_workers = 1 if dataset_name == "enwik8" else num_workers + d = DownloaderClass( + tokenizer_type=tokenizer_type, + vocab_file=vocab_file, + merge_file=merge_file, + data_dir=data_dir, + force_redownload=force_redownload, + num_workers=num_workers, + ) + d.prepare() diff --git a/tools/datasets/dataset_token_count.py b/tools/datasets/dataset_token_count.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a4ff3bafde42d7d841e964ad717431edb088fd --- /dev/null +++ b/tools/datasets/dataset_token_count.py @@ -0,0 +1,30 @@ +# Script counts tokens in a pretokenized dataset from preprocess_data.py +# Necessary for setting batch size, train_iters, etc + +import sys +import os + +## Necessary for the import +project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +sys.path.insert(0, project_root) + +from megatron.data import indexed_dataset +import numpy as np + +if len(sys.argv) < 2: + print( + "Usage: python dataset_token_count.py /absolute/file/path/to/dataset1 /absolute/file/path/to/dataset2 ..." 
+ ) + sys.exit(1) + +# Access the command-line arguments +arguments = sys.argv[1:] + +for arg in arguments: + print("Checking file", arg) + try: + dataset = indexed_dataset.make_dataset(arg, "mmap") + size = np.sum(dataset.sizes) + print("Dataset size in tokens is", size) + except AttributeError: + print("Dataset could not be loaded", arg) diff --git a/tools/datasets/merge_datasets.py b/tools/datasets/merge_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..21567bd80ab8c39c13c64711ff73b9c2941b6fae --- /dev/null +++ b/tools/datasets/merge_datasets.py @@ -0,0 +1,86 @@ +import os +import sys +import json +import argparse + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) + +from megatron.data import indexed_dataset + + +def main(args): + + prefixes = set() + for basename in os.listdir(args.input): + prefix, ext = os.path.splitext(basename) + + if prefix in prefixes: + continue + + if not os.path.isfile(os.path.join(args.input, basename)): + continue + + ext_pair = ".bin" if ext == ".idx" else ".idx" + assert os.path.isfile( + os.path.join(args.input, prefix) + ext_pair + ), f"ERROR: {ext_pair} file not provided for {os.path.join(args.input, prefix)}" + + prefixes.add(prefix) + + builder = None + for prefix in sorted(prefixes): + if builder is None: + dataset = indexed_dataset.make_dataset( + os.path.join(args.input, prefix), "infer" + ) + + if isinstance(dataset, indexed_dataset.MMapIndexedDataset): + builder = indexed_dataset.MMapIndexedDatasetBuilder( + args.output_prefix + ".bin", dtype=dataset._index.dtype + ) + else: + builder = indexed_dataset.IndexedDatasetBuilder( + args.output_prefix + ".bin" + ) + + del dataset + + builder.merge_file_(os.path.join(args.input, prefix)) + + builder.finalize(args.output_prefix + ".idx") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + group = parser.add_argument_group(title="input data") + group.add_argument( + "--input", + type=str, + required=True, + help="Path to directory containing all document files to merge", + ) + + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output-prefix", + type=str, + required=True, + help="Path to binary output file without suffix", + ) + + args = parser.parse_args() + + assert os.path.isdir( + args.input + ), f"ERROR: {args.input} is not a directory or does not exist" + + assert os.path.isdir( + os.path.dirname(args.output_prefix) + ), f"ERROR: {os.path.dirname(args.output_prefix)} is not a directory or does not exist" + + main(args) diff --git a/tools/datasets/multinode_prepare_data.sh b/tools/datasets/multinode_prepare_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..87cb8ef3188ea00b3040cf41146d928b8b5e7360 --- /dev/null +++ b/tools/datasets/multinode_prepare_data.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# USAGE: +# This script allows you to prepare your dataset using multiple nodes by chunking the individual files and distributed the chunks +# over the processes. +# This bash script takes a single text file as input argument. +# The text file contains a valid filepath in each line, leading to a jsonl-file. +# Furthermore an environment variable for the rank and the world size needs to be set. +# These default to the SLURM and OMPI variables in this order of priority, but they can be set manually as well +# using the variables $RANK and $WORLD_SIZE, which will overwrite the cluster-specific variables. 
+# You can also add all arguments of the preprocess_data.py script to this script and it will simply pass them through.
+
+# Parse command-line arguments
+text_file="$1"
+rank="${RANK:-${SLURM_PROCID:-$OMPI_COMM_WORLD_RANK}}"
+world_size="${WORLD_SIZE:-${SLURM_NTASKS:-$OMPI_COMM_WORLD_SIZE}}"
+num_lines=$(wc -l < "$text_file")
+chunk_size=$((num_lines / world_size))
+start_line=$((rank * chunk_size + 1))
+end_line=$((start_line + chunk_size - 1))
+
+# Make sure the last chunk includes all remaining lines
+if [[ $rank == $((world_size - 1)) ]]; then
+    end_line=$num_lines
+fi
+
+# Select the chunk of the text file that corresponds to the rank
+chunk_file="chunk_${rank}.txt"
+sed -n "${start_line},${end_line}p" "$text_file" > "$chunk_file"
+
+# Parse additional flags to be passed to the Python script
+shift 1 # Shift past the input text file argument
+py_args=""
+prefix_arg=""
+while [[ $# -gt 0 ]]; do
+    case "$1" in
+        --output-prefix=*) prefix_arg="$1"; shift;;
+        --output-prefix) prefix_arg="$1 $2"; shift 2;;
+        --*) py_args="$py_args $1 $2"; shift 2;;
+        *) echo "Unknown argument: $1"; exit 1;;
+    esac
+done
+
+# Add the rank to the --output-prefix argument if it is set
+if [[ -n "$prefix_arg" ]]; then
+    py_args="$py_args $prefix_arg$rank"
+else
+    # Inject a default --output-prefix argument containing the rank
+    py_args="$py_args --output-prefix rank${rank}"
+fi
+
+
+echo "processing $chunk_file with rank $rank at world size $world_size"
+echo "using the following args: $py_args"
+# Call the Python script with the list of file paths in the chunk
+python tools/datasets/preprocess_data.py --input $(tr '\n' ',' < "$chunk_file" | sed 's/,$/\n/') $py_args
+
+# Clean up
+rm "$chunk_file"
diff --git a/tools/datasets/preprocess_data.py b/tools/datasets/preprocess_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7765e504059d59a0e509d62808ba5dc4c3e3c83
--- /dev/null
+++ b/tools/datasets/preprocess_data.py
@@ -0,0 +1,246 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
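+
+# Example invocation (illustrative paths; all flags are defined in get_args below):
+#   python tools/datasets/preprocess_data.py \
+#       --input ./data/mydataset.jsonl.zst \
+#       --output-prefix ./data/mydataset \
+#       --vocab-file ./data/gpt2-vocab.json \
+#       --merge-file ./data/gpt2-merges.txt \
+#       --tokenizer-type GPT2BPETokenizer \
+#       --append-eod \
+#       --workers 8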
+ +"""Processing data for pretraining.""" + +import argparse +import multiprocessing +import os +import sys + +import lm_dataformat as lmd +import numpy as np + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) +import time +import tqdm +import torch +import ftfy + +from megatron.tokenizer import build_tokenizer +from megatron.data import indexed_dataset +from threading import Semaphore + + +class Encoder(object): + def __init__(self, args): + self.args = args + + def initializer(self): + # Use Encoder class as a container for global data + Encoder.tokenizer = build_tokenizer(self.args) + + def encode(self, text): + if self.args.ftfy: + text = ftfy.fix_text(text) + ids = {} + for key in self.args.jsonl_keys: + doc_ids = [] + text_ids = Encoder.tokenizer.tokenize(text) + if len(text_ids) > 0: + doc_ids.append(text_ids) + if self.args.append_eod: + doc_ids[-1].append(Encoder.tokenizer.eod) + ids[key] = doc_ids + return ids, len(text) + + +def get_args(input_args=None): + parser = argparse.ArgumentParser() + group = parser.add_argument_group(title="input data") + group.add_argument( + "--input", + type=str, + required=True, + help="Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated " + "list", + ) + group.add_argument( + "--jsonl-keys", + nargs="+", + default=["text"], + help="space separate listed of keys to extract from jsonl. Default: text", + ) + group.add_argument( + "--num-docs", + default=None, + help="Optional: Number of documents in the input data (if known) for an accurate progress bar.", + type=int, + ) + group = parser.add_argument_group(title="tokenizer") + group.add_argument( + "--tokenizer-type", + type=str, + required=True, + choices=[ + "HFGPT2Tokenizer", + "HFTokenizer", + "GPT2BPETokenizer", + "CharLevelTokenizer", + "TiktokenTokenizer", + "SPMTokenizer", + ], + help="What type of tokenizer to use.", + ) + group.add_argument( + "--vocab-file", type=str, default=None, help="Path to the vocab file" + ) + group.add_argument( + "--merge-file", + type=str, + default=None, + help="Path to the BPE merge file (if necessary).", + ) + group.add_argument( + "--append-eod", + action="store_true", + help="Append an token to the end of a document.", + ) + group.add_argument("--ftfy", action="store_true", help="Use ftfy to clean text") + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output-prefix", + type=str, + required=True, + help="Path to binary output file without suffix", + ) + group.add_argument( + "--dataset-impl", + type=str, + default="mmap", + choices=["lazy", "cached", "mmap"], + help="Dataset implementation to use. Default: mmap", + ) + + group = parser.add_argument_group(title="runtime") + group.add_argument( + "--workers", type=int, default=1, help="Number of worker processes to launch" + ) + group.add_argument( + "--log-interval", + type=int, + default=100, + help="Interval between progress updates", + ) + args = parser.parse_args(input_args) + args.keep_empty = False + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.model_parallel_size = 1 + + return args + + +def yield_from_files(fnames: list, semaphore): + """ + Iterator over input documents using lm_dataformat. Should be able to handle jsons / texts / + other compressed formats. Also filters out empty documents. 
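+    The semaphore provides backpressure: a slot is acquired before each document is yielded and
+    released by the consumer in main(), so reading cannot run far ahead of tokenization.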
+ + :param fnames: list of filenames + """ + + def yielder(fname, semaphore): + for f in filter(lambda x: x, lmd.Reader(fname).stream_data()): + semaphore.acquire() + yield f + + for fname in fnames: + semaphore.acquire() + + yield from yielder(fname, semaphore) + + +def main(input_args=None): + args = get_args(input_args) + encoder = Encoder(args) + tokenizer = build_tokenizer(args) + print(f"Vocab size: {tokenizer.vocab_size}") + print(f"Output prefix: {args.output_prefix}") + + # build a semaphore object to stop `yield_from_files` from getting ahead of encoder.encode and + # hence building up memory + semaphore = Semaphore(10000 + args.workers) + + # use multiprocessing to iterate over input documents + fin = yield_from_files(args.input.split(","), semaphore) + + if args.workers > 1: + pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer) + encoded_docs = pool.imap(encoder.encode, fin, chunksize=25) + else: + encoder.initializer() + encoded_docs = (encoder.encode(doc) for doc in fin) + + # make a dataset builder for each key in args.jsonl_keys + # each key will output to a different file beginning with args.output_prefix + output_bin_files = {} + output_idx_files = {} + builders = {} + for key in args.jsonl_keys: + output_bin_files[key] = "{}_{}_{}.bin".format( + args.output_prefix, key, "document" + ) + output_idx_files[key] = "{}_{}_{}.idx".format( + args.output_prefix, key, "document" + ) + builders[key] = indexed_dataset.make_builder( + output_bin_files[key], + impl=args.dataset_impl, + vocab_size=tokenizer.vocab_size, + ) + + # actually do tokenization + proc_start = time.time() + total_bytes_processed = 0 + pbar = tqdm.tqdm() + for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1): + total_bytes_processed += bytes_processed + + # release semaphore so `yield_from_files` can add another file to the buffer + semaphore.release() + + # add each tokenized document / sentence + for key, sentences in doc.items(): + for sentence in sentences: + builders[key].add_item(np.array(sentence, dtype=builders[key].dtype)) + # separate with eos token + builders[key].end_document() + + # log progress + if i % args.log_interval == 0: + current = time.time() + elapsed = current - proc_start + mbs = total_bytes_processed / elapsed / 1024 / 1024 + pbar.set_description( + f"Processed {i}{'' if args.num_docs is None else '/' + str(args.num_docs)} documents ({i / elapsed :.2f} docs/s, {mbs:.2f} MB/s)." + ) + if i != 0: + pbar.update(args.log_interval) + + # save output file + for key in args.jsonl_keys: + builders[key].finalize(output_idx_files[key]) + + +if __name__ == "__main__": + main() diff --git a/tools/datasets/preprocess_data_with_chat_template.py b/tools/datasets/preprocess_data_with_chat_template.py new file mode 100644 index 0000000000000000000000000000000000000000..ee2b983b60311f7d03bdf85736be611358bf3049 --- /dev/null +++ b/tools/datasets/preprocess_data_with_chat_template.py @@ -0,0 +1,416 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A script for processing a dataset such that chat templates are utilized in the creation of the data.
+These are then used to perform instruction/chat model finetunes (for example, finetuning a model on only the assistant
+portions of a chatml dataset).
+
+This follows the same output format as 'preprocess_data_with_mask.py' but uses chat templates to generate the data.
+This way we can support multiturn chat data in the finetuning process instead of relying on a single turn of data.
+
+To run this script, first edit `tools/datasets/corpora.py` such that the command to call
+    `tools/datasets/preprocess_data_with_chat_template.py` is as follows:
+
+```
+cmd = f"python tools/datasets/preprocess_data_with_chat_template.py \
+    --input {jsonl_filepath} \
+    --output-prefix {parent_folder}/{self.name} \
+    --tokenizer-path {hf-tokenizer} \
+    --jsonl-keys {jsonl_keys} \
+    --dataset-impl mmap \
+    --workers {self.num_workers} "
+
+if self.only_last:
+    cmd += f"--only-last "
+
+if self.no_mask:
+    cmd += f"--no-mask "
+```
+
+Then, specify
+```
+"train_data_paths": ["/path/to/dataset/name_text_document"],
+"label_data_paths": ["/path/to/dataset/name_label_document"]
+```
+in your YML config. This will then allow for finetuning on the data with loss masks set appropriately.
+
+"""
+
+import argparse
+import multiprocessing
+import os
+import sys
+
+import numpy as np
+
+sys.path.append(
+    os.path.abspath(
+        os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
+    )
+)
+
+import time
+import tqdm
+import jsonlines
+
+from megatron.data import indexed_dataset
+from threading import Semaphore
+from typing import List, Dict, Tuple
+from transformers import AutoTokenizer, PreTrainedTokenizer
+
+
+def build_chat(
+    chat: List[Dict[str, str]],
+    generation_role: str,
+    apply_mask: bool,
+    tokenizer: PreTrainedTokenizer,
+    only_last_turn: bool = False,
+    for_rm: bool = False,
+) -> Tuple[List[int], List[int]]:
+    """
+    Build a chat from a list of dictionaries. Each dictionary should have a "role" and "content" key, this follows the
+    Chat Template from https://huggingface.co/docs/transformers/main/en/chat_templating
+
+    :param chat: A list of dictionaries with "role" and "content" keys
+    :param generation_role: The role of the model generating the chat, usually "assistant"
+    :param apply_mask: Whether to apply a loss mask to the chat, if False, all tokens will be included in the loss
+    :param tokenizer: A HF tokenizer
+    :param only_last_turn: Whether to only include the last turn in the chat, needed for some fine-tuning tasks
+    :param for_rm: Whether to prepare the chat for reward-model training (mask everything except the final EOS token)
+    """
+    tokens = []
+    mask = []
+    if apply_mask is False:
+        tokens = tokenizer.apply_chat_template(chat)
+        mask = tokens
+        return tokens, mask
+    elif for_rm:
+        tokens = tokenizer.apply_chat_template(chat)
+        mask = [-100] * len(tokens)
+        if tokenizer.eos_token_id is not None:
+            # since this is processed in a causal format (input[:-1], mask[1:]), we need to put two EOS tokens here...
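+            # (appending label -100 with the first EOS and label EOS with the second keeps
+            # the final EOS as the only supervised position once inputs and labels are shifted)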
+ mask.append(-100) + tokens.append(tokenizer.eos_token_id) + mask.append(tokenizer.eos_token_id) + tokens.append(tokenizer.eos_token_id) + else: + raise ValueError( + "Tokenizer does not have an EOS token, unable to determine good mask, please edit and make your own." + ) + return tokens, mask + for i, turn in enumerate(chat): + add_gen = ( + False if i == len(chat) - 1 else chat[i + 1]["role"] == generation_role + ) + chat_tokens = tokenizer.apply_chat_template( + chat[: i + 1], add_generation_prompt=add_gen + )[len(tokens) :] + # remove previous stuff... + tokens.extend(chat_tokens) + if only_last_turn and (i != len(chat) - 1): + mask.extend([-100] * len(chat_tokens)) + elif apply_mask and (turn["role"] != generation_role): + mask.extend([-100] * len(chat_tokens)) + else: + mask.extend(chat_tokens) + if tokenizer.eos_token_id is not None: + mask.append(tokenizer.eos_token_id if mask[-1] != -100 else -100) + tokens.append(tokenizer.eos_token_id) + return tokens, mask + + +class Encoder(object): + def __init__(self, args): + self.args = args + + def initializer(self): + # Use Encoder class as a container for global data + Encoder.tokenizer = AutoTokenizer.from_pretrained(self.args.tokenizer_path) + + def encode(self, text): + ids = {} + for key in self.args.jsonl_keys: + text_ids, label_ids = build_chat( + text[key], + self.args.generation_role, + not self.args.no_mask, + Encoder.tokenizer, + self.args.only_last, + self.args.for_rm, + ) + if self.args.reward_key is not None: + reward = text[self.args.reward_key] + if self.args.binary_reward: + reward = [1] if reward else [-1] + elif type(reward) == float: + reward = [reward] + ids[key] = (text_ids, label_ids, reward) + else: + ids[key] = (text_ids, label_ids, None) + return ids, len(text) + + +def get_args(): + parser = argparse.ArgumentParser() + group = parser.add_argument_group(title="input data") + group.add_argument( + "--input", + type=str, + required=True, + help="Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated " + "list", + ) + group.add_argument( + "--jsonl-keys", + nargs="+", + default=["conversation"], + help="space separate listed of keys to extract from jsonl. Default: text", + ) + group.add_argument( + "--no-mask", + help="If set, this will not mask any tokens in the input data.", + action="store_true", + ) + group.add_argument( + "--for-rm", + help="If set, this will mask everything except the last token in the chat.", + action="store_true", + ) + + group.add_argument( + "--generation-role", + type=str, + default="assistant", + help="The role of the model generating the chat, usually 'assistant'. 
Default: assistant", + ) + group.add_argument( + "--only-last", + help="If set, this will mask everything except the last turn in the chat.", + action="store_true", + ) + group.add_argument( + "--reward-key", + type=str, + default=None, + help="Optional: key to use for reward data in the input data.", + ) + group.add_argument( + "--binary-reward", + help="If set, this will treat the reward data as a boolean.", + action="store_true", + ) + group.add_argument( + "--num-docs", + default=None, + help="Optional: Number of documents in the input data (if known) for an accurate progress bar.", + type=int, + ) + group = parser.add_argument_group(title="tokenizer") + group.add_argument( + "--tokenizer-path", + type=str, + required=True, + help="Path to HF Tokenizer.", + ) + group.add_argument("--ftfy", action="store_true", help="Use ftfy to clean text") + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output-prefix", + type=str, + required=True, + help="Path to binary output file without suffix", + ) + group.add_argument( + "--dataset-impl", + type=str, + default="mmap", + choices=["lazy", "cached", "mmap"], + help="Dataset implementation to use. Default: mmap", + ) + + group = parser.add_argument_group(title="runtime") + group.add_argument( + "--workers", type=int, default=1, help="Number of worker processes to launch" + ) + group.add_argument( + "--log-interval", + type=int, + default=100, + help="Interval between progress updates", + ) + args = parser.parse_args() + args.keep_empty = False + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.model_parallel_size = 1 + + return args + + +def yield_from_files(fnames: list, semaphore): + """ + Iterator over input documents using lm_dataformat. Should be able to handle jsons / texts / + other compressed formats. Also filters out empty documents. 
+
+    :param fnames: list of filenames
+    """
+
+    def yielder(fname, semaphore):
+        with open(fname, encoding="utf-8") as f:
+            reader = jsonlines.Reader(f)
+            for doc in reader:
+                semaphore.acquire()
+                yield doc
+
+    for fname in fnames:
+        semaphore.acquire()
+
+        yield from yielder(fname, semaphore)
+
+
+def main():
+    args = get_args()
+    encoder = Encoder(args)
+    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path)
+    print(f"Vocab size: {tokenizer.vocab_size}")
+    print(f"Output prefix: {args.output_prefix}")
+
+    # build a semaphore object to stop `yield_from_files` from getting ahead of encoder.encode and
+    # hence building up memory
+    semaphore = Semaphore(10000 + args.workers)
+
+    # use multiprocessing to iterate over input documents
+    fin = yield_from_files(args.input.split(","), semaphore)
+
+    if args.workers > 1:
+        pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer)
+        encoded_docs = pool.imap(encoder.encode, fin, chunksize=25)
+    else:
+        encoder.initializer()
+        encoded_docs = (encoder.encode(doc) for doc in fin)
+
+    # make a dataset builder for each key in args.jsonl_keys
+    # each key will output to a different file beginning with args.output_prefix
+    output_bin_files = {}
+    output_idx_files = {}
+    builders = {}
+    for key in args.jsonl_keys:
+        output_bin_files[key] = "{}_{}_{}.bin".format(
+            args.output_prefix, key, "document"
+        )
+        output_idx_files[key] = "{}_{}_{}.idx".format(
+            args.output_prefix, key, "document"
+        )
+        builders[key] = indexed_dataset.make_builder(
+            output_bin_files[key],
+            impl=args.dataset_impl,
+            vocab_size=tokenizer.vocab_size,
+        )
+        builders[key]._dtype = np.int32
+        if not args.no_mask:
+            assert (
+                key + "_label" not in args.jsonl_keys
+            ), "label should not be included as it will be generated according to the mask."
+            label_key = key + "_label"
+            output_bin_files[label_key] = "{}_{}_{}.bin".format(
+                args.output_prefix, label_key, "document"
+            )
+            output_idx_files[label_key] = "{}_{}_{}.idx".format(
+                args.output_prefix, label_key, "document"
+            )
+            builders[label_key] = indexed_dataset.make_builder(
+                output_bin_files[label_key],
+                impl=args.dataset_impl,
+                vocab_size=tokenizer.vocab_size,
+            )
+            builders[label_key]._dtype = np.int32
+        if args.reward_key is not None:
+            assert (
+                key + "_reward" not in args.jsonl_keys
+            ), "reward should not be included as it will be generated from the data."
+ reward_key = key + "_reward" + output_bin_files[reward_key] = "{}_{}_{}.bin".format( + args.output_prefix, reward_key, "document" + ) + output_idx_files[reward_key] = "{}_{}_{}.idx".format( + args.output_prefix, reward_key, "document" + ) + builders[reward_key] = indexed_dataset.make_builder( + output_bin_files[reward_key], + impl=args.dataset_impl, + vocab_size=tokenizer.vocab_size, + ) + builders[reward_key]._dtype = np.int32 + + # actually do tokenization + proc_start = time.time() + total_bytes_processed = 0 + pbar = tqdm.tqdm() + for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1): + total_bytes_processed += bytes_processed + + # release semaphore so `yield_from_files` can add another file to the buffer + semaphore.release() + + # add each tokenized document / sentence + for key, conv in doc.items(): + tokens = conv[0] + token_mask = conv[1] + reward = conv[2] + builders[key].add_item(np.array(tokens, dtype=builders[key].dtype)) + builders[key + "_label"].add_item( + np.array(token_mask, dtype=builders[key + "_label"].dtype) + ) + if args.reward_key is not None: + builders[key + "_reward"].add_item( + np.array(reward, dtype=builders[key + "_reward"].dtype) + ) + # add indx... + builders[key].end_document() + builders[key + "_label"].end_document() + if args.reward_key is not None: + builders[key + "_reward"].end_document() + if i == 1: + print("key: ", key) + print("tokens: ", tokens) + print("token_mask: ", token_mask) + print("Reward: ", reward) + # log progress + if i % args.log_interval == 0: + current = time.time() + elapsed = current - proc_start + mbs = total_bytes_processed / elapsed / 1024 / 1024 + pbar.set_description( + f"Processed {i}{'' if args.num_docs is None else '/' + str(args.num_docs)} documents ({i / elapsed} docs/s, {mbs} MB/s)." + ) + if i != 0: + pbar.update(args.log_interval) + + # save output file + update_keys = args.jsonl_keys + for key in update_keys: + builders[key].finalize(output_idx_files[key]) + builders[key + "_label"].finalize(output_idx_files[key + "_label"]) + if args.reward_key is not None: + builders[key + "_reward"].finalize(output_idx_files[key + "_reward"]) + + +if __name__ == "__main__": + main() diff --git a/tools/datasets/preprocess_data_with_mask.py b/tools/datasets/preprocess_data_with_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..661092c03206cafca4253aa2e30c89ef4aa61d4c --- /dev/null +++ b/tools/datasets/preprocess_data_with_mask.py @@ -0,0 +1,386 @@ +# Copyright (c) 2024, EleutherAI +# This file is based on code by the authors denoted below and has been modified from its original version. +# +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A script for processing a dataset such that corresponding labels are also produced. 
These are then used to perform masked finetuning +(for example, finetuning a model to only output the text following some delimiter in the finetuning dataset such as "Answer: " +rather than generating the entire "Question: ... Answer: " turns of conversation. + +To run this script, first edit `tools/datasets/corpora.py` such that the command to call `tools/datasets/preprocess_data.py` is as follows: + +``` +cmd = f"python tools/datasets/preprocess_data_with_mask.py \ + --input {jsonl_filepath} \ + --output-prefix {parent_folder}/{self.name} \ + --vocab {self.vocab_file} \ + --dataset-impl mmap \ + --tokenizer-type {self.tokenizer_type} \ + --merge-file {self.merge_file} \ + --append-eod \ + --mask-before-token X,Y,Z \ + --workers {self.num_workers} " + +if self.num_docs is not None: + cmd += f"--num-docs {self.num_docs} " + +if self.ftfy: + cmd += f"--ftfy " +``` +where --mask-before-token must be the (comma-separated) list of tokens produced by encoding your delimiter string. +Up to and including the first occurrence of this token sequence in a document, all tokens will have their loss mask zeroed out when the label dataset is provided to NeoX. + +Then, specify +``` +"train_data_paths": ["/path/to/dataset/name_text_document"], +"label_data_paths": ["/path/to/dataset/name_label_document"] +``` +in your YML config. This will then allow for finetuning on the data with loss masks set appropriately. +(However, be warned that NeoX packs documents to fill context windows, which may degrade performance in some finetuning situations where instead padding out to the context length may be preferred.) +""" + +import argparse +import multiprocessing +import os +import sys +import re + +import lm_dataformat as lmd +import numpy as np + +sys.path.append( + os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir) + ) +) +import time +import tqdm +import torch +import ftfy + +from megatron.tokenizer import build_tokenizer +from megatron.data import indexed_dataset +from threading import Semaphore +from functools import lru_cache + + +@lru_cache(maxsize=None) +def build_nxt(pattern: tuple) -> tuple: + # The function is being cached. Use tuple to avoid the cache being tampered out of scope. + nxt = [0] + current = 1 + match_idx = 0 + + while current < len(pattern): + if pattern[match_idx] == pattern[current]: + current += 1 + match_idx += 1 + nxt.append(match_idx) + elif match_idx != 0: + match_idx = nxt[match_idx - 1] + else: + nxt.append(0) + current += 1 + + return tuple(nxt) + + +def kmp(seq, pattern, first_appearance=False): + """ + Search for the location of a subsequence in a list. Not sure if there is a python built-in + implementation of kmp somewhere... 
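+
+    Runs in O(len(seq) + len(pattern)) time. For example, kmp([1, 2, 3, 2, 3], [2, 3])
+    returns [1, 3], and with first_appearance=True it returns [1].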
+ """ + nxt = build_nxt(tuple(pattern)) + current = 0 + match_idx = 0 + + matched = [] + + while current < len(seq): + if seq[current] == pattern[match_idx]: + current += 1 + match_idx += 1 + elif match_idx != 0: + match_idx = nxt[match_idx - 1] + else: + current += 1 + + if match_idx == len(pattern): + matched.append(current - len(pattern)) + if first_appearance: + return matched + match_idx = nxt[match_idx - 1] + + return matched + + +class Encoder(object): + def __init__(self, args): + self.args = args + + def initializer(self): + # Use Encoder class as a container for global data + Encoder.tokenizer = build_tokenizer(self.args) + + def encode(self, text): + if self.args.ftfy: + text = ftfy.fix_text(text) + if isinstance(text, str): + text = {"text": text} + ids = {} + for key in self.args.jsonl_keys: + doc_ids = [] + text_ids = Encoder.tokenizer.tokenize(text["text"]) + if len(text_ids) > 0: + doc_ids.append(text_ids) + if self.args.append_eod: + doc_ids[-1].append(Encoder.tokenizer.eod) + ids[key] = doc_ids + return ids, len(text) + + +def get_args(): + parser = argparse.ArgumentParser() + group = parser.add_argument_group(title="input data") + group.add_argument( + "--input", + type=str, + required=True, + help="Path to input jsonl files or lmd archive(s) - if using multiple archives, put them in a comma separated " + "list", + ) + group.add_argument( + "--jsonl-keys", + nargs="+", + default=["text"], + help="space separate listed of keys to extract from jsonl. Default: text", + ) + group.add_argument( + "--mask-before-token", + default=None, + help="apply loss masks before certain token(s). If multi-token pattern, separate by commas without space, e.g. --mask-before-token 0,1,1270 to use the token pattern [0,1,1270].", + type=str, + ) + group.add_argument( + "--num-docs", + default=None, + help="Optional: Number of documents in the input data (if known) for an accurate progress bar.", + type=int, + ) + group = parser.add_argument_group(title="tokenizer") + group.add_argument( + "--tokenizer-type", + type=str, + required=True, + choices=[ + "HFGPT2Tokenizer", + "HFTokenizer", + "GPT2BPETokenizer", + "CharLevelTokenizer", + ], + help="What type of tokenizer to use.", + ) + group.add_argument( + "--vocab-file", type=str, default=None, help="Path to the vocab file" + ) + group.add_argument( + "--merge-file", + type=str, + default=None, + help="Path to the BPE merge file (if necessary).", + ) + group.add_argument( + "--append-eod", + action="store_true", + help="Append an token to the end of a document.", + ) + group.add_argument("--ftfy", action="store_true", help="Use ftfy to clean text") + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output-prefix", + type=str, + required=True, + help="Path to binary output file without suffix", + ) + group.add_argument( + "--dataset-impl", + type=str, + default="mmap", + choices=["lazy", "cached", "mmap"], + help="Dataset implementation to use. 
Default: mmap", + ) + + group = parser.add_argument_group(title="runtime") + group.add_argument( + "--workers", type=int, default=1, help="Number of worker processes to launch" + ) + group.add_argument( + "--log-interval", + type=int, + default=100, + help="Interval between progress updates", + ) + args = parser.parse_args() + args.keep_empty = False + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.model_parallel_size = 1 + + return args + + +def yield_from_files(fnames: list, semaphore): + """ + Iterator over input documents using lm_dataformat. Should be able to handle jsons / texts / + other compressed formats. Also filters out empty documents. + + :param fnames: list of filenames + """ + + def yielder(fname, semaphore): + for f in filter(lambda x: x, lmd.Reader(fname).stream_data()): + semaphore.acquire() + yield f + + for fname in fnames: + semaphore.acquire() + + yield from yielder(fname, semaphore) + + +def mask(sentence: list, pivot_tokens: list, include_pivot=True): + inds = kmp(sentence, pivot_tokens) + if not inds: + return sentence + index = inds[0] + if include_pivot: + index += len(pivot_tokens) + + return [-100] * index + sentence[index:] + + +def main(): + args = get_args() + encoder = Encoder(args) + tokenizer = build_tokenizer(args) + print(f"Vocab size: {tokenizer.vocab_size}") + print(f"Output prefix: {args.output_prefix}") + + # build a semaphore object to stop `yield_from_files` from getting ahead of encoder.encode and + # hence building up memory + semaphore = Semaphore(10000 + args.workers) + + # use multiprocessing to iterate over input documents + fin = yield_from_files(args.input.split(","), semaphore) + + if args.workers > 1: + pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer) + encoded_docs = pool.imap(encoder.encode, fin, chunksize=25) + else: + encoder.initializer() + encoded_docs = (encoder.encode(doc) for doc in fin) + + if args.mask_before_token is not None: + token_mask = [ + int(re.sub(r"[^0-9]", "", r)) + for r in args.mask_before_token.split(",") + if re.sub(r"[^0-9]", "", r) + ] + else: + token_mask = [] + + # make a dataset builder for each key in args.jsonl_keys + # each key will output to a different file beginning with args.output_prefix + output_bin_files = {} + output_idx_files = {} + builders = {} + for key in args.jsonl_keys: + output_bin_files[key] = "{}_{}_{}.bin".format( + args.output_prefix, key, "document" + ) + output_idx_files[key] = "{}_{}_{}.idx".format( + args.output_prefix, key, "document" + ) + builders[key] = indexed_dataset.make_builder( + output_bin_files[key], + impl=args.dataset_impl, + vocab_size=tokenizer.vocab_size, + ) + if token_mask: + assert ( + "label" not in args.jsonl_keys + ), "label should not be included as it will be generated according to the mask." 
+        key = "label"
+        output_bin_files[key] = "{}_{}_{}.bin".format(
+            args.output_prefix, key, "document"
+        )
+        output_idx_files[key] = "{}_{}_{}.idx".format(
+            args.output_prefix, key, "document"
+        )
+        builders[key] = indexed_dataset.make_builder(
+            output_bin_files[key],
+            impl=args.dataset_impl,
+            vocab_size=tokenizer.vocab_size,
+        )
+        int32_labels = ["text", "label"]
+        for l in int32_labels:
+            builders[l]._dtype = np.int32
+
+    # actually do tokenization
+    proc_start = time.time()
+    total_bytes_processed = 0
+    pbar = tqdm.tqdm()
+    for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
+        total_bytes_processed += bytes_processed
+
+        # release semaphore so `yield_from_files` can add another file to the buffer
+        semaphore.release()
+
+        # add each tokenized document / sentence
+        for key, sentences in doc.items():
+            for sentence in sentences:
+                builders[key].add_item(np.array(sentence, dtype=builders[key].dtype))
+                if token_mask:
+                    masked_sentence = mask(sentence, token_mask)
+                    builders["label"].add_item(
+                        np.array(masked_sentence, dtype=builders["text"].dtype)
+                    )
+            # separate with eos token
+            builders[key].end_document()
+            if token_mask:
+                builders["label"].end_document()
+
+        # log progress
+        if i % args.log_interval == 0:
+            current = time.time()
+            elapsed = current - proc_start
+            mbs = total_bytes_processed / elapsed / 1024 / 1024
+            pbar.set_description(
+                f"Processed {i}{'' if args.num_docs is None else '/' + str(args.num_docs)} documents ({i / elapsed:.2f} docs/s, {mbs:.2f} MB/s)."
+            )
+            if i != 0:
+                pbar.update(args.log_interval)
+
+    # save output file
+    update_keys = args.jsonl_keys + ["label"] if token_mask else args.jsonl_keys
+    for key in update_keys:
+        builders[key].finalize(output_idx_files[key])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/train.py b/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e01a6306a58d529b5ea10ab1fcdcba17fb0e923
--- /dev/null
+++ b/train.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2024, EleutherAI
+# This file is based on code by the authors denoted below and has been modified from its original version.
+#
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Train"""
+from megatron.neox_arguments import NeoXArgs
+from megatron.training import pretrain
+
+
+def main(input_args=None, overwrite_values=None):
+    neox_args = NeoXArgs.consume_neox_args(
+        input_args=input_args, overwrite_values=overwrite_values
+    )
+    neox_args.configure_distributed_args()
+    neox_args.build_tokenizer()  # tokenizer needs to be built in training in order to set the padding vocab
+    neox_args.initialize_tensorboard_writer()  # is initialized if a tensorboard directory is defined
+    neox_args.initialize_comet()  # is initialized if Comet logging is enabled in the config
+    pretrain(neox_args=neox_args)
+
+
+if __name__ == "__main__":
+    main()
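+
+# Typical launch (illustrative; the launcher invocation and config names are examples,
+# not fixed values; see the repository README for the canonical command):
+#   python ./deepy.py train.py -d configs 125M.yml local_setup.yml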