.select2-container {
box-sizing: border-box;
display: inline-block;
margin: 0;
position: relative;
vertical-align: middle; }
.select2-container .select2-selection--single {
box-sizing: border-box;
cursor: pointer;
display: block;
height: 28px;
user-select: none;
-webkit-user-select: none; }
.select2-container .select2-selection--single .select2-selection__rendered {
display: block;
padding-left: 8px;
padding-right: 20px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap; }
.select2-container .select2-selection--single .select2-selection__clear {
position: relative; }
.select2-container[dir="rtl"] .select2-selection--single .select2-selection__rendered {
padding-right: 8px;
padding-left: 20px; }
.select2-container .select2-selection--multiple {
box-sizing: border-box;
cursor: pointer;
display: block;
min-height: 32px;
user-select: none;
-webkit-user-select: none; }
.select2-container .select2-selection--multiple .select2-selection__rendered {
display: inline-block;
overflow: hidden;
padding-left: 8px;
text-overflow: ellipsis;
white-space: nowrap; }
.select2-container .select2-search--inline {
float: left; }
.select2-container .select2-search--inline .select2-search__field {
box-sizing: border-box;
border: none;
font-size: 100%;
margin-top: 5px;
padding: 0; }
.select2-container .select2-search--inline .select2-search__field::-webkit-search-cancel-button {
-webkit-appearance: none; }
.select2-dropdown {
background-color: white;
border: 1px solid #aaa;
border-radius: 4px;
box-sizing: border-box;
display: block;
position: absolute;
left: -100000px;
width: 100%;
z-index: 1051; }
.select2-results {
display: block; }
.select2-results__options {
list-style: none;
margin: 0;
padding: 0; }
.select2-results__option {
padding: 6px;
user-select: none;
-webkit-user-select: none; }
.select2-results__option[aria-selected] {
cursor: pointer; }
.select2-container--open .select2-dropdown {
left: 0; }
.select2-container--open .select2-dropdown--above {
border-bottom: none;
border-bottom-left-radius: 0;
border-bottom-right-radius: 0; }
.select2-container--open .select2-dropdown--below {
border-top: none;
border-top-left-radius: 0;
border-top-right-radius: 0; }
.select2-search--dropdown {
display: block;
padding: 4px; }
.select2-search--dropdown .select2-search__field {
padding: 4px;
width: 100%;
box-sizing: border-box; }
.select2-search--dropdown .select2-search__field::-webkit-search-cancel-button {
-webkit-appearance: none; }
.select2-search--dropdown.select2-search--hide {
display: none; }
.select2-close-mask {
border: 0;
margin: 0;
padding: 0;
display: block;
position: fixed;
left: 0;
top: 0;
min-height: 100%;
min-width: 100%;
height: auto;
width: auto;
opacity: 0;
z-index: 99;
background-color: #fff;
filter: alpha(opacity=0); }
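/* The next rule is the standard visually-hidden pattern: the element stays
   in the accessibility tree for screen readers, but is clipped to a 1px box
   instead of using display:none, which would hide it from assistive
   technology as well. */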
.select2-hidden-accessible {
border: 0 !important;
clip: rect(0 0 0 0) !important;
-webkit-clip-path: inset(50%) !important;
clip-path: inset(50%) !important;
height: 1px !important;
overflow: hidden !important;
padding: 0 !important;
position: absolute !important;
width: 1px !important;
white-space: nowrap !important; }
.select2-container--default .select2-selection--single {
background-color: #fff;
border: 1px solid #aaa;
border-radius: 4px; }
.select2-container--default .select2-selection--single .select2-selection__rendered {
color: #444;
line-height: 28px; }
.select2-container--default .select2-selection--single .select2-selection__clear {
cursor: pointer;
float: right;
font-weight: bold; }
.select2-container--default .select2-selection--single .select2-selection__placeholder {
color: #999; }
.select2-container--default .select2-selection--single .select2-selection__arrow {
height: 26px;
position: absolute;
top: 1px;
right: 1px;
width: 20px; }
.select2-container--default .select2-selection--single .select2-selection__arrow b {
border-color: #888 transparent transparent transparent;
border-style: solid;
border-width: 5px 4px 0 4px;
height: 0;
left: 50%;
margin-left: -4px;
margin-top: -2px;
position: absolute;
top: 50%;
width: 0; }
.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__clear {
float: left; }
.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__arrow {
left: 1px;
right: auto; }
.select2-container--default.select2-container--disabled .select2-selection--single {
background-color: #eee;
cursor: default; }
.select2-container--default.select2-container--disabled .select2-selection--single .select2-selection__clear {
display: none; }
.select2-container--default.select2-container--open .select2-selection--single .select2-selection__arrow b {
border-color: transparent transparent #888 transparent;
border-width: 0 4px 5px 4px; }
.select2-container--default .select2-selection--multiple {
background-color: white;
border: 1px solid #aaa;
border-radius: 4px;
cursor: text; }
.select2-container--default .select2-selection--multiple .select2-selection__rendered {
box-sizing: border-box;
list-style: none;
margin: 0;
padding: 0 5px;
width: 100%; }
.select2-container--default .select2-selection--multiple .select2-selection__rendered li {
list-style: none; }
.select2-container--default .select2-selection--multiple .select2-selection__clear {
cursor: pointer;
float: right;
font-weight: bold;
margin-top: 5px;
margin-right: 10px;
padding: 1px; }
.select2-container--default .select2-selection--multiple .select2-selection__choice {
background-color: #e4e4e4;
border: 1px solid #aaa;
border-radius: 4px;
cursor: default;
float: left;
margin-right: 5px;
margin-top: 5px;
padding: 0 5px; }
.select2-container--default .select2-selection--multiple .select2-selection__choice__remove {
color: #999;
cursor: pointer;
display: inline-block;
font-weight: bold;
margin-right: 2px; }
.select2-container--default .select2-selection--multiple .select2-selection__choice__remove:hover {
color: #333; }
.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice, .select2-container--default[dir="rtl"] .select2-selection--multiple .select2-search--inline {
float: right; }
.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
margin-left: 5px;
margin-right: auto; }
.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove {
margin-left: 2px;
margin-right: auto; }
.select2-container--default.select2-container--focus .select2-selection--multiple {
border: solid black 1px;
outline: 0; }
.select2-container--default.select2-container--disabled .select2-selection--multiple {
background-color: #eee;
cursor: default; }
.select2-container--default.select2-container--disabled .select2-selection__choice__remove {
display: none; }
.select2-container--default.select2-container--open.select2-container--above .select2-selection--single, .select2-container--default.select2-container--open.select2-container--above .select2-selection--multiple {
border-top-left-radius: 0;
border-top-right-radius: 0; }
.select2-container--default.select2-container--open.select2-container--below .select2-selection--single, .select2-container--default.select2-container--open.select2-container--below .select2-selection--multiple {
border-bottom-left-radius: 0;
border-bottom-right-radius: 0; }
.select2-container--default .select2-search--dropdown .select2-search__field {
border: 1px solid #aaa; }
.select2-container--default .select2-search--inline .select2-search__field {
background: transparent;
border: none;
outline: 0;
box-shadow: none;
-webkit-appearance: textfield; }
.select2-container--default .select2-results > .select2-results__options {
max-height: 200px;
overflow-y: auto; }
.select2-container--default .select2-results__option[role=group] {
padding: 0; }
.select2-container--default .select2-results__option[aria-disabled=true] {
color: #999; }
.select2-container--default .select2-results__option[aria-selected=true] {
background-color: #ddd; }
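/* Nested results: each additional nesting level below indents by one more
   1em step; the negative margins cancel the padding inherited from the
   enclosing option so only the intended indent remains. */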
.select2-container--default .select2-results__option .select2-results__option {
padding-left: 1em; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__group {
padding-left: 0; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__option {
margin-left: -1em;
padding-left: 2em; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -2em;
padding-left: 3em; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -3em;
padding-left: 4em; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -4em;
padding-left: 5em; }
.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
margin-left: -5em;
padding-left: 6em; }
.select2-container--default .select2-results__option--highlighted[aria-selected] {
background-color: #5897fb;
color: white; }
.select2-container--default .select2-results__group {
cursor: default;
display: block;
padding: 6px; }
.select2-container--classic .select2-selection--single {
background-color: #f7f7f7;
border: 1px solid #aaa;
border-radius: 4px;
outline: 0;
background-image: -webkit-linear-gradient(top, white 50%, #eeeeee 100%);
background-image: -o-linear-gradient(top, white 50%, #eeeeee 100%);
background-image: linear-gradient(to bottom, white 50%, #eeeeee 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0); }
.select2-container--classic .select2-selection--single:focus {
border: 1px solid #5897fb; }
.select2-container--classic .select2-selection--single .select2-selection__rendered {
color: #444;
line-height: 28px; }
.select2-container--classic .select2-selection--single .select2-selection__clear {
cursor: pointer;
float: right;
font-weight: bold;
margin-right: 10px; }
.select2-container--classic .select2-selection--single .select2-selection__placeholder {
color: #999; }
.select2-container--classic .select2-selection--single .select2-selection__arrow {
background-color: #ddd;
border: none;
border-left: 1px solid #aaa;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
height: 26px;
position: absolute;
top: 1px;
right: 1px;
width: 20px;
background-image: -webkit-linear-gradient(top, #eeeeee 50%, #cccccc 100%);
background-image: -o-linear-gradient(top, #eeeeee 50%, #cccccc 100%);
background-image: linear-gradient(to bottom, #eeeeee 50%, #cccccc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFCCCCCC', GradientType=0); }
.select2-container--classic .select2-selection--single .select2-selection__arrow b {
border-color: #888 transparent transparent transparent;
border-style: solid;
border-width: 5px 4px 0 4px;
height: 0;
left: 50%;
margin-left: -4px;
margin-top: -2px;
position: absolute;
top: 50%;
width: 0; }
.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__clear {
float: left; }
.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__arrow {
border: none;
border-right: 1px solid #aaa;
border-radius: 0;
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
left: 1px;
right: auto; }
.select2-container--classic.select2-container--open .select2-selection--single {
border: 1px solid #5897fb; }
.select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow {
background: transparent;
border: none; }
.select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow b {
border-color: transparent transparent #888 transparent;
border-width: 0 4px 5px 4px; }
.select2-container--classic.select2-container--open.select2-container--above .select2-selection--single {
border-top: none;
border-top-left-radius: 0;
border-top-right-radius: 0;
background-image: -webkit-linear-gradient(top, white 0%, #eeeeee 50%);
background-image: -o-linear-gradient(top, white 0%, #eeeeee 50%);
background-image: linear-gradient(to bottom, white 0%, #eeeeee 50%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0); }
.select2-container--classic.select2-container--open.select2-container--below .select2-selection--single {
border-bottom: none;
border-bottom-left-radius: 0;
border-bottom-right-radius: 0;
background-image: -webkit-linear-gradient(top, #eeeeee 50%, white 100%);
background-image: -o-linear-gradient(top, #eeeeee 50%, white 100%);
background-image: linear-gradient(to bottom, #eeeeee 50%, white 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFFFFFFF', GradientType=0); }
.select2-container--classic .select2-selection--multiple {
background-color: white;
border: 1px solid #aaa;
border-radius: 4px;
cursor: text;
outline: 0; }
.select2-container--classic .select2-selection--multiple:focus {
border: 1px solid #5897fb; }
.select2-container--classic .select2-selection--multiple .select2-selection__rendered {
list-style: none;
margin: 0;
padding: 0 5px; }
.select2-container--classic .select2-selection--multiple .select2-selection__clear {
display: none; }
.select2-container--classic .select2-selection--multiple .select2-selection__choice {
background-color: #e4e4e4;
border: 1px solid #aaa;
border-radius: 4px;
cursor: default;
float: left;
margin-right: 5px;
margin-top: 5px;
padding: 0 5px; }
.select2-container--classic .select2-selection--multiple .select2-selection__choice__remove {
color: #888;
cursor: pointer;
display: inline-block;
font-weight: bold;
margin-right: 2px; }
.select2-container--classic .select2-selection--multiple .select2-selection__choice__remove:hover {
color: #555; }
.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
float: right;
margin-left: 5px;
margin-right: auto; }
.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove {
margin-left: 2px;
margin-right: auto; }
.select2-container--classic.select2-container--open .select2-selection--multiple {
border: 1px solid #5897fb; }
.select2-container--classic.select2-container--open.select2-container--above .select2-selection--multiple {
border-top: none;
border-top-left-radius: 0;
border-top-right-radius: 0; }
.select2-container--classic.select2-container--open.select2-container--below .select2-selection--multiple {
border-bottom: none;
border-bottom-left-radius: 0;
border-bottom-right-radius: 0; }
.select2-container--classic .select2-search--dropdown .select2-search__field {
border: 1px solid #aaa;
outline: 0; }
.select2-container--classic .select2-search--inline .select2-search__field {
outline: 0;
box-shadow: none; }
.select2-container--classic .select2-dropdown {
background-color: white;
border: 1px solid transparent; }
.select2-container--classic .select2-dropdown--above {
border-bottom: none; }
.select2-container--classic .select2-dropdown--below {
border-top: none; }
.select2-container--classic .select2-results > .select2-results__options {
max-height: 200px;
overflow-y: auto; }
.select2-container--classic .select2-results__option[role=group] {
padding: 0; }
.select2-container--classic .select2-results__option[aria-disabled=true] {
color: grey; }
.select2-container--classic .select2-results__option--highlighted[aria-selected] {
background-color: #3875d7;
color: white; }
.select2-container--classic .select2-results__group {
cursor: default;
display: block;
padding: 6px; }
.select2-container--classic.select2-container--open .select2-dropdown {
border-color: #5897fb; }
| Django-locallibrary/LocalLibrary/staticfiles/admin/css/vendor/select2/select2.a2194c262648.css/0 | {"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/css/vendor/select2/select2.a2194c262648.css", "repo_id": "Django-locallibrary", "token_count": 6147} | 6 |
pip
| Django-locallibrary/env/Lib/site-packages/pip-20.2.3.dist-info/INSTALLER/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip-20.2.3.dist-info/INSTALLER", "repo_id": "Django-locallibrary", "token_count": 3} | 7 |
from __future__ import absolute_import
SUCCESS = 0
ERROR = 1
UNKNOWN_ERROR = 2
VIRTUALENV_NOT_FOUND = 3
PREVIOUS_BUILD_DIR_ERROR = 4
NO_MATCHES_FOUND = 23
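# Hedged usage sketch (hypothetical helper, not part of this module): a pip
# command's run() method returns one of the integers above, and the process
# exits with that value so shells can branch on the result.
def _exit_with(status):
    # type: (int) -> None
    import sys
    sys.exit(status)  # e.g. _exit_with(SUCCESS) exits with code 0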
| Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/status_codes.py/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/status_codes.py", "repo_id": "Django-locallibrary", "token_count": 64} | 8 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import shutil
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.req_command import RequirementCommand, with_cleanup
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.misc import ensure_dir, normalize_path
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.wheel_builder import build, should_build_for_wheel_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: https://wheel.readthedocs.io/en/latest/
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=os.curdir,
help=("Build wheels into <dir>, where the default is the "
"current working directory."),
)
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
)
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.no_use_pep517())
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.editable())
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.build_dir())
self.cmd_opts.add_option(cmdoptions.progress_bar())
self.cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
self.cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
self.cmd_opts.add_option(cmdoptions.require_hashes())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
@with_cleanup
def run(self, options, args):
# type: (Values, List[str]) -> int
cmdoptions.check_install_build_global(options)
session = self.get_default_session(options)
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
options.wheel_dir = normalize_path(options.wheel_dir)
ensure_dir(options.wheel_dir)
req_tracker = self.enter_context(get_requirement_tracker())
directory = TempDirectory(
options.build_dir,
delete=build_delete,
kind="wheel",
globally_managed=True,
)
reqs = self.get_requirements(args, options, finder, session)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
req_tracker=req_tracker,
session=session,
finder=finder,
wheel_download_dir=options.wheel_dir,
use_user_site=False,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
ignore_requires_python=options.ignore_requires_python,
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(
reqs, check_supported_wheels=True
)
reqs_to_build = [
r for r in requirement_set.requirements.values()
if should_build_for_wheel_command(r)
]
# build wheels
build_successes, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
build_options=options.build_options or [],
global_options=options.global_options or [],
)
for req in build_successes:
assert req.link and req.link.is_wheel
assert req.local_file_path
# copy from cache to target directory
try:
shutil.copy(req.local_file_path, options.wheel_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
build_failures.append(req)
if len(build_failures) != 0:
raise CommandError(
"Failed to build one or more wheels"
)
return SUCCESS
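# Hedged usage sketch (hypothetical driver script, not part of this module):
# the supported way to do what this command does is to shell out to the CLI
# rather than import pip's internals, which are not a stable API.
def _build_wheels_via_cli():
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "wheel",
                           "-w", "wheelhouse", "-r", "requirements.txt"])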
| Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/wheel.py/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/wheel.py", "repo_id": "Django-locallibrary", "token_count": 2878} | 9 |
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import logging
import os
import shutil
import sys
import uuid
import zipfile
from pip._vendor import pkg_resources, six
from pip._vendor.packaging.requirements import Requirement
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import Version
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.pep517.wrappers import Pep517HookCaller
from pip._internal.build_env import NoOpBuildEnvironment
from pip._internal.exceptions import InstallationError
from pip._internal.locations import get_scheme
from pip._internal.models.link import Link
from pip._internal.operations.build.metadata import generate_metadata
from pip._internal.operations.build.metadata_legacy import \
generate_metadata as generate_metadata_legacy
from pip._internal.operations.install.editable_legacy import \
install_editable as install_editable_legacy
from pip._internal.operations.install.legacy import LegacyInstallFailure
from pip._internal.operations.install.legacy import install as install_legacy
from pip._internal.operations.install.wheel import install_wheel
from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
from pip._internal.req.req_uninstall import UninstallPathSet
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.direct_url_helpers import direct_url_from_link
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
ask_path_exists,
backup_dir,
display_path,
dist_in_site_packages,
dist_in_usersite,
get_distribution,
get_installed_version,
hide_url,
redact_auth_from_url,
)
from pip._internal.utils.packaging import get_metadata
from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Iterable, List, Optional, Sequence, Union,
)
from pip._internal.build_env import BuildEnvironment
from pip._vendor.pkg_resources import Distribution
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.markers import Marker
logger = logging.getLogger(__name__)
def _get_dist(metadata_directory):
# type: (str) -> Distribution
"""Return a pkg_resources.Distribution for the provided
metadata directory.
"""
dist_dir = metadata_directory.rstrip(os.sep)
# Build a PathMetadata object, from path to metadata. :wink:
base_dir, dist_dir_name = os.path.split(dist_dir)
metadata = pkg_resources.PathMetadata(base_dir, dist_dir)
# Determine the correct Distribution object type.
if dist_dir.endswith(".egg-info"):
dist_cls = pkg_resources.Distribution
dist_name = os.path.splitext(dist_dir_name)[0]
else:
assert dist_dir.endswith(".dist-info")
dist_cls = pkg_resources.DistInfoDistribution
dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0]
return dist_cls(
base_dir,
project_name=dist_name,
metadata=metadata,
)
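# Hedged usage sketch (hypothetical path): given an unpacked metadata
# directory, _get_dist() returns a Distribution whose project name is parsed
# from the directory name.
#
#     dist = _get_dist("/tmp/build/example-1.0.dist-info")
#     dist.project_name  # -> "example"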
class InstallRequirement(object):
"""
Represents something that may be installed later on, may have information
about where to fetch the relevant requirement and also contains logic for
installing the said requirement.
"""
def __init__(
self,
req, # type: Optional[Requirement]
comes_from, # type: Optional[Union[str, InstallRequirement]]
editable=False, # type: bool
link=None, # type: Optional[Link]
markers=None, # type: Optional[Marker]
use_pep517=None, # type: Optional[bool]
isolated=False, # type: bool
install_options=None, # type: Optional[List[str]]
global_options=None, # type: Optional[List[str]]
hash_options=None, # type: Optional[Dict[str, List[str]]]
constraint=False, # type: bool
extras=(), # type: Iterable[str]
user_supplied=False, # type: bool
):
# type: (...) -> None
assert req is None or isinstance(req, Requirement), req
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.editable = editable
self.legacy_install_reason = None # type: Optional[int]
# source_dir is the local directory where the linked requirement is
# located, or unpacked. In case unpacking is needed, creating and
# populating source_dir is done by the RequirementPreparer. Note this
# is not necessarily the directory where pyproject.toml or setup.py is
# located - that one is obtained via unpacked_source_directory.
self.source_dir = None # type: Optional[str]
if self.editable:
assert link
if link.is_file:
self.source_dir = os.path.normpath(
os.path.abspath(link.file_path)
)
if link is None and req and req.url:
# PEP 508 URL requirement
link = Link(req.url)
self.link = self.original_link = link
self.original_link_is_in_wheel_cache = False
# Path to any downloaded or already-existing package.
self.local_file_path = None # type: Optional[str]
if self.link and self.link.is_file:
self.local_file_path = self.link.file_path
if extras:
self.extras = extras
elif req:
self.extras = {
pkg_resources.safe_extra(extra) for extra in req.extras
}
else:
self.extras = set()
if markers is None and req:
markers = req.marker
self.markers = markers
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None # type: Optional[Distribution]
# Whether the installation process should try to uninstall an existing
# distribution before installing this requirement.
self.should_reinstall = False
# Temporary build location
self._temp_build_dir = None # type: Optional[TempDirectory]
# Set to True after successful installation
self.install_succeeded = None # type: Optional[bool]
# Supplied options
self.install_options = install_options if install_options else []
self.global_options = global_options if global_options else []
self.hash_options = hash_options if hash_options else {}
# Set to True after successful preparation of this requirement
self.prepared = False
# User-supplied requirements are explicitly requested for installation
# by the user via CLI arguments or requirements files, as opposed to,
# e.g., dependencies, extras or constraints.
self.user_supplied = user_supplied
# Set by the legacy resolver when the requirement has been downloaded
# TODO: This introduces a strong coupling between the resolver and the
# requirement (the coupling was previously between the resolver
# and the requirement set). This should be refactored to allow
# the requirement to decide for itself when it has been
# successfully downloaded - but that is trickier to get right,
# so we are making the change in stages.
self.successfully_downloaded = False
self.isolated = isolated
self.build_env = NoOpBuildEnvironment() # type: BuildEnvironment
# For PEP 517, the directory where we request the project metadata
# gets stored. We need this to pass to build_wheel, so the backend
# can ensure that the wheel matches the metadata (see the PEP for
# details).
self.metadata_directory = None # type: Optional[str]
# The static build requirements (from pyproject.toml)
self.pyproject_requires = None # type: Optional[List[str]]
# Build requirements that we will check are available
self.requirements_to_check = [] # type: List[str]
# The PEP 517 backend we should use to build the project
self.pep517_backend = None # type: Optional[Pep517HookCaller]
# Are we using PEP 517 for this requirement?
# After pyproject.toml has been loaded, the only valid values are True
# and False. Before loading, None is valid (meaning "use the default").
# Setting an explicit value before loading pyproject.toml is supported,
# but after loading this flag should be treated as read only.
self.use_pep517 = use_pep517
def __str__(self):
# type: () -> str
if self.req:
s = str(self.req)
if self.link:
s += ' from {}'.format(redact_auth_from_url(self.link.url))
elif self.link:
s = redact_auth_from_url(self.link.url)
else:
s = '<InstallRequirement>'
if self.satisfied_by is not None:
s += ' in {}'.format(display_path(self.satisfied_by.location))
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from # type: Optional[str]
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from {})'.format(comes_from)
return s
def __repr__(self):
# type: () -> str
return '<{} object: {} editable={!r}>'.format(
self.__class__.__name__, str(self), self.editable)
def format_debug(self):
# type: () -> str
"""An un-tested helper for getting state, for debugging.
"""
attributes = vars(self)
names = sorted(attributes)
state = (
"{}={!r}".format(attr, attributes[attr]) for attr in sorted(names)
)
return '<{name} object: {{{state}}}>'.format(
name=self.__class__.__name__,
state=", ".join(state),
)
# Things that are valid for all kinds of requirements?
@property
def name(self):
# type: () -> Optional[str]
if self.req is None:
return None
return six.ensure_str(pkg_resources.safe_name(self.req.name))
@property
def specifier(self):
# type: () -> SpecifierSet
return self.req.specifier
@property
def is_pinned(self):
# type: () -> bool
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in {'==', '==='})
@property
def installed_version(self):
# type: () -> Optional[str]
return get_installed_version(self.name)
def match_markers(self, extras_requested=None):
# type: (Optional[Iterable[str]]) -> bool
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ('',)
if self.markers is not None:
return any(
self.markers.evaluate({'extra': extra})
for extra in extras_requested)
else:
return True
@property
def has_hash_options(self):
# type: () -> bool
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.hash_options)
def hashes(self, trust_internet=True):
# type: (bool) -> Hashes
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.hash_options.copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def from_path(self):
# type: () -> Optional[str]
"""Format a nice indicator to show where this "comes from"
"""
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def ensure_build_location(self, build_dir, autodelete, parallel_builds):
# type: (str, bool, bool) -> str
assert build_dir is not None
if self._temp_build_dir is not None:
assert self._temp_build_dir.path
return self._temp_build_dir.path
if self.req is None:
# Some systems have /tmp as a symlink which confuses custom
# builds (such as numpy). Thus, we ensure that the real path
# is returned.
self._temp_build_dir = TempDirectory(
kind=tempdir_kinds.REQ_BUILD, globally_managed=True
)
return self._temp_build_dir.path
# When parallel builds are enabled, add a UUID to the build directory
# name so multiple builds do not interfere with each other.
dir_name = canonicalize_name(self.name)
if parallel_builds:
dir_name = "{}_{}".format(dir_name, uuid.uuid4().hex)
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
os.makedirs(build_dir)
actual_build_dir = os.path.join(build_dir, dir_name)
# `None` indicates that we respect the globally-configured deletion
# settings, which is what we actually want when auto-deleting.
delete_arg = None if autodelete else False
return TempDirectory(
path=actual_build_dir,
delete=delete_arg,
kind=tempdir_kinds.REQ_BUILD,
globally_managed=True,
).path
def _set_requirement(self):
# type: () -> None
"""Set requirement after generating metadata.
"""
assert self.req is None
assert self.metadata is not None
assert self.source_dir is not None
# Construct a Requirement object from the generated metadata
if isinstance(parse_version(self.metadata["Version"]), Version):
op = "=="
else:
op = "==="
self.req = Requirement(
"".join([
self.metadata["Name"],
op,
self.metadata["Version"],
])
)
def warn_on_mismatching_name(self):
# type: () -> None
metadata_name = canonicalize_name(self.metadata["Name"])
if canonicalize_name(self.req.name) == metadata_name:
# Everything is fine.
return
# If we're here, there's a mismatch. Log a warning about it.
logger.warning(
'Generating metadata for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.name, metadata_name, self.name
)
self.req = Requirement(metadata_name)
def check_if_exists(self, use_user_site):
# type: (bool) -> None
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.should_reinstall appropriately.
"""
if self.req is None:
return
existing_dist = get_distribution(self.req.name)
if not existing_dist:
return
existing_version = existing_dist.parsed_version
if not self.req.specifier.contains(existing_version, prereleases=True):
self.satisfied_by = None
if use_user_site:
if dist_in_usersite(existing_dist):
self.should_reinstall = True
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to {} in {}".format(
existing_dist.project_name, existing_dist.location)
)
else:
self.should_reinstall = True
else:
if self.editable:
self.should_reinstall = True
# when installing editables, nothing pre-existing should ever
# satisfy
self.satisfied_by = None
else:
self.satisfied_by = existing_dist
# Things valid for wheels
@property
def is_wheel(self):
# type: () -> bool
if not self.link:
return False
return self.link.is_wheel
# Things valid for sdists
@property
def unpacked_source_directory(self):
# type: () -> str
return os.path.join(
self.source_dir,
self.link and self.link.subdirectory_fragment or '')
@property
def setup_py_path(self):
# type: () -> str
assert self.source_dir, "No source dir for {}".format(self)
setup_py = os.path.join(self.unpacked_source_directory, 'setup.py')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
@property
def pyproject_toml_path(self):
# type: () -> str
assert self.source_dir, "No source dir for {}".format(self)
return make_pyproject_path(self.unpacked_source_directory)
def load_pyproject_toml(self):
# type: () -> None
"""Load the pyproject.toml file.
After calling this routine, all of the attributes related to PEP 517
processing for this requirement have been set. In particular, the
use_pep517 attribute can be used to determine whether we should
follow the PEP 517 or legacy (setup.py) code path.
"""
pyproject_toml_data = load_pyproject_toml(
self.use_pep517,
self.pyproject_toml_path,
self.setup_py_path,
str(self)
)
if pyproject_toml_data is None:
self.use_pep517 = False
return
self.use_pep517 = True
requires, backend, check, backend_path = pyproject_toml_data
self.requirements_to_check = check
self.pyproject_requires = requires
self.pep517_backend = Pep517HookCaller(
self.unpacked_source_directory, backend, backend_path=backend_path,
)
def _generate_metadata(self):
# type: () -> str
"""Invokes metadata generator functions, with the required arguments.
"""
if not self.use_pep517:
assert self.unpacked_source_directory
return generate_metadata_legacy(
build_env=self.build_env,
setup_py_path=self.setup_py_path,
source_dir=self.unpacked_source_directory,
isolated=self.isolated,
details=self.name or "from {}".format(self.link)
)
assert self.pep517_backend is not None
return generate_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
)
def prepare_metadata(self):
# type: () -> None
"""Ensure that project metadata is available.
Under PEP 517, call the backend hook to prepare the metadata.
Under legacy processing, call setup.py egg-info.
"""
assert self.source_dir
with indent_log():
self.metadata_directory = self._generate_metadata()
# Act on the newly generated metadata, based on the name and version.
if not self.name:
self._set_requirement()
else:
self.warn_on_mismatching_name()
self.assert_source_matches_version()
@property
def metadata(self):
# type: () -> Any
if not hasattr(self, '_metadata'):
self._metadata = get_metadata(self.get_dist())
return self._metadata
def get_dist(self):
# type: () -> Distribution
return _get_dist(self.metadata_directory)
def assert_source_matches_version(self):
# type: () -> None
assert self.source_dir
version = self.metadata['version']
if self.req.specifier and version not in self.req.specifier:
logger.warning(
'Requested %s, but installing version %s',
self,
version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
# For both source distributions and editables
def ensure_has_source_dir(
self,
parent_dir,
autodelete=False,
parallel_builds=False,
):
# type: (str, bool, bool) -> None
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.ensure_build_location(
parent_dir,
autodelete=autodelete,
parallel_builds=parallel_builds,
)
# For editable installations
def update_editable(self, obtain=True):
# type: (bool) -> None
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, \
"bad url: {self.link.url!r}".format(**locals())
vc_type, url = self.link.url.split('+', 1)
vcs_backend = vcs.get_backend(vc_type)
if vcs_backend:
if not self.link.is_vcs:
reason = (
"This form of VCS requirement is being deprecated: {}."
).format(
self.link.url
)
replacement = None
if self.link.url.startswith("git+git@"):
replacement = (
"git+https://[email protected]/..., "
"git+ssh://[email protected]/..., "
"or the insecure git+git://[email protected]/..."
)
deprecated(reason, replacement, gone_in="21.0", issue=7554)
hidden_url = hide_url(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir, url=hidden_url)
else:
vcs_backend.export(self.source_dir, url=hidden_url)
else:
assert 0, (
'Unexpected version control type (in {}): {}'.format(
self.link, vc_type))
# Top-level Actions
def uninstall(self, auto_confirm=False, verbose=False):
# type: (bool, bool) -> Optional[UninstallPathSet]
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
assert self.req
dist = get_distribution(self.req.name)
if not dist:
logger.warning("Skipping %s as it is not installed.", self.name)
return None
logger.info('Found existing installation: %s', dist)
uninstalled_pathset = UninstallPathSet.from_dist(dist)
uninstalled_pathset.remove(auto_confirm, verbose)
return uninstalled_pathset
def _get_archive_name(self, path, parentdir, rootdir):
# type: (str, str, str) -> str
def _clean_zip_name(name, prefix):
# type: (str, str) -> str
assert name.startswith(prefix + os.path.sep), (
"name {name!r} doesn't start with prefix {prefix!r}"
.format(**locals())
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
path = os.path.join(parentdir, path)
name = _clean_zip_name(path, rootdir)
return self.name + '/' + name
def archive(self, build_dir):
# type: (str) -> None
"""Saves archive to provided build_dir.
Used for saving downloaded VCS requirements as part of `pip download`.
"""
assert self.source_dir
create_archive = True
archive_name = '{}-{}.zip'.format(self.name, self.metadata["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file {} exists. (i)gnore, (w)ipe, '
'(b)ackup, (a)bort '.format(
display_path(archive_path)),
('i', 'w', 'b', 'a'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
elif response == 'a':
sys.exit(-1)
if not create_archive:
return
zip_output = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True,
)
with zip_output:
dir = os.path.normcase(
os.path.abspath(self.unpacked_source_directory)
)
for dirpath, dirnames, filenames in os.walk(dir):
for dirname in dirnames:
dir_arcname = self._get_archive_name(
dirname, parentdir=dirpath, rootdir=dir,
)
zipdir = zipfile.ZipInfo(dir_arcname + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip_output.writestr(zipdir, '')
for filename in filenames:
file_arcname = self._get_archive_name(
filename, parentdir=dirpath, rootdir=dir,
)
filename = os.path.join(dirpath, filename)
zip_output.write(filename, file_arcname)
logger.info('Saved %s', display_path(archive_path))
def install(
self,
install_options, # type: List[str]
global_options=None, # type: Optional[Sequence[str]]
root=None, # type: Optional[str]
home=None, # type: Optional[str]
prefix=None, # type: Optional[str]
warn_script_location=True, # type: bool
use_user_site=False, # type: bool
pycompile=True # type: bool
):
# type: (...) -> None
scheme = get_scheme(
self.name,
user=use_user_site,
home=home,
root=root,
isolated=self.isolated,
prefix=prefix,
)
global_options = global_options if global_options is not None else []
if self.editable:
install_editable_legacy(
install_options,
global_options,
prefix=prefix,
home=home,
use_user_site=use_user_site,
name=self.name,
setup_py_path=self.setup_py_path,
isolated=self.isolated,
build_env=self.build_env,
unpacked_source_directory=self.unpacked_source_directory,
)
self.install_succeeded = True
return
if self.is_wheel:
assert self.local_file_path
direct_url = None
if self.original_link:
direct_url = direct_url_from_link(
self.original_link,
self.source_dir,
self.original_link_is_in_wheel_cache,
)
install_wheel(
self.name,
self.local_file_path,
scheme=scheme,
req_description=str(self.req),
pycompile=pycompile,
warn_script_location=warn_script_location,
direct_url=direct_url,
requested=self.user_supplied,
)
self.install_succeeded = True
return
# TODO: Why don't we do this for editable installs?
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options = list(global_options) + self.global_options
install_options = list(install_options) + self.install_options
try:
success = install_legacy(
install_options=install_options,
global_options=global_options,
root=root,
home=home,
prefix=prefix,
use_user_site=use_user_site,
pycompile=pycompile,
scheme=scheme,
setup_py_path=self.setup_py_path,
isolated=self.isolated,
req_name=self.name,
build_env=self.build_env,
unpacked_source_directory=self.unpacked_source_directory,
req_description=str(self.req),
)
except LegacyInstallFailure as exc:
self.install_succeeded = False
six.reraise(*exc.parent)
except Exception:
self.install_succeeded = True
raise
self.install_succeeded = success
if success and self.legacy_install_reason == 8368:
deprecated(
reason=(
"{} was installed using the legacy 'setup.py install' "
"method, because a wheel could not be built for it.".
format(self.name)
),
replacement="to fix the wheel build issue reported above",
gone_in="21.0",
issue=8368,
)
def check_invalid_constraint_type(req):
# type: (InstallRequirement) -> str
# Check for unsupported forms
problem = ""
if not req.name:
problem = "Unnamed requirements are not allowed as constraints"
elif req.link:
problem = "Links are not allowed as constraints"
elif req.extras:
problem = "Constraints cannot have extras"
if problem:
deprecated(
reason=(
"Constraints are only allowed to take the form of a package "
"name and a version specifier. Other forms were originally "
"permitted as an accident of the implementation, but were "
"undocumented. The new implementation of the resolver no "
"longer supports these forms."
),
replacement=(
"replacing the constraint with a requirement."
),
# No plan yet for when the new resolver becomes default
gone_in=None,
issue=8210
)
return problem
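# Hedged usage sketch: a constraint carrying extras trips the third check
# above and logs the deprecation before the problem string is returned.
#
#     problem = check_invalid_constraint_type(req_with_extras)  # hypothetical req
#     # problem == "Constraints cannot have extras"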
| Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_install.py/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_install.py", "repo_id": "Django-locallibrary", "token_count": 15107} | 10 |
import codecs
import locale
import re
import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Tuple, Text
BOMS = [
(codecs.BOM_UTF8, 'utf-8'),
(codecs.BOM_UTF16, 'utf-16'),
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),
(codecs.BOM_UTF32, 'utf-32'),
(codecs.BOM_UTF32_BE, 'utf-32-be'),
(codecs.BOM_UTF32_LE, 'utf-32-le'),
] # type: List[Tuple[bytes, Text]]
ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)')
def auto_decode(data):
# type: (bytes) -> Text
"""Check a bytes string for a BOM to correctly detect the encoding
Fallback to locale.getpreferredencoding(False) like open() on Python3"""
for bom, encoding in BOMS:
if data.startswith(bom):
return data[len(bom):].decode(encoding)
# Let's check the first two lines, as in PEP 263
for line in data.split(b'\n')[:2]:
if line[0:1] == b'#' and ENCODING_RE.search(line):
result = ENCODING_RE.search(line)
assert result is not None
encoding = result.groups()[0].decode('ascii')
return data.decode(encoding)
return data.decode(
locale.getpreferredencoding(False) or sys.getdefaultencoding(),
)
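# Hedged usage sketch: a recognised BOM is stripped and the matching codec
# used, so UTF-8-with-BOM content decodes cleanly.
#
#     >>> auto_decode(codecs.BOM_UTF8 + b"caf\xc3\xa9")
#     u'caf\xe9'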
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/encoding.py/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/encoding.py", "repo_id": "Django-locallibrary", "token_count": 590} | 11 |
"""For neatly implementing static typing in pip.
`mypy` - the static type analysis tool we use - uses the `typing` module, which
provides core functionality fundamental to mypy's functioning.
Generally, `typing` would be imported at runtime and used in that fashion -
it acts as a no-op at runtime and does not have any run-time overhead by
design.
As it turns out, `typing` is not vendorable - it uses separate sources for
Python 2/Python 3. Thus, this codebase can not expect it to be present.
To work around this, mypy allows the typing import to be behind a False-y
optional to prevent it from running at runtime and type-comments can be used
to remove the need for the types to be accessible directly during runtime.
This module provides the False-y guard in a nicely named fashion so that a
curious maintainer can reach here to read this.
In pip, all static-typing related imports should be guarded as follows:
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import ...
Ref: https://github.com/python/mypy/issues/3216
"""
MYPY_CHECK_RUNNING = False
if MYPY_CHECK_RUNNING:
from typing import cast
else:
# typing's cast() is needed at runtime, but we don't want to import typing.
# Thus, we use a dummy no-op version, which we tell mypy to ignore.
def cast(type_, value): # type: ignore
return value
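# Hedged usage sketch (hypothetical call site): under mypy, cast() narrows
# the type; at runtime the dummy above just hands the value back unchanged.
#
#     result = cast('List[str]', some_value)  # some_value returned as-is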
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/typing.py/0 | {"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/typing.py", "repo_id": "Django-locallibrary", "token_count": 409} | 12 |
"""Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import subprocess
import sys
from pip._vendor import pkg_resources
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.exceptions import (
BadCommand,
InstallationError,
SubProcessError,
)
from pip._internal.utils.compat import console_to_str, samefile
from pip._internal.utils.logging import subprocess_logger
from pip._internal.utils.misc import (
ask_path_exists,
backup_dir,
display_path,
hide_url,
hide_value,
rmtree,
)
from pip._internal.utils.subprocess import (
format_command_args,
make_command,
make_subprocess_output_error,
reveal_command_args,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import get_url_scheme
if MYPY_CHECK_RUNNING:
from typing import (
Dict, Iterable, Iterator, List, Optional, Text, Tuple,
Type, Union, Mapping, Any
)
from pip._internal.utils.misc import HiddenText
from pip._internal.utils.subprocess import CommandArgs
AuthInfo = Tuple[Optional[str], Optional[str]]
__all__ = ['vcs']
logger = logging.getLogger(__name__)
def is_url(name):
# type: (Union[str, Text]) -> bool
"""
Return true if the name looks like a URL.
"""
scheme = get_url_scheme(name)
if scheme is None:
return False
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
# type: (str, str, str, Optional[str]) -> str
"""
Return the URL for a VCS requirement.
Args:
repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
project_name: the (unescaped) project name.
"""
egg_project_name = pkg_resources.to_filename(project_name)
req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
if subdir:
req += '&subdirectory={}'.format(subdir)
return req
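# Hedged example (hypothetical values) of the format built above; note that
# pkg_resources.to_filename() turns "-" into "_" in the egg fragment:
#
#     make_vcs_requirement_url("git+https://example.com/repo.git", "abc123",
#                              "my-project", subdir="pkg")
#     # -> 'git+https://example.com/repo.git@abc123#egg=my_project&subdirectory=pkg'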
def call_subprocess(
cmd, # type: Union[List[str], CommandArgs]
cwd=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
log_failed_cmd=True # type: Optional[bool]
):
# type: (...) -> Text
"""
Args:
extra_ok_returncodes: an iterable of integer return codes that are
acceptable, in addition to 0. Defaults to None, which means [].
log_failed_cmd: if false, failed commands are not logged,
only raised.
"""
if extra_ok_returncodes is None:
extra_ok_returncodes = []
# log the subprocess output at DEBUG level.
log_subprocess = subprocess_logger.debug
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
# Whether the subprocess will be visible in the console.
showing_subprocess = True
command_desc = format_command_args(cmd)
try:
proc = subprocess.Popen(
# Convert HiddenText objects to the underlying str.
reveal_command_args(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
if proc.stdin:
proc.stdin.close()
except Exception as exc:
if log_failed_cmd:
subprocess_logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
while True:
# The "line" value is a unicode string in Python 2.
line = None
if proc.stdout:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
# Show the line immediately.
log_subprocess(line)
try:
proc.wait()
finally:
if proc.stdout:
proc.stdout.close()
proc_had_error = (
proc.returncode and proc.returncode not in extra_ok_returncodes
)
if proc_had_error:
if not showing_subprocess and log_failed_cmd:
# Then the subprocess streams haven't been logged to the
# console yet.
msg = make_subprocess_output_error(
cmd_args=cmd,
cwd=cwd,
lines=all_output,
exit_status=proc.returncode,
)
subprocess_logger.error(msg)
exc_msg = (
'Command errored out with exit status {}: {} '
'Check the logs for full command output.'
).format(proc.returncode, command_desc)
raise SubProcessError(exc_msg)
return ''.join(all_output)
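# Hedged usage sketch (hypothetical command): stdout is read line by line,
# logged at DEBUG, and returned joined; a disallowed exit status raises
# SubProcessError.
#
#     sha = call_subprocess(["git", "rev-parse", "HEAD"], cwd="/some/repo")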
def find_path_to_setup_from_repo_root(location, repo_root):
# type: (str, str) -> Optional[str]
"""
Find the path to `setup.py` by searching up the filesystem from `location`.
Return the path to `setup.py` relative to `repo_root`.
Return None if `setup.py` is in `repo_root` or cannot be found.
"""
# find setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
if samefile(repo_root, location):
return None
return os.path.relpath(location, repo_root)
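# Hedged example (hypothetical layout): with setup.py at <repo>/pkg/setup.py,
# find_path_to_setup_from_repo_root("<repo>/pkg/src", "<repo>") walks up one
# level, finds it, and returns "pkg".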
class RemoteNotFoundError(Exception):
pass
class RevOptions(object):
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Instances of this class should be treated as if immutable.
"""
def __init__(
self,
vc_class, # type: Type[VersionControl]
rev=None, # type: Optional[str]
extra_args=None, # type: Optional[CommandArgs]
):
# type: (...) -> None
"""
Args:
vc_class: a VersionControl subclass.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
if extra_args is None:
extra_args = []
self.extra_args = extra_args
self.rev = rev
self.vc_class = vc_class
self.branch_name = None # type: Optional[str]
def __repr__(self):
# type: () -> str
return '<RevOptions {}: rev={!r}>'.format(self.vc_class.name, self.rev)
@property
def arg_rev(self):
# type: () -> Optional[str]
if self.rev is None:
return self.vc_class.default_arg_rev
return self.rev
def to_args(self):
# type: () -> CommandArgs
"""
Return the VCS-specific command arguments.
"""
args = [] # type: CommandArgs
rev = self.arg_rev
if rev is not None:
args += self.vc_class.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self):
# type: () -> str
if not self.rev:
return ''
return ' (to revision {})'.format(self.rev)
def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
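# Minimal usage sketch (SomeBackend is hypothetical; any concrete
# VersionControl subclass, such as the Git backend defined elsewhere, works):
#
#     >>> opts = SomeBackend.make_rev_options('v1.0')
#     >>> opts.to_display()
#     ' (to revision v1.0)'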
class VcsSupport(object):
_registry = {} # type: Dict[str, VersionControl]
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# type: () -> None
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4 and >= 3.3 don't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
# type: () -> Iterator[str]
return self._registry.__iter__()
@property
def backends(self):
# type: () -> List[VersionControl]
return list(self._registry.values())
@property
def dirnames(self):
# type: () -> List[str]
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
# type: () -> List[str]
schemes = [] # type: List[str]
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
# type: (Type[VersionControl]) -> None
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls()
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, name):
# type: (str) -> None
if name in self._registry:
del self._registry[name]
def get_backend_for_dir(self, location):
# type: (str) -> Optional[VersionControl]
"""
Return a VersionControl object if a repository of that type is found
at the given directory.
"""
vcs_backends = {}
for vcs_backend in self._registry.values():
repo_path = vcs_backend.get_repository_root(location)
if not repo_path:
continue
            logger.debug('Determined that %s uses VCS: %s',
                         location, vcs_backend.name)
vcs_backends[repo_path] = vcs_backend
if not vcs_backends:
return None
# Choose the VCS in the inner-most directory. Since all repository
# roots found here would be either `location` or one of its
# parents, the longest path should have the most path components,
# i.e. the backend representing the inner-most repository.
inner_most_repo_path = max(vcs_backends, key=len)
return vcs_backends[inner_most_repo_path]
def get_backend_for_scheme(self, scheme):
# type: (str) -> Optional[VersionControl]
"""
Return a VersionControl object or None.
"""
for vcs_backend in self._registry.values():
if scheme in vcs_backend.schemes:
return vcs_backend
return None
def get_backend(self, name):
# type: (str) -> Optional[VersionControl]
"""
Return a VersionControl object or None.
"""
name = name.lower()
return self._registry.get(name)
vcs = VcsSupport()
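# Usage sketch for the module-level singleton (assuming the concrete backends
# have been imported and registered elsewhere, e.g. a Git backend):
#
#     >>> backend = vcs.get_backend('git')  # None if no 'git' backend is registered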
class VersionControl(object):
name = ''
dirname = ''
repo_name = ''
# List of supported schemes for this Version Control
schemes = () # type: Tuple[str, ...]
    # Iterable of environment variable names to unset when running the
    # VCS command via call_subprocess().
unset_environ = () # type: Tuple[str, ...]
default_arg_rev = None # type: Optional[str]
@classmethod
def should_add_vcs_url_prefix(cls, remote_url):
# type: (str) -> bool
"""
Return whether the vcs prefix (e.g. "git+") should be added to a
repository's remote url when used in a requirement.
"""
return not remote_url.lower().startswith('{}:'.format(cls.name))
@classmethod
def get_subdirectory(cls, location):
# type: (str) -> Optional[str]
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
return None
@classmethod
def get_requirement_revision(cls, repo_dir):
# type: (str) -> str
"""
Return the revision string that should be used in a requirement.
"""
return cls.get_revision(repo_dir)
@classmethod
def get_src_requirement(cls, repo_dir, project_name):
# type: (str, str) -> Optional[str]
"""
Return the requirement string to use to redownload the files
currently at the given repository directory.
Args:
project_name: the (unescaped) project name.
The return value has a form similar to the following:
{repository_url}@{revision}#egg={project_name}
"""
repo_url = cls.get_remote_url(repo_dir)
if repo_url is None:
return None
if cls.should_add_vcs_url_prefix(repo_url):
repo_url = '{}+{}'.format(cls.name, repo_url)
revision = cls.get_requirement_revision(repo_dir)
subdir = cls.get_subdirectory(repo_dir)
req = make_vcs_requirement_url(repo_url, revision, project_name,
subdir=subdir)
return req
@staticmethod
def get_base_rev_args(rev):
# type: (str) -> List[str]
"""
Return the base revision arguments for a vcs command.
Args:
rev: the name of a revision to install. Cannot be None.
"""
raise NotImplementedError
def is_immutable_rev_checkout(self, url, dest):
# type: (str, str) -> bool
"""
Return true if the commit hash checked out at dest matches
the revision in url.
Always return False, if the VCS does not support immutable commit
hashes.
This method does not check if there are local uncommitted changes
in dest after checkout, as pip currently has no use case for that.
"""
return False
@classmethod
def make_rev_options(cls, rev=None, extra_args=None):
# type: (Optional[str], Optional[CommandArgs]) -> RevOptions
"""
Return a RevOptions object.
Args:
rev: the name of a revision to install.
extra_args: a list of extra options.
"""
return RevOptions(cls, rev, extra_args=extra_args)
@classmethod
def _is_local_repository(cls, repo):
# type: (str) -> bool
"""
posix absolute paths start with os.path.sep,
win32 ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or bool(drive)
def export(self, location, url):
# type: (str, HiddenText) -> None
"""
Export the repository at the url to the destination location
        i.e. only download the files, without vcs information
:param url: the repository URL starting with a vcs prefix.
"""
raise NotImplementedError
@classmethod
def get_netloc_and_auth(cls, netloc, scheme):
# type: (str, str) -> Tuple[str, Tuple[Optional[str], Optional[str]]]
"""
Parse the repository URL's netloc, and return the new netloc to use
along with auth information.
Args:
netloc: the original repository URL netloc.
scheme: the repository URL's scheme without the vcs prefix.
This is mainly for the Subversion class to override, so that auth
information can be provided via the --username and --password options
instead of through the URL. For other subclasses like Git without
such an option, auth information must stay in the URL.
Returns: (netloc, (username, password)).
"""
return netloc, (None, None)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Parse the repository URL to use, and return the URL, revision,
and auth info to use.
Returns: (url, rev, (username, password)).
"""
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
if '+' not in scheme:
raise ValueError(
"Sorry, {!r} is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
)
# Remove the vcs prefix.
scheme = scheme.split('+', 1)[1]
netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
if not rev:
raise InstallationError(
"The URL {!r} has an empty revision (after @) "
"which is not supported. Include a revision after @ "
"or remove @ from the URL.".format(url)
)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev, user_pass
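    # Example (a sketch; the base-class behaviour shown here applies when a
    # subclass does not override get_netloc_and_auth):
    #
    #     >>> VersionControl.get_url_rev_and_auth(
    #     ...     'git+https://example.com/repo.git@v1.0')
    #     ('https://example.com/repo.git', 'v1.0', (None, None))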
@staticmethod
def make_rev_args(username, password):
# type: (Optional[str], Optional[HiddenText]) -> CommandArgs
"""
Return the RevOptions "extra arguments" to use in obtain().
"""
return []
def get_url_rev_options(self, url):
# type: (HiddenText) -> Tuple[HiddenText, RevOptions]
"""
Return the URL and RevOptions object to use in obtain() and in
some cases export(), as a tuple (url, rev_options).
"""
secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret)
username, secret_password = user_pass
password = None # type: Optional[HiddenText]
if secret_password is not None:
password = hide_value(secret_password)
extra_args = self.make_rev_args(username, password)
rev_options = self.make_rev_options(rev, extra_args=extra_args)
return hide_url(secret_url), rev_options
@staticmethod
def normalize_url(url):
# type: (str) -> str
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
@classmethod
def compare_urls(cls, url1, url2):
# type: (str, str) -> bool
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (cls.normalize_url(url1) == cls.normalize_url(url2))
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
"""
Fetch a revision from a repository, in the case that this is the
first fetch from the repository.
Args:
dest: the directory to fetch the repository to.
rev_options: a RevOptions object.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
"""
        Switch the repo at ``dest`` to point to ``url``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
"""
Update an already-existing repo to the given ``rev_options``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
@classmethod
def is_commit_id_equal(cls, dest, name):
# type: (str, Optional[str]) -> bool
"""
Return whether the id of the current commit equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
raise NotImplementedError
def obtain(self, dest, url):
# type: (str, HiddenText) -> None
"""
        Install or update the package represented by this VersionControl
        object in editable mode.
:param dest: the repository directory in which to install or update.
:param url: the repository URL starting with a vcs prefix.
"""
url, rev_options = self.get_url_rev_options(url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url.secret):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort ',
                      ('s', 'i', 'w', 'b', 'a'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
            prompt = ('(i)gnore, (w)ipe, (b)ackup, (a)bort ',  # type: ignore
                      ('i', 'w', 'b', 'a'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? {}'.format(
prompt[0]), prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
def unpack(self, location, url):
# type: (str, HiddenText) -> None
"""
        Clean up the current location and download the repository
        (including vcs info) from the url into the location.
:param url: the repository URL starting with a vcs prefix.
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location, url=url)
@classmethod
def get_remote_url(cls, location):
# type: (str) -> str
"""
Return the url used at location
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
raise NotImplementedError
@classmethod
def get_revision(cls, location):
# type: (str) -> str
"""
Return the current commit id of the files at the given location.
"""
raise NotImplementedError
@classmethod
def run_command(
cls,
cmd, # type: Union[List[str], CommandArgs]
cwd=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
log_failed_cmd=True # type: bool
):
# type: (...) -> Text
"""
        Run a VCS subcommand.
        This is simply a wrapper around call_subprocess that adds the VCS
        command name and checks that the VCS is available.
"""
cmd = make_command(cls.name, *cmd)
try:
return call_subprocess(cmd, cwd,
extra_environ=extra_environ,
extra_ok_returncodes=extra_ok_returncodes,
log_failed_cmd=log_failed_cmd)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand(
'Cannot find command {cls.name!r} - do you have '
'{cls.name!r} installed and in your '
'PATH?'.format(**locals()))
else:
raise # re-raise exception if a different error occurred
@classmethod
def is_repository_directory(cls, path):
# type: (str) -> bool
"""
Return whether a directory path is a repository directory.
"""
logger.debug('Checking in %s for %s (%s)...',
path, cls.dirname, cls.name)
return os.path.exists(os.path.join(path, cls.dirname))
@classmethod
def get_repository_root(cls, location):
# type: (str) -> Optional[str]
"""
Return the "root" (top-level) directory controlled by the vcs,
or `None` if the directory is not in any.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
This can do more than is_repository_directory() alone. For
example, the Git override checks that Git is actually available.
"""
if cls.is_repository_directory(location):
return location
return None
| Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py",
"repo_id": "Django-locallibrary",
"token_count": 11818
} | 13 |
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
from pip._vendor.urllib3.response import HTTPResponse
try:
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
from pip._vendor.urllib3.util import is_fp_closed
# Replicate some six behaviour
try:
text_type = unicode
except NameError:
text_type = str
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py",
"repo_id": "Django-locallibrary",
"token_count": 237
} | 14 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Backports for individual classes and functions."""
import os
import sys
__all__ = ['cache_from_source', 'callable', 'fsencode']
try:
from imp import cache_from_source
except ImportError:
def cache_from_source(py_file, debug=__debug__):
ext = debug and 'c' or 'o'
return py_file + ext
try:
callable = callable
except NameError:
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
except AttributeError:
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py",
"repo_id": "Django-locallibrary",
"token_count": 394
} | 15 |
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
def parse_marker(marker_string):
"""
Parse a marker string and return a dictionary containing a marker expression.
The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
the expression grammar, or strings. A string contained in quotes is to be
interpreted as a literal string, and a string not contained in quotes is a
variable (such as os_name).
"""
def marker_var(remaining):
# either identifier, or literal string
m = IDENTIFIER.match(remaining)
if m:
result = m.groups()[0]
remaining = remaining[m.end():]
elif not remaining:
raise SyntaxError('unexpected end of input')
else:
q = remaining[0]
if q not in '\'"':
raise SyntaxError('invalid expression: %s' % remaining)
oq = '\'"'.replace(q, '')
remaining = remaining[1:]
parts = [q]
while remaining:
# either a string chunk, or oq, or q to terminate
if remaining[0] == q:
break
elif remaining[0] == oq:
parts.append(oq)
remaining = remaining[1:]
else:
m = STRING_CHUNK.match(remaining)
if not m:
raise SyntaxError('error in string literal: %s' % remaining)
parts.append(m.groups()[0])
remaining = remaining[m.end():]
else:
s = ''.join(parts)
raise SyntaxError('unterminated string: %s' % s)
parts.append(q)
result = ''.join(parts)
remaining = remaining[1:].lstrip() # skip past closing quote
return result, remaining
def marker_expr(remaining):
if remaining and remaining[0] == '(':
result, remaining = marker(remaining[1:].lstrip())
if remaining[0] != ')':
raise SyntaxError('unterminated parenthesis: %s' % remaining)
remaining = remaining[1:].lstrip()
else:
lhs, remaining = marker_var(remaining)
while remaining:
m = MARKER_OP.match(remaining)
if not m:
break
op = m.groups()[0]
remaining = remaining[m.end():]
rhs, remaining = marker_var(remaining)
lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
result = lhs
return result, remaining
def marker_and(remaining):
lhs, remaining = marker_expr(remaining)
while remaining:
m = AND.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_expr(remaining)
lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
def marker(remaining):
lhs, remaining = marker_and(remaining)
while remaining:
m = OR.match(remaining)
if not m:
break
remaining = remaining[m.end():]
rhs, remaining = marker_and(remaining)
lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
return lhs, remaining
return marker(marker_string)
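# Illustrative example (not from the original source). Note that the function
# returns an (expression, remaining) tuple rather than a bare dictionary:
#
#     >>> parse_marker("python_version >= '3.6'")
#     ({'op': '>=', 'lhs': 'python_version', 'rhs': "'3.6'"}, '')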
def parse_requirement(req):
"""
Parse a requirement passed in as a string. Return a Container
whose attributes contain the various parts of the requirement.
"""
remaining = req.strip()
if not remaining or remaining.startswith('#'):
return None
m = IDENTIFIER.match(remaining)
if not m:
raise SyntaxError('name expected: %s' % remaining)
distname = m.groups()[0]
remaining = remaining[m.end():]
extras = mark_expr = versions = uri = None
if remaining and remaining[0] == '[':
i = remaining.find(']', 1)
if i < 0:
raise SyntaxError('unterminated extra: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
extras = []
while s:
m = IDENTIFIER.match(s)
if not m:
raise SyntaxError('malformed extra: %s' % s)
extras.append(m.groups()[0])
s = s[m.end():]
if not s:
break
if s[0] != ',':
raise SyntaxError('comma expected in extras: %s' % s)
s = s[1:].lstrip()
if not extras:
extras = None
if remaining:
if remaining[0] == '@':
# it's a URI
remaining = remaining[1:].lstrip()
m = NON_SPACE.match(remaining)
if not m:
raise SyntaxError('invalid URI: %s' % remaining)
uri = m.groups()[0]
t = urlparse(uri)
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not (t.scheme and t.netloc):
raise SyntaxError('Invalid URL: %s' % uri)
remaining = remaining[m.end():].lstrip()
else:
def get_versions(ver_remaining):
"""
Return a list of operator, version tuples if any are
specified, else None.
"""
m = COMPARE_OP.match(ver_remaining)
versions = None
if m:
versions = []
while True:
op = m.groups()[0]
ver_remaining = ver_remaining[m.end():]
m = VERSION_IDENTIFIER.match(ver_remaining)
if not m:
raise SyntaxError('invalid version: %s' % ver_remaining)
v = m.groups()[0]
versions.append((op, v))
ver_remaining = ver_remaining[m.end():]
if not ver_remaining or ver_remaining[0] != ',':
break
ver_remaining = ver_remaining[1:].lstrip()
m = COMPARE_OP.match(ver_remaining)
if not m:
raise SyntaxError('invalid constraint: %s' % ver_remaining)
if not versions:
versions = None
return versions, ver_remaining
if remaining[0] != '(':
versions, remaining = get_versions(remaining)
else:
i = remaining.find(')', 1)
if i < 0:
raise SyntaxError('unterminated parenthesis: %s' % remaining)
s = remaining[1:i]
remaining = remaining[i + 1:].lstrip()
# As a special diversion from PEP 508, allow a version number
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
# is allowed in earlier PEPs)
if COMPARE_OP.match(s):
versions, _ = get_versions(s)
else:
m = VERSION_IDENTIFIER.match(s)
if not m:
raise SyntaxError('invalid constraint: %s' % s)
v = m.groups()[0]
s = s[m.end():].lstrip()
if s:
raise SyntaxError('invalid constraint: %s' % s)
versions = [('~=', v)]
if remaining:
if remaining[0] != ';':
raise SyntaxError('invalid requirement: %s' % remaining)
remaining = remaining[1:].lstrip()
mark_expr, remaining = parse_marker(remaining)
if remaining and remaining[0] != '#':
raise SyntaxError('unexpected trailing data: %s' % remaining)
if not versions:
rs = distname
else:
rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
return Container(name=distname, extras=extras, constraints=versions,
marker=mark_expr, url=uri, requirement=rs)
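# Illustrative example (not from the original source):
#
#     >>> r = parse_requirement('foo[bar]>=1.0')
#     >>> (r.name, r.extras, r.constraints)
#     ('foo', ['bar'], [('>=', '1.0')])
#     >>> r.requirement
#     'foo >= 1.0'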
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(root, path):
# normalizes and returns a lstripped-/-separated path
root = root.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(root)
return path[len(root):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
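# How the descriptor caches: the first __get__ computes the value and stores
# it on the instance under the same name, so later lookups hit the instance
# attribute and bypass the descriptor entirely. A minimal sketch:
#
#     class Example(object):
#         @cached_property
#         def answer(self):
#             print('computing')  # printed only on first access
#             return 42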
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
if os.path.exists(path):
os.remove(path)
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.write_binary_file(path, data.encode(encoding))
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
compile_kwargs = {}
if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
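# Dry-run usage sketch (illustrative, with hypothetical file names):
# operations are logged but nothing is written, which makes FileOperator
# convenient for previewing installs.
#
#     >>> fo = FileOperator(dry_run=True)
#     >>> fo.copy_file('src.txt', 'dest.txt')  # logs "Copying ..." only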
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
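# Illustrative example (hypothetical names, not from the original source):
#
#     >>> e = get_export_entry('console = mypkg.cli:main [flag]')
#     >>> (e.name, e.prefix, e.suffix, e.flags)
#     ('console', 'mypkg.cli', 'main', ['flag'])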
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
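# Example of the transformation on a POSIX system (illustrative):
#
#     >>> path_to_cache_dir('/home/user/project')
#     '--home--user--project.cache'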
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.rsplit('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
if username:
username = unquote(username)
if password:
password = unquote(password)
return username, password, netloc
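# Illustrative example (not from the original source):
#
#     >>> parse_credentials('user:secret@example.com')
#     ('user', 'secret', 'example.com')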
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
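# Illustrative example (not from the original source):
#
#     >>> split_filename('foo-1.0-py2.7')
#     ('foo', '1.0', '2.7')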
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
    A utility method used to get name and version from a string,
    e.g. from a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
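# Illustrative example (not from the original source):
#
#     >>> parse_name_and_version('foo (1.0)')
#     ('foo', '1.0')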
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
        Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
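# Minimal publish/subscribe sketch (illustrative, with hypothetical names):
#
#     >>> class Source(EventMixin):
#     ...     pass
#     >>> s = Source()
#     >>> s.add('ping', lambda event: 'pong')
#     >>> s.publish('ping')
#     ['pong']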
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
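# Usage sketch (illustrative): edges run from predecessor to successor, and
# get_steps() returns them in dependency order.
#
#     >>> seq = Sequencer()
#     >>> seq.add('build', 'test')
#     >>> seq.add('test', 'deploy')
#     >>> list(seq.get_steps('deploy'))
#     ['build', 'test', 'deploy']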
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
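# Usage sketch (illustrative, with a hypothetical directory name): write the
# in-memory archive out to disk.
#
#     >>> buf = zip_dir('some_directory')
#     >>> with open('out.zip', 'wb') as f:
#     ...     f.write(buf.getvalue())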
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
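# Usage sketch (illustrative):
#
#     >>> p = Progress(maxval=200).start()
#     >>> p.update(100)
#     >>> p.percentage
#     ' 50 %'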
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
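# Illustrative example (hypothetical paths): the braces expand first, then
# '**' recurses into subdirectories.
#
#     >>> for path in iglob('src/{app,tests}/**/*.py'):
#     ...     print(path)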
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_SSLv2'):
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
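# Minimal sketch of the timeout plumbing above: given a timeout, ServerProxy
# wires in one of the custom transports. 'index.example.invalid' is only a
# placeholder URI - nothing is contacted at construction time.
def _server_proxy_timeout_example():
    proxy = ServerProxy('http://index.example.invalid/', timeout=10)
    assert isinstance(proxy.transport, Transport)
    assert proxy.transport.timeout == 10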
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        # Only keys which are valid identifiers become keyword arguments
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
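# A small sketch of configure_custom, assuming 'collections.OrderedDict' as a
# stand-in resolvable callable: '()' names the factory and any remaining keys
# would become keyword arguments.
def _configurator_example():
    import collections
    cfg = Configurator({'obj': {'()': 'collections.OrderedDict'}})
    assert isinstance(cfg['obj'], collections.OrderedDict)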
class SubprocessMixin(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
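# Usage sketch for SubprocessMixin: instantiate (or mix in) and run a command;
# the python -c invocation below is purely illustrative.
def _subprocess_mixin_example():
    runner = SubprocessMixin(verbose=True)
    p = runner.run_command([sys.executable, '-c', 'print("hello")'])
    assert p.returncode == 0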
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
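# Quick sketch of the PEP 503 folding rules; the project names are invented.
def _normalize_name_example():
    assert normalize_name('Foo.Bar_baz') == 'foo-bar-baz'
    assert normalize_name('pip') == 'pip'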
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/util.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/util.py",
"repo_id": "Django-locallibrary",
"token_count": 29401
} | 16 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from io import BytesIO, StringIO
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import _ReparseException
from . import _utils
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF}
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, **kwargs):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
if (isinstance(source, http_client.HTTPResponse) or
# Also check for addinfourl wrapping HTTPResponse
(isinstance(source, urllib.response.addbase) and
isinstance(source.fp, http_client.HTTPResponse))):
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
encodings = [x for x in kwargs if x.endswith("_encoding")]
if encodings:
raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
return HTMLUnicodeInputStream(source, **kwargs)
else:
return HTMLBinaryInputStream(source, **kwargs)
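# Sketch of the dispatch above: text input yields a unicode stream, byte
# input a binary stream (which then sniffs its encoding).
def _input_stream_dispatch_example():
    assert isinstance(HTMLInputStream("<p>hi</p>"), HTMLUnicodeInputStream)
    assert isinstance(HTMLInputStream(b"<p>hi</p>"), HTMLBinaryInputStream)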
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
if not _utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
else:
self.reportCharacterErrors = self.characterErrorsUCS2
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (lookupEncoding("utf-8"), "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for _ in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if _utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not EOF:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, override_encoding=None, transport_encoding=None,
same_origin_parent_encoding=None, likely_encoding=None,
default_encoding="windows-1252", useChardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 1024
        # Number of bytes to use when detecting encoding using chardet
self.numBytesChardet = 100
# Things from args
self.override_encoding = override_encoding
self.transport_encoding = transport_encoding
self.same_origin_parent_encoding = same_origin_parent_encoding
self.likely_encoding = likely_encoding
self.default_encoding = default_encoding
# Determine encoding
self.charEncoding = self.determineEncoding(useChardet)
assert self.charEncoding[0] is not None
# Call superclass
self.reset()
def reset(self):
self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except Exception:
stream = BufferedStream(stream)
return stream
def determineEncoding(self, chardet=True):
# BOMs take precedence over everything
# This will also read past the BOM if present
charEncoding = self.detectBOM(), "certain"
if charEncoding[0] is not None:
return charEncoding
# If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Now check the transport layer
charEncoding = lookupEncoding(self.transport_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Look for meta elements with encoding information
charEncoding = self.detectEncodingMeta(), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Parent document encoding
charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
return charEncoding
# "likely" encoding
charEncoding = lookupEncoding(self.likely_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Guess with chardet, if available
if chardet:
try:
from pip._vendor.chardet.universaldetector import UniversalDetector
except ImportError:
pass
else:
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = lookupEncoding(detector.result['encoding'])
self.rawStream.seek(0)
if encoding is not None:
return encoding, "tentative"
# Try the default encoding
charEncoding = lookupEncoding(self.default_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Fallback to html5lib's default if even that hasn't worked
return lookupEncoding("windows-1252"), "tentative"
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = lookupEncoding(newEncoding)
if newEncoding is None:
return
if newEncoding.name in ("utf-16be", "utf-16le"):
newEncoding = lookupEncoding("utf-8")
assert newEncoding is not None
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.charEncoding = (newEncoding, "certain")
self.reset()
raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
if encoding:
self.rawStream.seek(seek)
return lookupEncoding(encoding)
else:
self.rawStream.seek(0)
return None
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
# pylint:disable=unused-argument
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
rv = self.startswith(bytes, self.position)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
try:
self._position = self.index(bytes, self.position) + len(bytes) - 1
except ValueError:
raise StopIteration
return True
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
if b"<meta" not in self.data:
return None
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for _ in self.data:
keepParsing = True
try:
self.data.jumpTo(b"<")
except StopIteration:
break
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # <meta is not followed by a space, so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
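# Worked example of the meta prescan machinery above: a charset declared via
# an http-equiv pragma is recovered from raw bytes.
def _encoding_parser_example():
    data = b'<meta http-equiv="content-type" content="text/html; charset=utf-8">'
    assert EncodingParser(data).getEncoding().name == "utf-8"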
def lookupEncoding(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding is not None:
try:
return webencodings.lookup(encoding)
except AttributeError:
return None
else:
return None
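# Small sketch of lookupEncoding: labels resolve through the WHATWG registry
# (so variants and aliases fold together) and unknown labels give None.
def _lookup_encoding_example():
    assert lookupEncoding("UTF8").name == "utf-8"
    assert lookupEncoding(b"latin1").name == "windows-1252"  # WHATWG alias
    assert lookupEncoding("no-such-label") is None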
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py",
"repo_id": "Django-locallibrary",
"token_count": 15205
} | 17 |
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
"""Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
"""
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
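# One plausible way to consume the stream above, assuming genshi is installed
# (it is imported at the top of this module) and using html5lib's 'etree'
# treewalker; the markup is illustrative only.
def _to_genshi_example():
    from genshi.output import HTMLSerializer
    from pip._vendor import html5lib
    tree = html5lib.parse("<p>hello</p>")
    walker = html5lib.getTreeWalker("etree")
    print("".join(HTMLSerializer()(to_genshi(walker(tree)))))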
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py",
"repo_id": "Django-locallibrary",
"token_count": 858
} | 18 |
class UnpackException(Exception):
"""Base class for some exceptions raised while unpacking.
    NOTE: unpack may raise exceptions other than subclasses of
    UnpackException. If you want to catch all errors, catch
    Exception instead.
"""
class BufferFull(UnpackException):
pass
class OutOfData(UnpackException):
pass
class FormatError(ValueError, UnpackException):
"""Invalid msgpack format"""
class StackError(ValueError, UnpackException):
"""Too nested"""
# Deprecated. Use ValueError instead
UnpackValueError = ValueError
class ExtraData(UnpackValueError):
"""ExtraData is raised when there is trailing data.
    This exception is raised only by one-shot (not streaming)
    unpack.
"""
def __init__(self, unpacked, extra):
self.unpacked = unpacked
self.extra = extra
def __str__(self):
return "unpack(b) received extra data."
# Deprecated. Use Exception instead to catch all exception during packing.
PackException = Exception
PackValueError = ValueError
PackOverflowError = OverflowError
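# Sketch of ExtraData in practice: one-shot unpackb() raises it when bytes
# remain after the first complete object, exposing both pieces.
def _extra_data_example():
    from pip._vendor.msgpack import unpackb
    try:
        unpackb(b"\x01\x02")  # two packed integers; one-shot expects one
    except ExtraData as e:
        assert e.unpacked == 1 and e.extra == b"\x02"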
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/exceptions.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/exceptions.py",
"repo_id": "Django-locallibrary",
"token_count": 329
} | 19 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
from ._typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict, Tuple, Type
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = (str,)
else:
string_types = (basestring,)
def with_metaclass(meta, *bases):
# type: (Type[Any], Tuple[Type[Any], ...]) -> Any
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # type: ignore
def __new__(cls, name, this_bases, d):
# type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
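# Illustrative sketch: Base is created through Meta exactly once, with no
# Python-2- or Python-3-specific metaclass syntax at the use site.
def _with_metaclass_example():
    class Meta(type):
        def __new__(mcls, name, bases, ns):
            ns.setdefault("tagged", True)
            return super(Meta, mcls).__new__(mcls, name, bases, ns)
    class Base(with_metaclass(Meta, object)):
        pass
    assert Base.tagged and type(Base) is Meta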
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_compat.py",
"repo_id": "Django-locallibrary",
"token_count": 414
} | 20 |
import os
import errno
import sys
from pip._vendor import six
def _makedirs_31(path, exist_ok=False):
try:
os.makedirs(path)
except OSError as exc:
if not exist_ok or exc.errno != errno.EEXIST:
raise
# rely on compatibility behavior until mode considerations
# and exists_ok considerations are disentangled.
# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
needs_makedirs = (
six.PY2 or
(3, 4) <= sys.version_info < (3, 4, 1)
)
makedirs = _makedirs_31 if needs_makedirs else os.makedirs
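# Usage sketch: makedirs behaves like os.makedirs with exist_ok support on
# every interpreter this shim targets ('build/tmp' is illustrative).
def _makedirs_example(path='build/tmp'):
    makedirs(path, exist_ok=True)
    makedirs(path, exist_ok=True)  # second call is a no-op, not an OSError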
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/pkg_resources/py31compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/pkg_resources/py31compat.py",
"repo_id": "Django-locallibrary",
"token_count": 227
} | 21 |
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'https://httpbin.org/get')
>>> req
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes. If
`allow_redirects` is not provided, it will be set to `False` (as
opposed to the default :meth:`request` behavior).
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
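# Short sketch tying the helpers together; httpbin.org is a public echo
# service used purely for illustration (network access assumed), and the
# timeout flows through **kwargs into request().
def _api_example():
    r = post('https://httpbin.org/post', json={'x': 1}, timeout=5)
    assert r.status_code == 200
    assert r.request.method == 'POST'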
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/api.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/api.py",
"repo_id": "Django-locallibrary",
"token_count": 2227
} | 22 |
"""
This module provides means to detect the App Engine environment.
"""
import os
def is_appengine():
return is_local_appengine() or is_prod_appengine()
def is_appengine_sandbox():
"""Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes
"""
return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
def is_local_appengine():
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
"SERVER_SOFTWARE", ""
).startswith("Development/")
def is_prod_appengine():
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
"SERVER_SOFTWARE", ""
).startswith("Google App Engine/")
def is_prod_appengine_mvms():
"""Deprecated."""
return False
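# Sketch of the checks above against fabricated environment values mimicking
# the first-generation python27 runtime (only sensible inside a demo or test).
def _appengine_example():
    os.environ["APPENGINE_RUNTIME"] = "python27"
    os.environ["SERVER_SOFTWARE"] = "Google App Engine/1.9.0"
    assert is_prod_appengine() and is_appengine_sandbox()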
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py",
"repo_id": "Django-locallibrary",
"token_count": 335
} | 23 |
from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six
url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ("http", "https", None)
# Almost all of these patterns were derived from the
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
URI_RE = re.compile(
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
r"(?://([^\\/?#]*))?"
r"([^?#]*)"
r"(?:\?([^#]*))?"
r"(?:#(.*))?$",
re.UNICODE | re.DOTALL,
)
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
IPV4_RE = re.compile("^" + IPV4_PAT + "$")
IPV6_RE = re.compile("^" + IPV6_PAT + "$")
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
REG_NAME_PAT,
IPV4_PAT,
IPV6_ADDRZ_PAT,
)
SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL)
UNRESERVED_CHARS = set(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
)
SUB_DELIM_CHARS = set("!$&'()*+,;=")
USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
PATH_CHARS = USERINFO_CHARS | {"@", "/"}
QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
class Url(namedtuple("Url", url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(
cls,
scheme=None,
auth=None,
host=None,
port=None,
path=None,
query=None,
fragment=None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(
cls, scheme, auth, host, port, path, query, fragment
)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return "%s:%d" % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u"://"
if auth is not None:
url += auth + u"@"
if host is not None:
url += host
if port is not None:
url += u":" + str(port)
if path is not None:
url += path
if query is not None:
url += u"?" + query
if fragment is not None:
url += u"#" + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
    Scales linearly with the number of delims. Not ideal for a large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = six.ensure_text(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode(encoding)
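# Worked examples for _encode_invalid_chars: raw non-ASCII text is percent-
# encoded, while existing escapes are recognised and left untouched.
def _encode_invalid_chars_example():
    assert _encode_invalid_chars(u"a\u00f1/b", PATH_CHARS) == "a%C3%B1/b"
    assert _encode_invalid_chars("a%2Fb", PATH_CHARS) == "a%2Fb"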
def _remove_path_dot_segments(path):
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
elif segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
def _normalize_host(host, scheme):
if host:
if isinstance(host, six.binary_type):
host = six.ensure_str(host)
if scheme in NORMALIZABLE_SCHEMES:
is_ipv6 = IPV6_ADDRZ_RE.match(host)
if is_ipv6:
match = ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
return host[:start].lower() + zone_id + host[end:]
else:
return host.lower()
elif not IPV4_RE.match(host):
return six.ensure_str(
b".".join([_idna_encode(label) for label in host.split(".")])
)
return host
def _idna_encode(name):
if name and any([ord(x) > 128 for x in name]):
try:
from pip._vendor import idna
except ImportError:
six.raise_from(
LocationParseError("Unable to parse URL without the 'idna' module"),
None,
)
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
six.raise_from(
LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
)
return name.lower().encode("ascii")
def _encode_target(target):
"""Percent-encodes a request target so that there are no invalid characters"""
path, query = TARGET_RE.match(target).groups()
target = _encode_invalid_chars(path, PATH_CHARS)
query = _encode_invalid_chars(query, QUERY_CHARS)
if query is not None:
target += "?" + query
return target
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not SCHEME_RE.search(url):
url = "//" + url
try:
scheme, authority, path, query, fragment = URI_RE.match(url).groups()
normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, host, port = SUBAUTHORITY_RE.match(authority).groups()
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port = int(port)
if not (0 <= port <= 65535):
raise LocationParseError(url)
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
except (ValueError, AttributeError):
return six.raise_from(LocationParseError(source_url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
if isinstance(url, six.text_type):
ensure_func = six.ensure_text
else:
ensure_func = six.ensure_str
def ensure_type(x):
return x if x is None else ensure_func(x)
return Url(
scheme=ensure_type(scheme),
auth=ensure_type(auth),
host=ensure_type(host),
port=port,
path=ensure_type(path),
query=ensure_type(query),
fragment=ensure_type(fragment),
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or "http", p.hostname, p.port
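# Round-trip sketch for parse_url on an illustrative URL: scheme and host are
# lowercased, dot segments removed, and the parts reassemble via .url.
def _parse_url_example():
    u = parse_url("HTTP://Example.COM:8080/a/../b?q#frag")
    assert (u.scheme, u.host, u.port) == ("http", "example.com", 8080)
    assert u.path == "/b" and u.request_uri == "/b?q"
    assert u.url == "http://example.com:8080/b?q#frag"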
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py",
"repo_id": "Django-locallibrary",
"token_count": 6692
} | 24 |
../../Scripts/easy_install-3.9.exe,sha256=zsknhmZapzS7xw5XH_A2rq33RLjrPPisYle1M8PmjdU,106374
../../Scripts/easy_install.exe,sha256=zsknhmZapzS7xw5XH_A2rq33RLjrPPisYle1M8PmjdU,106374
__pycache__/easy_install.cpython-39.pyc,,
easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126
pkg_resources/__init__.py,sha256=44G2LkL_lXbDzjTukLmR5baLQtE3S4IaFciSZPDcOM8,108481
pkg_resources/__pycache__/__init__.cpython-39.pyc,,
pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
pkg_resources/_vendor/__pycache__/__init__.cpython-39.pyc,,
pkg_resources/_vendor/__pycache__/appdirs.cpython-39.pyc,,
pkg_resources/_vendor/__pycache__/pyparsing.cpython-39.pyc,,
pkg_resources/_vendor/__pycache__/six.cpython-39.pyc,,
pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701
pkg_resources/_vendor/packaging/__about__.py,sha256=CpuMSyh1V7adw8QMjWKkY3LtdqRUkRX4MgJ6nF4stM0,744
pkg_resources/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/markers.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/tags.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/utils.cpython-39.pyc,,
pkg_resources/_vendor/packaging/__pycache__/version.cpython-39.pyc,,
pkg_resources/_vendor/packaging/_compat.py,sha256=Ugdm-qcneSchW25JrtMIKgUxfEEBcCAz6WrEeXeqz9o,865
pkg_resources/_vendor/packaging/_structures.py,sha256=pVd90XcXRGwpZRB_qdFuVEibhCHpX_bL5zYr9-N0mc8,1416
pkg_resources/_vendor/packaging/markers.py,sha256=-3GbxB_JjpWPBlTjvo_rCMJZ17i96VvHjtZ3URklwhg,8277
pkg_resources/_vendor/packaging/requirements.py,sha256=syt3EodrY6_UtlfeJDuhVYXcEDEweTSt2pyslLYlX3I,4757
pkg_resources/_vendor/packaging/specifiers.py,sha256=0ZzQpcUnvrQ6LjR-mQRLzMr8G6hdRv-mY0VSf_amFtI,27778
pkg_resources/_vendor/packaging/tags.py,sha256=EPLXhO6GTD7_oiWEO1U0l0PkfR8R_xivpMDHXnsTlts,12933
pkg_resources/_vendor/packaging/utils.py,sha256=VaTC0Ei7zO2xl9ARiWmz2YFLFt89PuuhLbAlXMyAGms,1520
pkg_resources/_vendor/packaging/version.py,sha256=Npdwnb8OHedj_2L86yiUqscujb7w_i5gmSK1PhOAFzg,11978
pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
pkg_resources/extern/__init__.py,sha256=w_3T8ntsvFFioQYOgYoGGqafDiv4sLzecQRDjsB5yeE,2101
pkg_resources/extern/__pycache__/__init__.cpython-39.pyc,,
setuptools-49.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
setuptools-49.2.1.dist-info/LICENSE,sha256=wyo6w5WvYyHv0ovnPQagDw22q4h9HCHU_sRhKNIFbVo,1078
setuptools-49.2.1.dist-info/METADATA,sha256=BpVxLXLg7oFfk05RuRuAea4JPTfYrVCX0nfW4A9S9w8,4819
setuptools-49.2.1.dist-info/RECORD,,
setuptools-49.2.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
setuptools-49.2.1.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
setuptools-49.2.1.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239
setuptools-49.2.1.dist-info/entry_points.txt,sha256=1K5Fr0-5Ph3ZRZFuwNaw8ERGiNLVqHvdKDNt3oXGS6w,3143
setuptools-49.2.1.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38
setuptools-49.2.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
setuptools/__init__.py,sha256=MeXBA4OH_MiIlHhecZLuoNjYbQP2CrMof2wS5qfKDNg,7943
setuptools/__pycache__/__init__.cpython-39.pyc,,
setuptools/__pycache__/_deprecation_warning.cpython-39.pyc,,
setuptools/__pycache__/_imp.cpython-39.pyc,,
setuptools/__pycache__/archive_util.cpython-39.pyc,,
setuptools/__pycache__/build_meta.cpython-39.pyc,,
setuptools/__pycache__/config.cpython-39.pyc,,
setuptools/__pycache__/dep_util.cpython-39.pyc,,
setuptools/__pycache__/depends.cpython-39.pyc,,
setuptools/__pycache__/dist.cpython-39.pyc,,
setuptools/__pycache__/distutils_patch.cpython-39.pyc,,
setuptools/__pycache__/errors.cpython-39.pyc,,
setuptools/__pycache__/extension.cpython-39.pyc,,
setuptools/__pycache__/glob.cpython-39.pyc,,
setuptools/__pycache__/installer.cpython-39.pyc,,
setuptools/__pycache__/launch.cpython-39.pyc,,
setuptools/__pycache__/lib2to3_ex.cpython-39.pyc,,
setuptools/__pycache__/monkey.cpython-39.pyc,,
setuptools/__pycache__/msvc.cpython-39.pyc,,
setuptools/__pycache__/namespaces.cpython-39.pyc,,
setuptools/__pycache__/package_index.cpython-39.pyc,,
setuptools/__pycache__/py27compat.cpython-39.pyc,,
setuptools/__pycache__/py31compat.cpython-39.pyc,,
setuptools/__pycache__/py33compat.cpython-39.pyc,,
setuptools/__pycache__/py34compat.cpython-39.pyc,,
setuptools/__pycache__/sandbox.cpython-39.pyc,,
setuptools/__pycache__/ssl_support.cpython-39.pyc,,
setuptools/__pycache__/unicode_utils.cpython-39.pyc,,
setuptools/__pycache__/version.cpython-39.pyc,,
setuptools/__pycache__/wheel.cpython-39.pyc,,
setuptools/__pycache__/windows_support.cpython-39.pyc,,
setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218
setuptools/_distutils/__init__.py,sha256=lpQAphR_7uhWC2fbSEps4Ja9W4YwezN_IX_LJEt3khU,250
setuptools/_distutils/__pycache__/__init__.cpython-39.pyc,,
setuptools/_distutils/__pycache__/_msvccompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/archive_util.cpython-39.pyc,,
setuptools/_distutils/__pycache__/bcppcompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/ccompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/cmd.cpython-39.pyc,,
setuptools/_distutils/__pycache__/config.cpython-39.pyc,,
setuptools/_distutils/__pycache__/core.cpython-39.pyc,,
setuptools/_distutils/__pycache__/cygwinccompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/debug.cpython-39.pyc,,
setuptools/_distutils/__pycache__/dep_util.cpython-39.pyc,,
setuptools/_distutils/__pycache__/dir_util.cpython-39.pyc,,
setuptools/_distutils/__pycache__/dist.cpython-39.pyc,,
setuptools/_distutils/__pycache__/errors.cpython-39.pyc,,
setuptools/_distutils/__pycache__/extension.cpython-39.pyc,,
setuptools/_distutils/__pycache__/fancy_getopt.cpython-39.pyc,,
setuptools/_distutils/__pycache__/file_util.cpython-39.pyc,,
setuptools/_distutils/__pycache__/filelist.cpython-39.pyc,,
setuptools/_distutils/__pycache__/log.cpython-39.pyc,,
setuptools/_distutils/__pycache__/msvc9compiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/msvccompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/spawn.cpython-39.pyc,,
setuptools/_distutils/__pycache__/sysconfig.cpython-39.pyc,,
setuptools/_distutils/__pycache__/text_file.cpython-39.pyc,,
setuptools/_distutils/__pycache__/unixccompiler.cpython-39.pyc,,
setuptools/_distutils/__pycache__/util.cpython-39.pyc,,
setuptools/_distutils/__pycache__/version.cpython-39.pyc,,
setuptools/_distutils/__pycache__/versionpredicate.cpython-39.pyc,,
setuptools/_distutils/_msvccompiler.py,sha256=RHCjIg5d2O6BxWDRotab1dgX-lhcSglHtzF2ZZgHwbA,19968
setuptools/_distutils/archive_util.py,sha256=qW-uiGwYexTvK5e-iSel_31Dshx-CqTanNPK6snwf98,8572
setuptools/_distutils/bcppcompiler.py,sha256=OJDVpCUmX6H8v_7lV1zifV1fcx92Cr2dhiUh6989UJI,14894
setuptools/_distutils/ccompiler.py,sha256=4cqQgq06NbGo0vazGMT2aPZ6K2Z-HcuRn9Pfz_bQUPw,47437
setuptools/_distutils/cmd.py,sha256=eco6LAGUtobLuPafuhmgKgkwRRL_WY8KJ4YeDCHpcls,18079
setuptools/_distutils/command/__init__.py,sha256=2TA-rlNDlzeI-csbWHXFjGD8uOYqALMfyWOhT49nC6g,799
setuptools/_distutils/command/__pycache__/__init__.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/bdist.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/bdist_dumb.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/bdist_msi.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/bdist_rpm.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/bdist_wininst.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/build.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/build_clib.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/build_ext.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/build_py.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/build_scripts.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/check.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/clean.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/config.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install_data.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install_egg_info.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install_headers.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install_lib.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/install_scripts.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/register.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/sdist.cpython-39.pyc,,
setuptools/_distutils/command/__pycache__/upload.cpython-39.pyc,,
setuptools/_distutils/command/bdist.py,sha256=2z4eudRl_n7m3lG9leL0IYqes4bsm8c0fxfZuiafjMg,5562
setuptools/_distutils/command/bdist_dumb.py,sha256=BTur9jcIppyP7Piavjfsk7YjElqvxeYO2npUyPPOekc,4913
setuptools/_distutils/command/bdist_msi.py,sha256=EVFQYN_X-ExeeP8gmdV9JcINsuUGsLJUz9afMU0Rt8c,35579
setuptools/_distutils/command/bdist_rpm.py,sha256=gjOw22GhDSbcq0bdq25cTb-n6HWWm0bShLQad_mkJ4k,21537
setuptools/_distutils/command/bdist_wininst.py,sha256=iGlaI-VfElHOneeczKHWnSN5a10-7IMcJaXuR1mdS3c,16030
setuptools/_distutils/command/build.py,sha256=11NyR2UAUzalrkTZ2ph0BAHFWFC2jtSsN7gIaF-NC08,5767
setuptools/_distutils/command/build_clib.py,sha256=bgVTHh28eLQA2Gkw68amApd_j7qQBX4MTI-zTvAK_J4,8022
setuptools/_distutils/command/build_ext.py,sha256=MMJPCxHlf9rgUkizn4Kjq9vYeAEfxyqfq8XsTE-EpWM,31635
setuptools/_distutils/command/build_py.py,sha256=S_Nlw4hZE8PnIgqX5OFMdmt-GSmOhPQQ4f2jr1uBnoU,17190
setuptools/_distutils/command/build_scripts.py,sha256=aKycJJPx3LfZ1cvZgSJaxnD2LnvRM5WJ-8xkpdgcLsI,6232
setuptools/_distutils/command/check.py,sha256=5qDtI75ccZg3sAItQWeaIu8y3FR314O4rr9Smz4HsEo,5637
setuptools/_distutils/command/clean.py,sha256=2TCt47ru4hZZM0RfVfUYj5bbpicpGLP4Qhw5jBtvp9k,2776
setuptools/_distutils/command/config.py,sha256=2aTjww3PwjMB8-ZibCe4P7B-qG1hM1gn_rJXYyxRz6c,13117
setuptools/_distutils/command/install.py,sha256=oOM2rD7l_SglARNVDmiZn8u6DAfidXRF_yE5QS328B4,27482
setuptools/_distutils/command/install_data.py,sha256=YhGOAwh3gJPqF7em5XA0rmpR42z1bLh80ooElzDyUvk,2822
setuptools/_distutils/command/install_egg_info.py,sha256=0kW0liVMeadkjX0ZcRfMptKFen07Gw6gyw1VHT5KIwc,2603
setuptools/_distutils/command/install_headers.py,sha256=XQ6idkbIDfr1ljXCOznuVUMvOFpHBn6cK0Wz9gIM2b4,1298
setuptools/_distutils/command/install_lib.py,sha256=9AofR-MO9lAtjwwuukCptepOaJEKMZW2VHiyR5hU7HA,8397
setuptools/_distutils/command/install_scripts.py,sha256=_CLUeQwGJRcY2kik7azPMn5IdtDCrjWdUvZ1khlG6ck,2017
setuptools/_distutils/command/register.py,sha256=2jaq9968rt2puRVDBx1HbNiXv27uOk8idE_4lPf_3VM,11712
setuptools/_distutils/command/sdist.py,sha256=qotJjAOzyhJjq2-oDImjNFrOtaSneEFDJTB-sEk1wnU,19005
setuptools/_distutils/command/upload.py,sha256=BLO1w7eSAqsCjCLXtf_CRVSjwF1WmyOByGVGNdcQ8oY,7597
setuptools/_distutils/config.py,sha256=dtHgblx9JhfyrKx1-J7Jlxw_f7s8ZbPFQii2UWMTZpY,4827
setuptools/_distutils/core.py,sha256=jbdOkpOK09xi-56vhhwvn3fYdhLb5DJO8q3K1fnQz0Q,8876
setuptools/_distutils/cygwinccompiler.py,sha256=9U4JAusUzlAGJl0Y5nToPkQ3ldzseAtiye434mwJ0ow,16380
setuptools/_distutils/debug.py,sha256=N6MrTAqK6l9SVk6tWweR108PM8Ol7qNlfyV-nHcLhsY,139
setuptools/_distutils/dep_util.py,sha256=GuR9Iw_jzZRkyemJ5HX8rB_wRGxkIBcBm1qh54r7zhk,3491
setuptools/_distutils/dir_util.py,sha256=UwhBOUTcV65GTwce4SPuTXR8Z8q3LYEcmttqcGb0bYo,7778
setuptools/_distutils/dist.py,sha256=Biuf6ca8uiFfMScRFsYUKtb5neMPtxKxRtXn50_1f3U,50421
setuptools/_distutils/errors.py,sha256=Yr6tKZGdzBoNi53vBtiq0UJ__X05CmxSdQJqOWaw6SY,3577
setuptools/_distutils/extension.py,sha256=bTb3Q0CoevGKYv5dX1ls--Ln8tlB0-UEOsi9BwzlZ-s,10515
setuptools/_distutils/fancy_getopt.py,sha256=OPxp2CxHi1Yp_d1D8JxW4Ueq9fC71tegQFaafh58GGU,17784
setuptools/_distutils/file_util.py,sha256=0hUqfItN_x2DVihR0MHdA4KCMVCOO8VoByaFp_a6MDg,8148
setuptools/_distutils/filelist.py,sha256=8bRxhzp2FsaoHT7TuKD4Qjcuh_B9Ow_xTt_htZJvN2Q,12832
setuptools/_distutils/log.py,sha256=hWBmdUC2K927QcVv3REMW3HMPclxccPQngxLSuUXQl0,1969
setuptools/_distutils/msvc9compiler.py,sha256=uv0TAfoWrxEBOQL-Z2uws5g4AXoTPahUEMuq6FLkCYY,30453
setuptools/_distutils/msvccompiler.py,sha256=ZYsnUgIC4tZT2WkJbTkTUyVSCAc2nFM9DVKIuIfPBU0,23540
setuptools/_distutils/spawn.py,sha256=XBmUqzhxXfay_JE18RkaalHf9kgi7NvXeBPW9BfTqmw,4408
setuptools/_distutils/sysconfig.py,sha256=5z55MU7gXeceL_G9FK6ex-2OvdeIXJRZJafrtthJcfU,21349
setuptools/_distutils/text_file.py,sha256=PsuAJeWdKJoLSV_6N6IpB5-0Pa84KzLUucJMFRazw3I,12483
setuptools/_distutils/unixccompiler.py,sha256=E65edChYLoHY8wi4OxFu_wKt3hJe3GySF6v51G_ZzL0,14696
setuptools/_distutils/util.py,sha256=Z-FtpvCo1szNkssI-it-uWhA35996XHcttLZiUzc1_Y,20913
setuptools/_distutils/version.py,sha256=8NogP6NPPQpp3EUMZcT9czEHia-ehqPo8spo_e7AgUU,12514
setuptools/_distutils/versionpredicate.py,sha256=ZxpEA-TQv88mUWc6hetUO4qSqA2sa7ipjZ3QEK5evDk,5133
setuptools/_imp.py,sha256=Qx0LJzEBaWk_6PfICamJtfBN2rh5K9sJq1wXvtZW-mc,2388
setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
setuptools/_vendor/__pycache__/__init__.cpython-39.pyc,,
setuptools/_vendor/__pycache__/ordered_set.cpython-39.pyc,,
setuptools/_vendor/__pycache__/pyparsing.cpython-39.pyc,,
setuptools/_vendor/__pycache__/six.cpython-39.pyc,,
setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130
setuptools/_vendor/packaging/__about__.py,sha256=CpuMSyh1V7adw8QMjWKkY3LtdqRUkRX4MgJ6nF4stM0,744
setuptools/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
setuptools/_vendor/packaging/__pycache__/__about__.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/__init__.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/_compat.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/_structures.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/markers.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/requirements.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/tags.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/utils.cpython-39.pyc,,
setuptools/_vendor/packaging/__pycache__/version.cpython-39.pyc,,
setuptools/_vendor/packaging/_compat.py,sha256=Ugdm-qcneSchW25JrtMIKgUxfEEBcCAz6WrEeXeqz9o,865
setuptools/_vendor/packaging/_structures.py,sha256=pVd90XcXRGwpZRB_qdFuVEibhCHpX_bL5zYr9-N0mc8,1416
setuptools/_vendor/packaging/markers.py,sha256=-meFl9Fr9V8rF5Rduzgett5EHK9wBYRUqssAV2pj0lw,8268
setuptools/_vendor/packaging/requirements.py,sha256=3dwIJekt8RRGCUbgxX8reeAbgmZYjb0wcCRtmH63kxI,4742
setuptools/_vendor/packaging/specifiers.py,sha256=0ZzQpcUnvrQ6LjR-mQRLzMr8G6hdRv-mY0VSf_amFtI,27778
setuptools/_vendor/packaging/tags.py,sha256=EPLXhO6GTD7_oiWEO1U0l0PkfR8R_xivpMDHXnsTlts,12933
setuptools/_vendor/packaging/utils.py,sha256=VaTC0Ei7zO2xl9ARiWmz2YFLFt89PuuhLbAlXMyAGms,1520
setuptools/_vendor/packaging/version.py,sha256=Npdwnb8OHedj_2L86yiUqscujb7w_i5gmSK1PhOAFzg,11978
setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
setuptools/archive_util.py,sha256=F1-XrQJTdXHRPRA09kxPWwm9Z2Ms1lE_IQZKG_JZ7rM,6638
setuptools/build_meta.py,sha256=qFxrLAwgKPS3TxEi8NNbFxfEvb192pzSgARS8nZZ_Ek,9917
setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752
setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
setuptools/command/__init__.py,sha256=QCAuA9whnq8Bnoc0bBaS6Lw_KAUO0DiHYZQXEMNn5hg,568
setuptools/command/__pycache__/__init__.cpython-39.pyc,,
setuptools/command/__pycache__/alias.cpython-39.pyc,,
setuptools/command/__pycache__/bdist_egg.cpython-39.pyc,,
setuptools/command/__pycache__/bdist_rpm.cpython-39.pyc,,
setuptools/command/__pycache__/bdist_wininst.cpython-39.pyc,,
setuptools/command/__pycache__/build_clib.cpython-39.pyc,,
setuptools/command/__pycache__/build_ext.cpython-39.pyc,,
setuptools/command/__pycache__/build_py.cpython-39.pyc,,
setuptools/command/__pycache__/develop.cpython-39.pyc,,
setuptools/command/__pycache__/dist_info.cpython-39.pyc,,
setuptools/command/__pycache__/easy_install.cpython-39.pyc,,
setuptools/command/__pycache__/egg_info.cpython-39.pyc,,
setuptools/command/__pycache__/install.cpython-39.pyc,,
setuptools/command/__pycache__/install_egg_info.cpython-39.pyc,,
setuptools/command/__pycache__/install_lib.cpython-39.pyc,,
setuptools/command/__pycache__/install_scripts.cpython-39.pyc,,
setuptools/command/__pycache__/py36compat.cpython-39.pyc,,
setuptools/command/__pycache__/register.cpython-39.pyc,,
setuptools/command/__pycache__/rotate.cpython-39.pyc,,
setuptools/command/__pycache__/saveopts.cpython-39.pyc,,
setuptools/command/__pycache__/sdist.cpython-39.pyc,,
setuptools/command/__pycache__/setopt.cpython-39.pyc,,
setuptools/command/__pycache__/test.cpython-39.pyc,,
setuptools/command/__pycache__/upload.cpython-39.pyc,,
setuptools/command/__pycache__/upload_docs.cpython-39.pyc,,
setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426
setuptools/command/bdist_egg.py,sha256=pVY95-nsM0U1_QmK01eLRedjWDw9ruEwrZxBae8FyZA,18482
setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508
setuptools/command/bdist_wininst.py,sha256=Tmqa9wW0F8i_72KHWpu9pDdnCN6Er_8uJUs2UmCAwTA,922
setuptools/command/build_clib.py,sha256=fWHSFGkk10VCddBWCszvNhowbG9Z9CZXVjQ2uSInoOs,4415
setuptools/command/build_ext.py,sha256=RYS8cJvCwvusFnbKllvLtd6-HcR0dVIzX6zVrtw1Vc8,13187
setuptools/command/build_py.py,sha256=fho10QRGOaJcc3vttQ5vk5KYMV6HdZwj9HUIob6NHDM,9737
setuptools/command/develop.py,sha256=wF2CiU9wjCF8ZcfFzn02j2ylez8r13z_fEco6vWx3DM,8118
setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960
setuptools/command/easy_install.py,sha256=T1d_3uQFLur6qNrNtEiiRVzleECvHBe9etr7o3Imquw,86924
setuptools/command/egg_info.py,sha256=LKrhZuy-IoRJZ59orIB2-_Gj7NBj9MHm5uu16zZdE7U,25560
setuptools/command/install.py,sha256=8doMxeQEDoK4Eco0mO2WlXXzzp9QnsGJQ7Z7yWkZPG8,4705
setuptools/command/install_egg_info.py,sha256=bMgeIeRiXzQ4DAGPV1328kcjwQjHjOWU4FngAWLV78Q,2203
setuptools/command/install_lib.py,sha256=Uz42McsyHZAjrB6cw9E7Bz0xsaTbzxnM1PI9CBhiPtE,3875
setuptools/command/install_scripts.py,sha256=x7sdEICuyFpaf5LuWXcTp49oYt8EeNbwKkW2Pv-TVXI,2519
setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628
setuptools/command/py36compat.py,sha256=TKqF6CPv-vsEFpOJUYmjBzmck-mCv_zHJMXO500PEAI,4994
setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468
setuptools/command/rotate.py,sha256=1KD9hHoDWpyvsbc2L7ULrQxUpJsG5zIMlPfx8yLowk4,2176
setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658
setuptools/command/sdist.py,sha256=14kBw_QOZ9L_RQDqgf9DAlEuoj0zC30X5mfDWeiyZwU,8092
setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085
setuptools/command/test.py,sha256=okVw2id6qYh8hFAVGziX6dEYekAbaYfMtEx7XhgsSbg,9623
setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462
setuptools/command/upload_docs.py,sha256=G2gHjeNPcUGe_pr3EEk_6AoVD0E6nCp52mZgU2nkCpU,7314
setuptools/config.py,sha256=Ncxt5IQTVyM9qvX3PxB-Eb67-zoZLq5WbDuyu3I2gd0,21782
setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949
setuptools/depends.py,sha256=qt2RWllArRvhnm8lxsyRpcthEZYp4GHQgREl1q0LkFw,5517
setuptools/dist.py,sha256=Of69bBpUzFWt9o_RTptPt-3MWVc3k_LId3b7hh8rBQs,39350
setuptools/distutils_patch.py,sha256=r8LauqtVguTUFxguvU7tDhF8HTgAkIBHg5-hgPeSJ5c,1754
setuptools/errors.py,sha256=MVOcv381HNSajDgEUWzOQ4J6B5BHCBMSjHfaWcEwA1o,524
setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729
setuptools/extern/__init__.py,sha256=BilMS9Hq18nBaUOzcCrzoI9HnIhju45iVJBscqTqlDI,2128
setuptools/extern/__pycache__/__init__.cpython-39.pyc,,
setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084
setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264
setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
setuptools/installer.py,sha256=mJ6SdRmhWpZ1Cg3H_LWd1IoZoeC2t4BSkkXMuvhYeKw,5343
setuptools/launch.py,sha256=TyPT-Ic1T2EnYvGO26gfNRP4ysBlrhpbRjQxWsiO414,812
setuptools/lib2to3_ex.py,sha256=lrjhfs4QVtWp65PuATWjPBcXxwubg9d81e0qrv0qOpI,2384
setuptools/monkey.py,sha256=FGc9fffh7gAxMLFmJs2DW_OYWpBjkdbNS2n14UAK4NA,5264
setuptools/msvc.py,sha256=8xIqn20nZ_poynw6sDvZuUECN_KlOjdTNfossrlSMcY,51225
setuptools/namespaces.py,sha256=QuvIR8S5-u_S8_fLjPpn_utruUIsu2twdRu_KJPrKU0,3223
setuptools/package_index.py,sha256=oKRvghWBzlqlQV4iRUERwbpBs_rXL5mwlzNZdKI2yXs,40777
setuptools/py27compat.py,sha256=CWHkWWAYodu3QgiIAr8-34T-G6fiSgiVF0y7h11Ld7U,1504
setuptools/py31compat.py,sha256=h2rtZghOfwoGYd8sQ0-auaKiF3TcL3qX0bX3VessqcE,838
setuptools/py33compat.py,sha256=SMF9Z8wnGicTOkU1uRNwZ_kz5Z_bj29PUBbqdqeeNsc,1330
setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245
setuptools/sandbox.py,sha256=KOWl011mwUX2OdLmcTM690CTOneZEJxK9RIXbXyGL_o,14251
setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218
setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138
setuptools/ssl_support.py,sha256=TNNOq3VyV-4wkRwm0dmyIzF-iXBeWv4yIQ99eWa_bV8,8543
setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996
setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144
setuptools/wheel.py,sha256=YLN2fczDVxkX3wjHlt_EMh-4MfHO6Ns6ldRpnkn-aa8,8371
setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714
| Django-locallibrary/env/Lib/site-packages/setuptools-49.2.1.dist-info/RECORD/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools-49.2.1.dist-info/RECORD",
"repo_id": "Django-locallibrary",
"token_count": 11756
} | 25 |
"""distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
basename = "%s-%s-py%d.%d.egg-info" % (
to_filename(safe_name(self.distribution.get_name())),
to_filename(safe_version(self.distribution.get_version())),
*sys.version_info[:2]
)
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink,(self.target,),"Removing "+target)
elif not os.path.isdir(self.install_dir):
self.execute(os.makedirs, (self.install_dir,),
"Creating "+self.install_dir)
log.info("Writing %s", target)
if not self.dry_run:
with open(target, 'w', encoding='UTF-8') as f:
self.distribution.metadata.write_pkg_file(f)
def get_outputs(self):
return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
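# A quick illustration of how the helpers above compose (hypothetical
# project "My Package", version "1.0 beta", Python 3.9; not part of the
# original source):
#   safe_name('My Package')   -> 'My-Package'
#   safe_version('1.0 beta')  -> '1.0.beta'
#   to_filename('My-Package') -> 'My_Package'
# giving an egg-info basename of 'My_Package-1.0.beta-py3.9.egg-info'.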
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_egg_info.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_egg_info.py",
"repo_id": "Django-locallibrary",
"token_count": 1022
} | 26 |
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = str.maketrans('-', '_')
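# For example, 'build-lib'.translate(longopt_xlate) yields 'build_lib',
# which is usable as an attribute name.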
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__(self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have three or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
def _build_index(self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table(self, option_table):
self.option_table = option_table
self._build_index()
def add_option(self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError(
"option conflict: already an option '%s'" % long_option)
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option(self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name(self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return long_option.translate(longopt_xlate)
def _check_alias_dict(self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias))
if opt not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt))
def set_aliases(self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases(self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table(self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError("invalid option tuple: %r" % (option,))
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError(("invalid long option '%s': "
"must be a string of length >= 2") % long)
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError("invalid short option '%s': "
"must a single character or None" % short)
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is this option a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid negative alias '%s': "
"aliased option '%s' takes a value"
% (long, alias_to))
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't"
% (long, alias_to))
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError(
"invalid long option name '%s' "
"(must be letters, numbers, hyphens only" % long)
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
def getopt(self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = True
else:
created_object = False
self._grok_option_table()
short_opts = ' '.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error as msg:
raise DistutilsArgError(msg)
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
def get_option_order(self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError("'getopt()' hasn't been called yet")
else:
return self.option_order
def generate_help(self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % opt_names)
for l in text[1:]:
lines.append(big_indent + l)
return lines
def print_help(self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
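# Minimal usage sketch (hypothetical option table and holder object; not
# part of the original source):
#   options = [('verbose', 'v', 'run verbosely'), ('quiet', 'q', 'run quietly')]
#   class _Opts: pass
#   obj = _Opts()
#   rest = fancy_getopt(options, {'quiet': 'verbose'}, obj, ['-v', 'build'])
# afterwards obj.verbose == 1 and rest == ['build'].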
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
def wrap_text(text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = text.expandtabs()
text = text.translate(WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big to fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(''.join(cur_line))
return lines
def translate_longopt(opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return opt.translate(longopt_xlate)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__(self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print("width: %d" % w)
print("\n".join(wrap_text(text, w)))
print()
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/fancy_getopt.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/fancy_getopt.py",
"repo_id": "Django-locallibrary",
"token_count": 8062
} | 27 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = (str,)
else:
string_types = (basestring,)
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
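# Typical usage sketch (hypothetical class name; not part of the original
# source): give a class the ABCMeta metaclass in a way that parses on
# both Python 2 and 3:
#   import abc
#   class Base(with_metaclass(abc.ABCMeta, object)):
#       pass
# type(Base) is abc.ABCMeta on either interpreter.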
| Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/_compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/_compat.py",
"repo_id": "Django-locallibrary",
"token_count": 308
} | 28 |
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os
import glob
import io
from setuptools.extern import six
import pkg_resources
from setuptools.command.easy_install import easy_install
from setuptools import namespaces
import setuptools
__metaclass__ = type
class develop(namespaces.DevelopInstaller, easy_install):
"""Set up package for development"""
description = "install package in 'development mode'"
user_options = easy_install.user_options + [
("uninstall", "u", "Uninstall this source package"),
("egg-path=", None, "Set the path to be used in the .egg-link file"),
]
boolean_options = easy_install.boolean_options + ['uninstall']
command_consumes_arguments = False # override base
def run(self):
if self.uninstall:
self.multi_version = True
self.uninstall_link()
self.uninstall_namespaces()
else:
self.install_for_development()
self.warn_deprecated_options()
def initialize_options(self):
self.uninstall = None
self.egg_path = None
easy_install.initialize_options(self)
self.setup_path = None
self.always_copy_from = '.' # always copy eggs installed in curdir
def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
template = "Please rename %r to %r before using 'develop'"
args = ei.egg_info, ei.broken_egg_info
raise DistutilsError(template % args)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
self.expand_basedirs()
self.expand_dirs()
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
egg_link_fn = ei.egg_name + '.egg-link'
self.egg_link = os.path.join(self.install_dir, egg_link_fn)
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = pkg_resources.normalize_path(self.egg_base)
egg_path = pkg_resources.normalize_path(
os.path.join(self.install_dir, self.egg_path))
if egg_path != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to " + target
)
# Make a distribution for the package's source
self.dist = pkg_resources.Distribution(
target,
pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name=ei.egg_name
)
self.setup_path = self._resolve_setup_path(
self.egg_base,
self.install_dir,
self.egg_path,
)
@staticmethod
def _resolve_setup_path(egg_base, install_dir, egg_path):
"""
Generate a path from egg_base back to '.' where the
setup script resides and ensure that path points to the
setup path from $install_dir/$egg_path.
"""
path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
if path_to_setup != os.curdir:
path_to_setup = '../' * (path_to_setup.count('/') + 1)
resolved = pkg_resources.normalize_path(
os.path.join(install_dir, egg_path, path_to_setup)
)
if resolved != pkg_resources.normalize_path(os.curdir):
raise DistutilsOptionError(
"Can't get a consistent path to setup script from"
" installation directory", resolved,
pkg_resources.normalize_path(os.curdir))
return path_to_setup
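# Illustration (hypothetical layout, not from the original source): with
# egg_base='src', path_to_setup becomes '../', i.e. one level up from
# $install_dir/$egg_path/src back to the directory holding the setup
# script; egg_base='.' leaves it as os.curdir.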
def install_for_development(self):
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = pkg_resources.normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
# Fixup egg-link and easy-install.pth
ei_cmd = self.get_finalized_command("egg_info")
self.egg_path = build_path
self.dist.location = build_path
# XXX
self.dist._provider = pkg_resources.PathMetadata(
build_path, ei_cmd.egg_info)
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
self.install_namespaces()
# create an .egg-link in the installation dir, pointing to our egg
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
with open(self.egg_link, "w") as f:
f.write(self.egg_path + "\n" + self.setup_path)
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
def uninstall_link(self):
if os.path.exists(self.egg_link):
log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
egg_link_file = open(self.egg_link)
contents = [line.rstrip() for line in egg_link_file]
egg_link_file.close()
if contents not in ([self.egg_path],
[self.egg_path, self.setup_path]):
log.warn("Link points to %s: uninstall aborted", contents)
return
if not self.dry_run:
os.unlink(self.egg_link)
if not self.dry_run:
self.update_pth(self.dist) # remove any .pth link to us
if self.distribution.scripts:
# XXX should also check for entry point scripts!
log.warn("Note: you must uninstall or replace scripts manually!")
def install_egg_scripts(self, dist):
if dist is not self.dist:
# Installing a dependency, so fall back to normal behavior
return easy_install.install_egg_scripts(self, dist)
# create wrapper scripts in the script dir, pointing to dist.scripts
# new-style...
self.install_wrapper_scripts(dist)
# ...and old-style
for script_name in self.distribution.scripts or []:
script_path = os.path.abspath(convert_path(script_name))
script_name = os.path.basename(script_path)
with io.open(script_path) as strm:
script_text = strm.read()
self.install_script(dist, script_name, script_text, script_path)
def install_wrapper_scripts(self, dist):
dist = VersionlessRequirement(dist)
return easy_install.install_wrapper_scripts(self, dist)
class VersionlessRequirement:
"""
Adapt a pkg_resources.Distribution to simply return the project
name as the 'requirement' so that scripts will work across
multiple versions.
>>> from pkg_resources import Distribution
>>> dist = Distribution(project_name='foo', version='1.0')
>>> str(dist.as_requirement())
'foo==1.0'
>>> adapted_dist = VersionlessRequirement(dist)
>>> str(adapted_dist.as_requirement())
'foo'
"""
def __init__(self, dist):
self.__dist = dist
def __getattr__(self, name):
return getattr(self.__dist, name)
def as_requirement(self):
return self.project_name
| Django-locallibrary/env/Lib/site-packages/setuptools/command/develop.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/develop.py",
"repo_id": "Django-locallibrary",
"token_count": 3584
} | 29 |
from distutils import log
from distutils.command import upload as orig
from setuptools.errors import RemovedCommandError
class upload(orig.upload):
"""Formerly used to upload packages to PyPI."""
def run(self):
msg = (
"The upload command has been removed, use twine to upload "
+ "instead (https://pypi.org/p/twine)"
)
self.announce("ERROR: " + msg, log.ERROR)
raise RemovedCommandError(msg)
| Django-locallibrary/env/Lib/site-packages/setuptools/command/upload.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/upload.py",
"repo_id": "Django-locallibrary",
"token_count": 175
} | 30 |
"""
Launch the Python script on the command line after
setuptools is bootstrapped via import.
"""
# Note that setuptools gets imported implicitly by the
# invocation of this script using python -m setuptools.launch
import tokenize
import sys
def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
"""
__builtins__
script_name = sys.argv[1]
namespace = dict(
__file__=script_name,
__name__='__main__',
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
open_ = getattr(tokenize, 'open', open)
with open_(script_name) as fid:
script = fid.read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
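# Example invocation (illustrative, with a hypothetical script name):
#   python -m setuptools.launch some_script.py --flag value
# runs some_script.py as __main__ with
# sys.argv == ['some_script.py', '--flag', 'value'].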
if __name__ == '__main__':
run()
| Django-locallibrary/env/Lib/site-packages/setuptools/launch.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/launch.py",
"repo_id": "Django-locallibrary",
"token_count": 317
} | 31 |
"""Wheels support."""
from distutils.util import get_platform
from distutils import log
import email
import itertools
import os
import posixpath
import re
import zipfile
import pkg_resources
import setuptools
from pkg_resources import parse_version
from setuptools.extern.packaging.tags import sys_tags
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.six import PY3
from setuptools.command.egg_info import write_requirements
__metaclass__ = type
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
NAMESPACE_PACKAGE_INIT = \
"__import__('pkg_resources').declare_namespace(__name__)\n"
def unpack(src_dir, dst_dir):
'''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
src = os.path.join(dirpath, f)
dst = os.path.join(dst_dir, subdir, f)
os.renames(src, dst)
for n, d in reversed(list(enumerate(dirnames))):
src = os.path.join(dirpath, d)
dst = os.path.join(dst_dir, subdir, d)
if not os.path.exists(dst):
# Directory does not exist in destination,
# rename it and prune it from os.walk list.
os.renames(src, dst)
del dirnames[n]
# Cleanup.
for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
assert not filenames
os.rmdir(dirpath)
class Wheel:
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'),
)
def is_compatible(self):
'''Is the wheel compatible with the current platform?'''
supported_tags = set(
(t.interpreter, t.abi, t.platform) for t in sys_tags())
return next((True for t in self.tags() if t in supported_tags), False)
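# For example, a wheel named 'foo-1.0-py2.py3-none-any.whl' expands via
# tags() to [('py2', 'none', 'any'), ('py3', 'none', 'any')], and
# is_compatible() checks those triples against the interpreter's
# supported tags.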
def egg_name(self):
return pkg_resources.Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def get_dist_info(self, zf):
# find the correct name of the .dist-info dir in the wheel file
for member in zf.namelist():
dirname = posixpath.dirname(member)
if (dirname.endswith('.dist-info') and
canonicalize_name(dirname).startswith(
canonicalize_name(self.project_name))):
return dirname
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
def _install_as_egg(self, destination_eggdir, zf):
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = self.get_dist_info(zf)
dist_data = '%s.data' % dist_basename
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
self._move_data_entries(destination_eggdir, dist_data)
self._fix_namespace_packages(egg_info, destination_eggdir)
@staticmethod
def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
def get_metadata(name):
with zf.open(posixpath.join(dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
wheel_v1 = (
parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
)
if not wheel_v1:
raise ValueError(
'unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = pkg_resources.Distribution.from_location(
destination_eggdir, dist_info,
metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
)
# Note: Evaluate and strip markers now,
# as it's difficult to convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
)
for extra in dist.extras
}
os.rename(dist_info, egg_info)
os.rename(
os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'),
)
setup_dist = setuptools.Distribution(
attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
),
)
# Temporarily disable info traces.
log_threshold = log._global_log.threshold
log.set_threshold(log.WARN)
try:
write_requirements(
setup_dist.get_command_obj('egg_info'),
None,
os.path.join(egg_info, 'requires.txt'),
)
finally:
log.set_threshold(log_threshold)
@staticmethod
def _move_data_entries(destination_eggdir, dist_data):
"""Move data entries to their correct location."""
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(
destination_eggdir, 'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(
os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry),
)
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
@staticmethod
def _fix_namespace_packages(egg_info, destination_eggdir):
namespace_packages = os.path.join(
egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
with open(namespace_packages) as fp:
namespace_packages = fp.read().split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if not os.path.exists(mod_dir):
os.mkdir(mod_dir)
if not os.path.exists(mod_init):
with open(mod_init, 'w') as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
| Django-locallibrary/env/Lib/site-packages/setuptools/wheel.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/wheel.py",
"repo_id": "Django-locallibrary",
"token_count": 4003
} | 32 |
# Linear-Regression-python-and-MQL5
The following code examples were used in my MQL5 article on linear regression; check it out at https://www.mql5.com/en/articles/10459
My new trading robot, based on a linear regression hedging approach, can be downloaded at https://www.mql5.com/en/market/product/79082
| Linear-Regression-python-and-MQL5/README.md/0 | {
"file_path": "Linear-Regression-python-and-MQL5/README.md",
"repo_id": "Linear-Regression-python-and-MQL5",
"token_count": 93
} | 33 |
//+------------------------------------------------------------------+
//| tree.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
#define log2(leaf_value) MathLog(leaf_value) / MathLog(2)
class Node
{
public:
// for decision node
uint feature_index;
double threshold;
double info_gain;
// for leaf node
double leaf_value;
Node *left_child; //left child Node
Node *right_child; //right child Node
Node() : left_child(NULL), right_child(NULL) {} // default constructor
Node(int feature_index_, double threshold_=0.0, Node *left_=NULL, Node *right_=NULL, double info_gain_=NULL, double value_=NULL)
: left_child(left_), right_child(right_)
{
this.feature_index = feature_index_;
this.threshold = threshold_;
this.info_gain = info_gain_;
this.leaf_value = value_;
}
void __Print__()
{
printf("feature_index: %d \nthreshold: %f \ninfo_gain: %f \nleaf_value: %f",feature_index,threshold, info_gain, leaf_value);
}
};
struct split_info
{
uint feature_index;
double threshold;
matrix dataset_left,
dataset_right;
double info_gain;
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
enum mode {MODE_ENTROPY, MODE_GINI};
class CDecisionTreeClassifier
{
protected:
Node *build_tree(matrix &data, uint curr_depth=0);
double calculate_leaf_value(vector &Y);
bool is_fitted;
bool check_is_fitted(string func)
{
if (!is_fitted)
{
Print(func," Tree not trained, Call fit function first to train the model");
return false;
}
return (true);
}
//---
uint m_max_depth;
uint m_min_samples_split;
mode m_mode;
double gini_index(vector &y);
double entropy(vector &y);
double information_gain(vector &parent, vector &l_child, vector &r_child);
split_info get_best_split(const matrix &data, uint num_features);
split_info split_data(const matrix &data, uint feature_index, double threshold=0.5);
double make_predictions(const vector &x, const Node &tree);
void delete_tree(Node* node);
Node *nodes[]; //Keeping track of all the nodes in a tree
public:
Node *root;
CDecisionTreeClassifier(uint min_samples_split=2, uint max_depth=2, mode mode_=MODE_GINI);
~CDecisionTreeClassifier(void);
void fit(const matrix &x, const vector &y);
void print_tree(Node *tree, string indent=" ",string padl="");
double predict(const vector &x);
vector predict(const matrix &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTreeClassifier::CDecisionTreeClassifier(uint min_samples_split=2, uint max_depth=2, mode mode_=MODE_GINI)
{
m_min_samples_split = min_samples_split;
m_max_depth = max_depth;
m_mode = mode_;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTreeClassifier::~CDecisionTreeClassifier(void)
{
#ifdef DEBUG_MODE
Print(__FUNCTION__," Deleting Tree nodes =",nodes.Size());
#endif
this.delete_tree(root);
for (int i=0; i<(int)nodes.Size(); i++)
this.delete_tree(nodes[i]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTreeClassifier::delete_tree(Node* node)
{
if (CheckPointer(node) != POINTER_INVALID)
{
delete_tree(node.left_child);
delete_tree(node.right_child);
delete node;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::gini_index(vector &y)
{
vector unique = MatrixExtend::Unique_count(y);
vector probabilities = unique / (double)y.Size();
return 1.0 - MathPow(probabilities, 2).Sum();
}
//+------------------------------------------------------------------+
//| function to compute entropy |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::entropy(vector &y)
{
vector class_labels = MatrixExtend::Unique_count(y);
vector p_cls = class_labels / double(y.Size());
vector entropy = (-1 * p_cls) * log2(p_cls);
return entropy.Sum();
}
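//--- Illustration (not from the original source, assuming Unique_count
//--- returns per-class counts): for labels y = {0,0,1,1} the class
//--- probabilities are {0.5, 0.5}, so
//---   gini    = 1 - (0.5^2 + 0.5^2) = 0.5
//---   entropy = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0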
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::information_gain(vector &parent, vector &l_child, vector &r_child)
{
double weight_left = l_child.Size() / (double)parent.Size(),
weight_right = r_child.Size() / (double)parent.Size();
double gain =0;
switch(m_mode)
{
case MODE_GINI:
gain = gini_index(parent) - ( (weight_left*gini_index(l_child)) + (weight_right*gini_index(r_child)) );
break;
case MODE_ENTROPY:
gain = entropy(parent) - ( (weight_left*entropy(l_child)) + (weight_right*entropy(r_child)) );
break;
}
return gain;
}
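//+------------------------------------------------------------------+
//| Worked example (illustrative): for parent y=[0,0,1,1] split into |
//| l_child=[0,0] and r_child=[1,1] under MODE_GINI,                 |
//| gini(parent)=0.5 and gini(l)=gini(r)=0, so                       |
//| gain = 0.5 - (0.5*0 + 0.5*0) = 0.5, a perfect split              |
//+------------------------------------------------------------------+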
//+------------------------------------------------------------------+
//| function to print the tree |
//+------------------------------------------------------------------+
void CDecisionTreeClassifier::print_tree(Node *tree, string indent=" ",string padl="")
{
if (tree.leaf_value != NULL)
Print((padl+indent+": "),tree.leaf_value);
else //if we haven't reached the leaf node keep printing child trees
{
padl += " ";
Print((padl+indent)+": X_",tree.feature_index, "<=", tree.threshold, "?", tree.info_gain);
print_tree(tree.left_child, "left","--->"+padl);
print_tree(tree.right_child, "right","--->"+padl);
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTreeClassifier::fit(const matrix &x, const vector &y)
{
matrix data = MatrixExtend::concatenate(x, y, 1);
this.root = this.build_tree(data);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
split_info CDecisionTreeClassifier::split_data(const matrix &data, uint feature_index, double threshold=0.5)
{
int left_size=0, right_size =0;
vector row = {};
split_info split;
ulong cols = data.Cols(),
rows = data.Rows();
split.dataset_left.Resize(0, cols);
split.dataset_right.Resize(0, cols);
for (ulong i=0; i<rows; i++)
{
row = data.Row(i);
if (row[feature_index] <= threshold)
{
left_size++;
split.dataset_left.Resize(left_size, cols);
split.dataset_left.Row(row, left_size-1);
}
else
{
right_size++;
split.dataset_right.Resize(right_size, cols);
split.dataset_right.Row(row, right_size-1);
}
}
return split;
}
//+------------------------------------------------------------------+
//| Return the Node for the best split |
//+------------------------------------------------------------------+
split_info CDecisionTreeClassifier::get_best_split(const matrix &data, uint num_features)
{
double max_info_gain = -DBL_MAX;
vector feature_values = {};
vector left_v={}, right_v={}, y_v={};
//---
split_info best_split;
split_info split;
for (int i=0; i<(int)num_features; i++)
{
feature_values = data.Col(i);
vector possible_thresholds = MatrixExtend::Unique(feature_values);
if (possible_thresholds.Size() <= 1)
continue; // Skip this feature as it won't provide meaningful splits
//---
for (int j=0; j<(int)possible_thresholds.Size(); j++)
{
split = this.split_data(data, i, possible_thresholds[j]);
if (split.dataset_left.Rows()>0 && split.dataset_right.Rows() > 0)
{
y_v = data.Col(data.Cols()-1);
right_v = split.dataset_right.Col(split.dataset_right.Cols()-1);
left_v = split.dataset_left.Col(split.dataset_left.Cols()-1);
double curr_info_gain = this.information_gain(y_v, left_v, right_v);
if (curr_info_gain > max_info_gain)
{
#ifdef DEBUG_MODE
printf(" split left: [%dx%d] split right: [%dx%d] curr_info_gain: %f max_info_gain: %f",split.dataset_left.Rows(),split.dataset_left.Cols(),split.dataset_right.Rows(),split.dataset_right.Cols(),curr_info_gain,max_info_gain);
#endif
best_split.feature_index = i;
best_split.threshold = possible_thresholds[j];
best_split.dataset_left = split.dataset_left;
best_split.dataset_right = split.dataset_right;
best_split.info_gain = curr_info_gain;
max_info_gain = curr_info_gain;
}
}
}
}
return best_split;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
Node *CDecisionTreeClassifier::build_tree(matrix &data, uint curr_depth=0)
{
matrix X;
vector Y;
if (!MatrixExtend::XandYSplitMatrices(data,X,Y)) //Split the input matrix into feature matrix X and target vector Y.
{
#ifdef DEBUG_MODE
printf("%s Line %d Failed to build a tree Data Empty",__FUNCTION__,__LINE__);
#endif
return NULL; //return null pointer
}
is_fitted = true;
ulong samples = X.Rows(), features = X.Cols(); //Get the number of samples and features in the dataset.
ArrayResize(nodes, nodes.Size()+1); //Append the nodes to memory
Node *left_child, *right_child;
if (samples >= m_min_samples_split && curr_depth<=m_max_depth)
{
split_info best_split = this.get_best_split(data, (uint)features);
#ifdef DEBUG_MODE
Print(__FUNCTION__," | ",__LINE__,"\nbest_split left: [",best_split.dataset_left.Rows(),"x",best_split.dataset_left.Cols(),"]\nbest_split right: [",best_split.dataset_right.Rows(),"x",best_split.dataset_right.Cols(),"]\nfeature_index: ",best_split.feature_index,"\nInfo gain: ",best_split.info_gain,"\nThreshold: ",best_split.threshold);
#endif
if (best_split.info_gain > 0)
{
left_child = this.build_tree(best_split.dataset_left, curr_depth+1);
right_child = this.build_tree(best_split.dataset_right, curr_depth+1);
nodes[nodes.Size()-1] = new Node(best_split.feature_index,best_split.threshold,left_child,right_child,best_split.info_gain);
return nodes[nodes.Size()-1];
}
}
nodes[nodes.Size()-1] = new Node();
nodes[nodes.Size()-1].leaf_value = this.calculate_leaf_value(Y);
return nodes[nodes.Size()-1];
}
//+------------------------------------------------------------------+
//| returns the element from Y that has the highest count, |
//| effectively finding the most common element in the list. |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::calculate_leaf_value(vector &Y)
{
vector uniques_count = MatrixExtend::Unique_count(Y);
vector unique = MatrixExtend::Unique(Y);
return unique[uniques_count.ArgMax()];
}
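//+------------------------------------------------------------------+
//| e.g. Y={1,0,1,1}: Unique={0,1}, counts={1,3}, ArgMax picks index |
//| 1, so the leaf predicts the majority class 1                     |
//+------------------------------------------------------------------+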
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::make_predictions(const vector &x, const Node &tree)
{
if (!check_is_fitted(__FUNCTION__))
return 0;
if (tree.leaf_value != NULL) //This is a leaf_value
return tree.leaf_value;
#ifdef DEBUG_MODE
printf("Tree.threshold %f tree.feature_index %d leaf_value %f",tree.threshold,tree.feature_index,tree.leaf_value);
#endif
if (tree.feature_index>=x.Size())
return tree.leaf_value;
double feature_value = x[tree.feature_index];
double pred = 0;
if (feature_value <= tree.threshold)
{
if (CheckPointer(tree.left_child)!=POINTER_INVALID)
pred = this.make_predictions(x, tree.left_child);
}
else
{
if (CheckPointer(tree.right_child)!=POINTER_INVALID)
pred = this.make_predictions(x, tree.right_child);
}
return pred;
}
//+------------------------------------------------------------------+
//| Commonly used for making predictions in REAL-TIME |
//+------------------------------------------------------------------+
double CDecisionTreeClassifier::predict(const vector &x)
{
if (!check_is_fitted(__FUNCTION__))
return 0;
return this.make_predictions(x, this.root);
}
//+------------------------------------------------------------------+
//| Commonly used for making predictions in TRAIN-TEST |
//+------------------------------------------------------------------+
vector CDecisionTreeClassifier::predict(const matrix &x)
{
vector ret(x.Rows());
if (!check_is_fitted(__FUNCTION__))
return ret;
for (ulong i=0; i<x.Rows(); i++)
ret[i] = this.predict(x.Row(i));
return ret;
}
//+------------------------------------------------------------------+
//| |
//| |
//| |
//| |
//| |
//| |
//| |
//+------------------------------------------------------------------+
class CDecisionTreeRegressor: public CDecisionTreeClassifier
{
private:
double calculate_leaf_value(vector &Y);
split_info get_best_split(matrix &data, uint num_features);
double variance_reduction(vector &parent, vector &l_child, vector &r_child);
Node *build_tree(matrix &data, uint curr_depth=0);
public:
CDecisionTreeRegressor(uint min_samples_split=2, uint max_depth=2);
~CDecisionTreeRegressor(void);
void fit(matrix &x, vector &y);
};
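//+------------------------------------------------------------------+
//| Usage sketch (illustrative only). The regressor expects a        |
//| continuous target vector y:                                      |
//|                                                                  |
//|   CDecisionTreeRegressor reg_tree(2, 5);                         |
//|   reg_tree.fit(x, y);                                            |
//|   vector preds = reg_tree.predict(x); //inherited from the base  |
//+------------------------------------------------------------------+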
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTreeRegressor::CDecisionTreeRegressor(uint min_samples_split=2, uint max_depth=2):CDecisionTreeClassifier(min_samples_split, max_depth)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CDecisionTreeRegressor::~CDecisionTreeRegressor(void)
{
}
//+------------------------------------------------------------------+
//| function to compute variance reduction |
//+------------------------------------------------------------------+
double CDecisionTreeRegressor::variance_reduction(vector &parent, vector &l_child, vector &r_child)
{
double weight_l = l_child.Size() / (double)parent.Size(),
weight_r = r_child.Size() / (double)parent.Size();
return parent.Var() - ((weight_l * l_child.Var()) + (weight_r * r_child.Var()));
}
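//+------------------------------------------------------------------+
//| Worked example (illustrative, using population variance): parent |
//| y=[1,2,3,4] has Var=1.25; split l=[1,2], r=[3,4] each have       |
//| Var=0.25, so reduction = 1.25 - (0.5*0.25 + 0.5*0.25) = 1.0      |
//+------------------------------------------------------------------+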
//+------------------------------------------------------------------+
//| Return the Node for the best split |
//+------------------------------------------------------------------+
split_info CDecisionTreeRegressor::get_best_split(matrix &data, uint num_features)
{
double max_info_gain = -DBL_MAX;
vector feature_values = {};
vector left_v={}, right_v={}, y_v={};
//---
split_info best_split;
split_info split;
for (uint i=0; i<num_features; i++)
{
feature_values = data.Col(i);
vector possible_thresholds = MatrixExtend::Unique(feature_values);
for (uint j=0; j<possible_thresholds.Size(); j++)
{
split = this.split_data(data, i, possible_thresholds[j]);
if (split.dataset_left.Rows()>0 && split.dataset_right.Rows() > 0)
{
y_v = data.Col(data.Cols()-1);
right_v = split.dataset_right.Col(split.dataset_right.Cols()-1);
left_v = split.dataset_left.Col(split.dataset_left.Cols()-1);
double curr_info_gain = this.variance_reduction(y_v, left_v, right_v);
if (curr_info_gain > max_info_gain)
{
#ifdef DEBUG_MODE
printf("%s | %d \nsplit left: [%dx%d] split right: [%dx%d] curr_info_gain: %f max_info_gain: %f",__FUNCTION__,__LINE__,split.dataset_left.Rows(),split.dataset_left.Cols(),split.dataset_right.Rows(),split.dataset_right.Cols(),curr_info_gain,max_info_gain);
#endif
best_split.feature_index = i;
best_split.threshold = possible_thresholds[j];
best_split.dataset_left = split.dataset_left;
best_split.dataset_right = split.dataset_right;
best_split.info_gain = curr_info_gain;
max_info_gain = curr_info_gain;
}
}
}
}
return best_split;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
Node *CDecisionTreeRegressor::build_tree(matrix &data, uint curr_depth=0)
{
matrix X;
vector Y;
if (!MatrixExtend::XandYSplitMatrices(data,X,Y)) //Split the input matrix into feature matrix X and target vector Y.
{
#ifdef DEBUG_MODE
printf("%s Line %d Failed to build a tree Data Empty",__FUNCTION__,__LINE__);
#endif
return NULL; //Return a NULL pointer
}
ulong samples = X.Rows(), features = X.Cols(); //Get the number of samples and features in the dataset.
ArrayResize(nodes, nodes.Size()+1); //Append the nodes to memory
Node *left_child, *right_child;
if (samples >= m_min_samples_split && curr_depth<=m_max_depth)
{
split_info best_split = this.get_best_split(data, (uint)features);
#ifdef DEBUG_MODE
Print(__FUNCTION__," | ",__LINE__,"\nbest_split left: [",best_split.dataset_left.Rows(),"x",best_split.dataset_left.Cols(),"]\nbest_split right: [",best_split.dataset_right.Rows(),"x",best_split.dataset_right.Cols(),"]\nfeature_index: ",best_split.feature_index,"\nInfo gain: ",best_split.info_gain,"\nThreshold: ",best_split.threshold);
#endif
if (best_split.info_gain > 0)
{
left_child = this.build_tree(best_split.dataset_left, curr_depth+1);
right_child = this.build_tree(best_split.dataset_right, curr_depth+1);
nodes[nodes.Size()-1] = new Node(best_split.feature_index,best_split.threshold,left_child,right_child,best_split.info_gain);
return nodes[nodes.Size()-1];
}
}
nodes[nodes.Size()-1] = new Node();
nodes[nodes.Size()-1].leaf_value = this.calculate_leaf_value(Y);
return nodes[nodes.Size()-1];
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CDecisionTreeRegressor::fit(matrix &x, vector &y)
{
matrix data = MatrixExtend::concatenate(x, y, 1);
this.root = this.build_tree(data);
is_fitted = true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CDecisionTreeRegressor::calculate_leaf_value(vector &Y)
{
return Y.Mean();
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| End of file: MALE5/Decision Tree/tree.mqh                        |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| matrix_utils.mqh |
//| Copyright 2022, Omega Joctan . |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
#include <MALE5\preprocessing.mqh>
//+------------------------------------------------------------------+
//| A class containing additional matrix manipulation functions |
//+------------------------------------------------------------------+
class MatrixExtend
{
protected:
template<typename T>
static T MathRandom(T mini, T maxi);
static string CalcTimeElapsed(double seconds);
static void Swap(double &var1, double &var2);
static string ConvertTime(double seconds);
template<typename T>
static void GetCol(const T &Matrix[], T &Col[], int column, int cols);
static bool IsNumber(string text);
static vector FixColumn(CLabelEncoder &encoder, string &Arr[], double threshold =0.3);
public:
MatrixExtend(void);
~MatrixExtend(void);
template<typename T>
static int Sign(T var)
{
if (var<0)
return -1;
else if (var==0)
return 0;
else
return 1;
}
//--- File Functions
template <typename T>
static bool WriteCsv(string csv_name, matrix<T> &matrix_, string &header[] ,bool common=false, int digits=5);
template <typename T>
static bool WriteCsv(string csv_name, matrix<T> &matrix_, string header_string="",bool common=false, int digits=5);
static matrix ReadCsv(string file_name, string &headers, string delimiter=",",bool common=false, bool auto_encode=false);
static matrix DBtoMatrix(int db_handle, string table_name,string &column_names[],int total=WHOLE_ARRAY);
static bool write_bin(vector &v, string file);
//--- Manipulations
template<typename T>
static bool RemoveCol(matrix<T> &mat, ulong col);
static void RemoveMultCols(matrix &mat, int &cols[]);
static void RemoveMultCols(matrix &mat, int from, int total=WHOLE_ARRAY);
static void RemoveRow(matrix &mat,ulong row);
static void VectorRemoveIndex(vector &v, ulong index);
//--- Machine Learning
template<typename T>
static bool XandYSplitMatrices(const matrix<T> &matrix_, matrix<T> &xmatrix, vector<T> &y_vector,int y_column=-1);
template <typename T>
static void TrainTestSplitMatrices(const matrix<T> &matrix_, matrix<T> &x_train, vector<T> &y_train, matrix<T> &x_test, vector<T> &y_test, double train_size=0.7,int random_state=-1);
static matrix DesignMatrix(matrix &x_matrix);
static matrix OneHotEncoding(const vector &v); //ONe hot encoding
static matrix Sign(matrix &x);
static vector Sign(vector &x);
static matrix eye(uint num_features);
//--- Detection
static void Unique(const string &Array[], string &classes_arr[]);
static vector Unique(const vector &v); //Identifies classes available in a vector
static vector Unique_count(vector &v);
template<typename T>
static vector Random(T min, T max, int size,int random_state=-1); //Generates a random vector of type T sized = size
static matrix Random(double min, double max, ulong rows, ulong cols, int random_state=-1);
template<typename T>
static vector Search(const vector<T> &v, T value);
//--- Transformations
static matrix VectorToMatrix(const vector &v, ulong cols=1);
template<typename T>
static vector MatrixToVector(const matrix<T> &mat);
template<typename T>
static vector ArrayToVector(const T &Arr[]);
template<typename T>
static bool VectorToArray(const vector<T> &v,T &arr[]);
//--- Manipulations
static vector concatenate(const vector &v1, const vector &v2); //Appends v2 to vector 1
static matrix concatenate(const matrix &mat1, const matrix &mat2, int axis = 0);
template<typename T>
static matrix<T> concatenate(const matrix<T> &mat, const vector<T> &v, int axis=1);
template<typename T>
static bool Copy(const vector<T> &src, vector<T> &dst, ulong src_start,ulong total=WHOLE_ARRAY);
template<typename T>
static void Reverse(vector<T> &v);
template<typename T>
static void Reverse(matrix<T> &mat);
static matrix HadamardProduct(matrix &a, matrix &b);
template<typename T>
static void Randomize(vector<T> &v, int random_state=-1, bool replace=false);
template<typename T>
static void Randomize(matrix<T> &matrix_,int random_state=-1, bool replace=false);
template<typename T>
static void NormalizeDouble_(vector<T> &v, int digits=3);
template<typename T>
static void NormalizeDouble_(matrix<T> &mat, int digits=3);
static int CopyBufferVector(int handle, int buff_num, int start_pos,int count, vector &v);
static string Stringfy(vector &v, int digits = 2);
static matrix Zeros(ulong rows, ulong cols) { matrix ret_mat(rows, cols); ret_mat.Fill(0.0); return (ret_mat); }
static vector Zeros(ulong size) { vector ret_v(size); ret_v.Fill(0.0); return (ret_v); }
static matrix Get(const matrix &mat, ulong start_index, ulong end_index);
static vector Get(const vector &v, ulong start_index, ulong end_index);
template<typename T>
static vector Sort(vector<T> &v,ENUM_SORT_MODE sort_mode=SORT_ASCENDING);
template<typename T>
static vector ArgSort(vector<T> &v);
//--- Others
static void PrintShort(matrix &matrix_,ulong rows=5, int digits=5);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
MatrixExtend::MatrixExtend(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
MatrixExtend::~MatrixExtend(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::VectorToMatrix(const vector &v, ulong cols=1)
{
ulong rows = 0;
matrix mat = {};
if ( v.Size() % cols > 0) //If there is a remainder
{
printf("Invalid cols %d for this vector of size %d",cols,v.Size());
return mat;
}
else
rows = v.Size()/cols;
//---
mat.Resize(rows, cols);
for(ulong i=0, index =0; i<rows; i++)
for(ulong j=0; j<cols; j++, index++)
{
mat[i][j] = v[index];
}
return(mat);
}
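//+------------------------------------------------------------------+
//| e.g. vector v={1,2,3,4,5,6} with cols=2 becomes the 3x2 matrix   |
//| [[1,2],[3,4],[5,6]], filled row by row                           |
//+------------------------------------------------------------------+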
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::MatrixToVector(const matrix<T> &mat)
{
vector<T> v = {};
matrix<T> temp_mat = mat;
if (!temp_mat.Swap(v))
Print(__FUNCTION__," Failed to turn the matrix[",mat.Rows(),"x",mat.Cols(),"] into a vector");
return(v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
bool MatrixExtend::RemoveCol(matrix<T> &mat, ulong col)
{
matrix<T> new_matrix(mat.Rows(),mat.Cols()-1); //Remove the one Column
if (col >= mat.Cols()) //valid column indices are 0..Cols()-1
{
Print(__FUNCTION__," column out of range");
return false;
}
for (ulong i=0, new_col=0; i<mat.Cols(); i++)
{
if (i == col)
continue;
else
{
new_matrix.Col(mat.Col(i),new_col);
new_col++;
}
}
mat.Copy(new_matrix);
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::RemoveMultCols(matrix &mat, int &cols[])
{
ulong size = (int)ArraySize(cols);
if(size > mat.Cols())
{
Print(__FUNCTION__," Columns to remove can't be more than the available columns");
return;
}
vector Zeros(mat.Rows());
Zeros.Fill(0);
for(ulong i=0; i<size; i++)
for(ulong j=0; j<mat.Cols(); j++)
{
if(cols[i] == j)
mat.Col(Zeros,j);
}
//---
vector column_vector;
ulong remain_size = mat.Cols()-size; //number of columns that should remain after removal
while (mat.Cols() > remain_size && !IsStopped())
for(ulong i=0; i<mat.Cols(); i++)
{
column_vector = mat.Col(i);
if(column_vector.Sum()==0)
if (!RemoveCol(mat,i))
{
printf("%s Line %d Failed to remove a column %d from a matrix",__FUNCTION__,__LINE__,i);
break;
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::RemoveMultCols(matrix &mat, int from, int total=WHOLE_ARRAY)
{
total = total==WHOLE_ARRAY ? (int)mat.Cols()-from : total;
if(total > (int)mat.Cols())
{
Print(__FUNCTION__," Columns to remove can't be more than the available columns");
return;
}
vector Zeros(mat.Rows());
Zeros.Fill(0);
for (int i=from; i<total+from; i++)
mat.Col(Zeros, i);
//---
ulong remain_size = mat.Cols()-total;
while (mat.Cols() > remain_size && !IsStopped())
{
//printf("cols %d total %d",cols,total);
for(ulong i=0; i<mat.Cols(); i++) //loop the entire matrix searching for columns to remove
if(mat.Col(i).Sum()==0)
if (!RemoveCol(mat,i))
{
printf("%s Line %s Failed to remove a column %d from a matrix",__FUNCTION__,__LINE__,i);
break;
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::RemoveRow(matrix &mat,ulong row)
{
matrix new_matrix(mat.Rows()-1,mat.Cols()); //Remove the one Row
for(ulong i=0, new_rows=0; i<mat.Rows(); i++)
{
if(i == row)
continue;
else
{
new_matrix.Row(mat.Row(i),new_rows);
new_rows++;
}
}
mat.Copy(new_matrix);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::VectorRemoveIndex(vector &v, ulong index)
{
vector new_v(v.Size()-1);
for(ulong i=0, count = 0; i<v.Size(); i++)
if(i != index)
{
new_v[count] = v[i];
count++;
}
v.Copy(new_v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template <typename T>
bool MatrixExtend::WriteCsv(string csv_name, matrix<T> &matrix_, string &header[], bool common=false, int digits=5)
{
string header_str = "";
for (int i=0; i<ArraySize(header); i++)
header_str += header[i] + ((i+1 == ArraySize(header)) ? "" : ",");
return WriteCsv(csv_name, matrix_, header_str, common, digits);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template <typename T>
bool MatrixExtend::WriteCsv(string csv_name, matrix<T> &matrix_, string header_string="", bool common=false, int digits=5)
{
FileDelete(csv_name);
int handle = FileOpen(csv_name,FILE_WRITE|FILE_CSV|FILE_ANSI|(common?FILE_COMMON:FILE_IS_WRITABLE),",",CP_UTF8);
if (header_string == "" || header_string == NULL)
for (ulong i=0; i<matrix_.Cols(); i++)
header_string += "None"+ (i==matrix_.Cols()-1?"":",");
if(handle == INVALID_HANDLE)
{
printf("Invalid %s handle Error %d ",csv_name,GetLastError());
return (false);
}
string concstring;
vector<T> row = {};
datetime time_start = GetTickCount(), current_time;
string header[];
ushort u_sep;
u_sep = StringGetCharacter(",",0);
StringSplit(header_string,u_sep, header);
vector<T> colsinrows = matrix_.Row(0);
if (ArraySize(header) != (int)colsinrows.Size())
{
printf("headers=%d and columns=%d from the matrix vary is size ",ArraySize(header),colsinrows.Size());
return false;
}
//---
string header_str = "";
for (int i=0; i<ArraySize(header); i++)
header_str += header[i] + (i+1 == colsinrows.Size() ? "" : ",");
FileWrite(handle,header_str);
FileSeek(handle,0,SEEK_SET);
for(ulong i=0; i<matrix_.Rows() && !IsStopped(); i++)
{
ZeroMemory(concstring);
row = matrix_.Row(i);
for(ulong j=0, cols =1; j<row.Size() && !IsStopped(); j++, cols++)
{
current_time = GetTickCount();
Comment("Writting ",csv_name," record [",i+1,"/",matrix_.Rows(),"] Time taken | ",ConvertTime((current_time - time_start) / 1000.0));
concstring += (string)NormalizeDouble(row[j],digits) + (cols == matrix_.Cols() ? "" : ",");
}
FileSeek(handle,0,SEEK_END);
FileWrite(handle,concstring);
}
FileClose(handle);
return (true);
}
//+------------------------------------------------------------------+
//| This function is aimed at detecting the string columns and it    |
//| encodes them, while fixing the missing information in the column |
//+------------------------------------------------------------------+
vector MatrixExtend::FixColumn(CLabelEncoder &encoder, string &Arr[], double threshold =0.3)
{
int size = ArraySize(Arr);
int str_count =0;
vector ret(size);
for (int i=0; i<size; i++) //Check what percentage of data is strings
if (!IsNumber(Arr[i]))
str_count++;
//---
bool is_strings_col = (str_count>=size*threshold);
if (is_strings_col) //if a column is detected to be a column full of strings
{
//Encode it
return encoder.encode(Arr);
}
//---
string value = "";
int total =0;
double mean=0;
for (int i=0; i<size; i++) //Detect Missing values | Remove the rows
{
value = Arr[i];
if (value == "NaN" || value == "-NaN" || value == "!VALUE" ||
value == "" || value == "NA" || value == "N/A" || value == "null" ||
value == "Inf" || value == "Infinity" || value == "-Inf" || value == "-Infinity" ||
value == "#DIV/0!" || value == "#VALUE!") //Check if there are NotANumber values
continue;
mean += (double)Arr[i];
total++;
}
mean = total > 0 ? mean/total : 0.0; //guard against a column with no valid values
//---
for (int i=0; i<size; i++) //Detect Missing values | Remove the rows
{
value = Arr[i];
if (value == "NaN" || value == "-NaN" || value == "!VALUE" ||
value == "" || value == "NA" || value == "N/A" || value == "null" ||
value == "Inf" || value == "Infinity" || value == "-Inf" || value == "-Infinity" ||
value == "#DIV/0!" || value == "#VALUE!") //Check if there are NotANumber values
{
ret[i] = mean;
continue;
}
ret[i] = double(Arr[i]);
}
return ret;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool MatrixExtend::IsNumber(string text)
{
int length = StringLen(text); // Get the length of the string.
int pointcount = 0; // Initialize a counter for the number of decimal points.
// Iterate through each character in the text.
for (int i = 0; i < length; i++)
{
int char1 = StringGetCharacter(text, i); // Get the ASCII code of the current character.
// If the character is a decimal point, increment the decimal point counter.
if (char1 == 46)
pointcount += 1;
// If the character is a digit or a decimal point and the number of decimal points is less than 2,
// continue to the next character; otherwise, return false.
if (((char1 >= 48 && char1 <= 57) || char1 == 46) && pointcount < 2)
continue;
else
return false;
}
// If all characters in the text have been checked without returning false, return true.
return true;
}
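//+------------------------------------------------------------------+
//| e.g. IsNumber("3.14")==true, IsNumber("1.2.3")==false (two       |
//| decimal points). Note that a leading minus sign is not handled,  |
//| so "-5" also returns false                                       |
//+------------------------------------------------------------------+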
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::ReadCsv(string file_name, string &headers, string delimiter=",",bool common=false, bool auto_encode=false)
{
CLabelEncoder encoder;
string Arr[];
int all_size = 0;
int cols_total=0;
int handle = FileOpen(file_name,FILE_READ|FILE_SHARE_READ|FILE_CSV|FILE_ANSI|(common?FILE_COMMON:0),delimiter);
datetime time_start = GetTickCount(), current_time;
string header_arr[];
int header_column = 0;
if(handle == INVALID_HANDLE)
{
printf("Invalid %s handle Error %d ",file_name,GetLastError());
Print(GetLastError()==0?" TIP | File Might be in use Somewhere else or in another Directory":"");
}
else
{
int column = 0, rows=0;
while(!FileIsEnding(handle) && !IsStopped())
{
string data = FileReadString(handle);
//---
if(rows ==0)
{
header_column++;
ArrayResize(header_arr, header_column);
header_arr[header_column-1] = data;
}
column++;
if(rows>0) //Avoid the first column which contains the column's header
{
all_size++;
ArrayResize(Arr,all_size);
Arr[all_size-1] = data;
}
//---
if(FileIsLineEnding(handle))
{
cols_total=column;
rows++;
column = 0;
current_time = GetTickCount();
Comment("Reading ",file_name," record = ",rows," Time taken | ",ConvertTime((current_time - time_start) / 1000.0));
}
}
FileClose(handle);
}
//--- Get the headers
headers="";
for(uint i=0; i<header_arr.Size(); i++)
headers += header_arr[i] + ((i==header_arr.Size()-1) ? "" :delimiter);
//---
int rows = cols_total>0 ? all_size/cols_total : 0; //guard against a file that failed to open
Comment("");
matrix mat(rows, cols_total);
string Col[];
vector col_vector;
for (int i=0; i<cols_total; i++)
{
GetCol(Arr, Col, i+1, cols_total);
col_vector = auto_encode ? FixColumn(encoder, Col) : ArrayToVector(Col);
mat.Col(col_vector, i);
}
return(mat);
}
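//+------------------------------------------------------------------+
//| Round-trip sketch (illustrative; the file name is hypothetical): |
//|                                                                  |
//|   MatrixExtend::WriteCsv("data.csv", m, "open,close");           |
//|   string headers;                                                |
//|   matrix loaded = MatrixExtend::ReadCsv("data.csv", headers);    |
//+------------------------------------------------------------------+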
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::GetCol(const T &Matrix[], T &Col[], int column, int cols)
{
int rows = ArraySize(Matrix)/cols;
ArrayResize(Col,rows);
int start = 0;
for (int i=0; i<cols; i++)
{
start = i;
if (i != column-1) continue;
else
for (int j=0; j<rows; j++)
{
//printf("ColMatrix[%d} Matrix{%d]",j,start);
Col[j] = Matrix[start];
start += cols;
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::ArrayToVector(const T &Arr[])
{
vector v(ArraySize(Arr));
for (int i=0; i<ArraySize(Arr); i++)
v[i] = double(Arr[i]);
return (v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
bool MatrixExtend::VectorToArray(const vector<T> &v, T &arr[])
{
vector temp = v;
if (!temp.Swap(arr))
{
Print("Failed to Convert vector to Array Err=",GetLastError());
return false;
}
return(true);
}
//+------------------------------------------------------------------+
//| |
//| |
//+------------------------------------------------------------------+
template<typename T>
bool MatrixExtend::XandYSplitMatrices(const matrix<T> &matrix_, matrix<T> &xmatrix, vector<T> &y_vector,int y_column=-1)
{
y_column = int( y_column==-1 ? matrix_.Cols()-1 : y_column);
if (matrix_.Rows() == 0 || matrix_.Cols()==0)
{
#ifdef DEBUG_MODE
printf("%s Line %d Cannot split the matrix of size[%dx%d]",__FUNCTION__,__LINE__,matrix_.Rows(),matrix_.Cols());
#endif
return false;
}
y_vector = matrix_.Col(y_column);
xmatrix.Copy(matrix_);
return RemoveCol(xmatrix, y_column); //Remove the y column
}
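//+------------------------------------------------------------------+
//| e.g. for data=[[f1,f2,label],...] the default y_column=-1 takes  |
//| the last column:                                                 |
//|                                                                  |
//|   matrix x; vector y;                                            |
//|   if (MatrixExtend::XandYSplitMatrices(data, x, y))              |
//|      //x is [rows x cols-1] features, y is the label column      |
//+------------------------------------------------------------------+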
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::Randomize(vector<T> &v, int random_state=-1, bool replace=false)
{
MathSrand(random_state!=-1?random_state:GetTickCount());
int swap_index;
double temp;
int SIZE = (int)v.Size();
vector<T> temp_v = v;
for (int i=0; i<SIZE; i++) //Fisher yates algorithm
{
if (!replace)
{
swap_index = rand() % SIZE;
temp = v[i];
v[i] = v[swap_index];
v[swap_index] = temp;
}
else
{
v[i] = temp_v[MathRandom(0, SIZE-1)]; //SIZE-1 keeps the index in range, since MathRandom includes the maximum
}
}
}
//+------------------------------------------------------------------+
//| replace =true parameter allows the same index to be chosen more |
//| than once, simulating the bootstrapping process. |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::Randomize(matrix<T> &matrix_,int random_state=-1, bool replace=false)
{
MathSrand(random_state!=-1?random_state:GetTickCount());
int ROWS=(int)matrix_.Rows(), COL=(int)matrix_.Cols();
int swap_index;
vector<T> temp(COL);
matrix<T> temp_m = matrix_;
int random = 0;
for (int i=0; i<ROWS; i++)
{
if (!replace)
{
swap_index = MathRand() % ROWS;
temp = matrix_.Row(i);
matrix_.Row(matrix_.Row(swap_index),i);
matrix_.Row(temp,swap_index);
}
else
{
random = MathRandom(1, ROWS);
temp = temp_m.Row(random-1);
matrix_.Row(temp, i);
}
}
}
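//+------------------------------------------------------------------+
//| Bootstrap sketch (illustrative):                                 |
//|                                                                  |
//|   matrix boot = data;                                            |
//|   MatrixExtend::Randomize(boot, 42, true); //rows drawn with     |
//|                                            //replacement         |
//+------------------------------------------------------------------+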
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template <typename T>
void MatrixExtend::TrainTestSplitMatrices(const matrix<T> &matrix_, matrix<T> &x_train, vector<T> &y_train, matrix<T> &x_test, vector<T> &y_test, double train_size=0.7,int random_state=-1)
{
ulong total = matrix_.Rows(), cols = matrix_.Cols();
ulong last_col = cols-1;
//--- Random pseudo matrix
matrix ret_matrix = matrix_;
Randomize(ret_matrix,random_state);
//---
int train = (int)MathFloor(total*train_size);
int test = (int)total-train;
x_train.Resize(train,cols-1);
x_test.Resize(test,cols-1);
y_train.Resize(train);
y_test.Resize(test);
int train_count = 0, test_count = 0;
Copy(ret_matrix.Col(last_col),y_train,0,train);
Copy(ret_matrix.Col(last_col),y_test,train);
for(ulong i=0; i<ret_matrix.Rows(); i++)
{
if(i < (ulong)train)
{
x_train.Row(ret_matrix.Row(i),train_count);
train_count++;
}
else
{
x_test.Row(ret_matrix.Row(i),test_count);
test_count++;
}
}
}
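//+------------------------------------------------------------------+
//| Usage sketch (illustrative):                                     |
//|                                                                  |
//|   matrix x_train, x_test; vector y_train, y_test;                |
//|   MatrixExtend::TrainTestSplitMatrices(data, x_train, y_train,   |
//|                                  x_test, y_test, 0.7, 42);       |
//|   //70% of the shuffled rows go to train, the rest to test;      |
//|   //random_state=-1 seeds the shuffle from GetTickCount()        |
//+------------------------------------------------------------------+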
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::DesignMatrix(matrix &x_matrix)
{
matrix out_matrix(x_matrix.Rows(),x_matrix.Cols()+1);
vector ones(x_matrix.Rows());
ones.Fill(1);
out_matrix.Col(ones,0);
vector new_vector;
for(ulong i=1; i<out_matrix.Cols(); i++)
{
new_vector = x_matrix.Col(i-1);
out_matrix.Col(new_vector,i);
}
return (out_matrix);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::OneHotEncoding(const vector &v)
{
matrix mat = {};
//---
vector v_classes = Unique(v);
//---
mat.Resize(v.Size(),v_classes.Size());
mat.Fill(-100);
for (ulong i=0; i<mat.Rows(); i++)
for (ulong j=0; j<mat.Cols(); j++)
{
if (v[i] == v_classes[j])
mat[i][j] = 1;
else
mat[i][j] = 0;
}
return(mat);
}
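//+------------------------------------------------------------------+
//| e.g. v={1,2,1,3} with sorted classes {1,2,3} encodes to          |
//| [[1,0,0],[0,1,0],[1,0,0],[0,0,1]]                                |
//+------------------------------------------------------------------+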
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::Unique(const string &Array[], string &classes_arr[])
{
string temp_arr[];
ArrayResize(classes_arr,1);
ArrayCopy(temp_arr,Array);
classes_arr[0] = Array[0];
for(int i=0, count =1; i<ArraySize(Array); i++) //counting the different neighbors
{
for(int j=0; j<ArraySize(Array); j++)
{
if(Array[i] == temp_arr[j] && temp_arr[j] != "-nan")
{
bool count_ready = false;
for(int n=0; n<ArraySize(classes_arr); n++)
if(Array[i] == classes_arr[n])
count_ready = true;
if(!count_ready)
{
count++;
ArrayResize(classes_arr,count);
classes_arr[count-1] = Array[i];
temp_arr[j] = "-nan"; //modify so that it can no more be counted
}
else
break;
}
else
continue;
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector MatrixExtend::Unique(const vector &v)
{
vector temp_v = v;
vector v_classes={v[0]};
for (ulong i = 0, count=0; i < v.Size(); i++)
{
bool alreadyCounted = false;
for (ulong j = 0; j < v_classes.Size(); j++)
{
if (temp_v[i] == v_classes[j] && temp_v[i] != -DBL_MAX && i!=0)
{
alreadyCounted = true;
temp_v[i] = -DBL_MAX;
}
}
if (!alreadyCounted)
{
count++;
v_classes.Resize(count);
v_classes[count-1] = temp_v[i];
}
}
return MatrixExtend::Sort(v_classes); //Sort the unique values in ascending order
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
T MatrixExtend:: MathRandom(T mini, T maxi)
{
double f = (MathRand() / 32767.0);
return (mini + (T)(f * (maxi - mini)));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::Random(T min, T max,int size,int random_state=-1)
{
MathSrand(random_state!=-1?random_state:GetTickCount());
vector v(size);
for (ulong i=0; i<v.Size(); i++)
v[i] = MathRandom<T>(min,max);
return (v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::Random(double min,double max,ulong rows,ulong cols,int random_state=-1)
{
MathSrand(random_state!=-1?random_state:GetTickCount());
matrix mat(rows,cols);
for (ulong r=0; r<rows; r++)
for (ulong c=0; c<cols; c++)
mat[r][c] = MathRandom<double>(min,max);
return (mat);
}
//+------------------------------------------------------------------+
//| Appends vector v2 to the end of vector v1                        |
//+------------------------------------------------------------------+
vector MatrixExtend::concatenate(const vector &v1, const vector &v2)
{
vector v_out = v1;
v_out.Resize(v1.Size()+v2.Size());
for (ulong i=0; i<v1.Size(); i++)
v_out[i] = v1[i];
for (ulong i=v1.Size(),index =0; i<v_out.Size(); i++, index++)
v_out[i] = v2[index];
return (v_out);
}
//+------------------------------------------------------------------+
//| Appends matrix mat2 to the end of mat1                           |
//+------------------------------------------------------------------+
matrix MatrixExtend::concatenate(const matrix &mat1, const matrix &mat2, int axis = 0)
{
matrix m_out = {};
if ((axis == 0 && mat1.Cols() != mat2.Cols() && mat1.Cols()>0) || (axis == 1 && mat1.Rows() != mat2.Rows() && mat1.Rows()>0))
{
Print(__FUNCTION__, "Err | Dimensions mismatch for concatenation");
return m_out;
}
if (axis == 0) {
m_out.Resize(mat1.Rows() + mat2.Rows(), MathMax(mat1.Cols(), mat2.Cols()));
for (ulong row = 0; row < mat1.Rows(); row++) {
for (ulong col = 0; col < m_out.Cols(); col++) {
m_out[row][col] = mat1[row][col];
}
}
for (ulong row = 0; row < mat2.Rows(); row++) {
for (ulong col = 0; col < m_out.Cols(); col++) {
m_out[row + mat1.Rows()][col] = mat2[row][col];
}
}
} else if (axis == 1) {
m_out.Resize(MathMax(mat1.Rows(), mat2.Rows()), mat1.Cols() + mat2.Cols());
for (ulong row = 0; row < m_out.Rows(); row++) {
for (ulong col = 0; col < mat1.Cols(); col++) {
m_out[row][col] = mat1[row][col];
}
for (ulong col = 0; col < mat2.Cols(); col++) {
m_out[row][col + mat1.Cols()] = mat2[row][col];
}
}
}
return m_out;
}
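//+------------------------------------------------------------------+
//| e.g. a={{1,2},{3,4}}, b={{5,6}}: concatenate(a,b,0) stacks rows  |
//| into {{1,2},{3,4},{5,6}}; axis=1 instead requires equal row      |
//| counts and appends b's columns to the right of a                 |
//+------------------------------------------------------------------+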
//+------------------------------------------------------------------+
//| Concatenates the vector to a matrix, axis=0 appends it along the |
//| rows while axis=1 appends it along the columns                   |
//+------------------------------------------------------------------+
template<typename T>
matrix<T> MatrixExtend::concatenate(const matrix<T> &mat, const vector<T> &v, int axis=1)
{
matrix<T> ret= mat;
ulong new_rows, new_cols;
if (axis == 0) //place it along the rows
{
if (mat.Cols() == 0)
ret.Resize(mat.Rows(), v.Size());
new_rows = ret.Rows()+1; new_cols = ret.Cols();
if (v.Size() != new_cols)
{
Print(__FUNCTION__," Dimensions don't match the vector v needs to have the same size as the number of columns in the original matrix");
return ret;
}
ret.Resize(new_rows, new_cols);
ret.Row(v, new_rows-1);
}
else if (axis == 1)
{
if (mat.Rows() == 0)
ret.Resize(v.Size(), ret.Cols());
new_rows = ret.Rows(); new_cols = ret.Cols()+1;
if (v.Size() != new_rows)
{
Print(__FUNCTION__," Dimensions don't match the vector v needs to have the same size as the number of rows in the original matrix");
return ret;
}
ret.Resize(new_rows, new_cols);
ret.Col(v, new_cols-1);
}
else
{
Print(__FUNCTION__," Axis value Can either be 0 or 1");
return ret;
}
//---
return ret;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
bool MatrixExtend::Copy(const vector<T> &src, vector<T> &dst,ulong src_start,ulong total=WHOLE_ARRAY)
{
if (total == WHOLE_ARRAY)
total = src.Size()-src_start;
if ( total <= 0 || src.Size() == 0)
{
printf("%s Can't copy a vector | Size %d total %d src_start %d ",__FUNCTION__,src.Size(),total,src_start);
return (false);
}
dst.Resize(total);
dst.Fill(0);
for (ulong i=src_start, index =0; i<total+src_start; i++)
{
dst[index] = src[i];
index++;
}
return (true);
}
//+------------------------------------------------------------------+
//| Searches for a value in a vector | Returns all the index where |
//| Such values was located |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::Search(const vector<T> &v, T value)
{
vector<T> v_out ={};
for (ulong i=0, count =0; i<v.Size(); i++)
{
if (value == v[i])
{
count++;
v_out.Resize(count);
v_out[count-1] = (T)i;
}
}
return v_out;
}
//+------------------------------------------------------------------+
//| Finds the unique values in a vector and returns a vector of |
//| the number of values found for each unique value |
//+------------------------------------------------------------------+
vector MatrixExtend::Unique_count(vector &v)
{
vector classes = MatrixExtend::Unique(v);
vector keys(classes.Size());
for (ulong i=0; i<classes.Size(); i++)
keys[i] = (int)Search(v, classes[i]).Size();
return keys;
}
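//+------------------------------------------------------------------+
//| e.g. v={5,1,5,5,2}: Unique(v)={1,2,5} (sorted) and               |
//| Unique_count(v)={1,1,3}, the occurrences of each class           |
//+------------------------------------------------------------------+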
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::Sort(vector<T> &v,ENUM_SORT_MODE sort_mode=SORT_ASCENDING)
{
T arr[];
vector temp = v;
temp.Swap(arr);
if (!ArraySort(arr))
printf("%s Failed to sort this vector Err=%d",__FUNCTION__,GetLastError());
switch(sort_mode)
{
case SORT_ASCENDING:
temp = MatrixExtend::ArrayToVector(arr);
break;
case SORT_DESCENDING:
temp = MatrixExtend::ArrayToVector(arr);
MatrixExtend::Reverse(temp);
break;
default:
printf("%s Unknown sort mode");
break;
}
return temp;
}
//+------------------------------------------------------------------+
//| Returns the Sorted Argsuments in either ascending order or |
//| descending order |
//+------------------------------------------------------------------+
template<typename T>
vector MatrixExtend::ArgSort(vector<T> &v)
{
//---
ulong size = v.Size();
vector args(size);
// Initialize args array with sequential values
for (ulong i = 0; i < size; i++)
args[i] = (int)i;
// Perform selection sort on args based on array values
for (ulong i = 0; i < size - 1; i++)
{
ulong minIndex = i;
for (ulong j = i + 1; j < size; j++)
{
if (v[(int)args[j]] < v[(int)args[minIndex]])
minIndex = j;
}
// Swap args
int temp = (int)args[i];
args[i] = args[minIndex];
args[minIndex] = temp;
}
return args;
}
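//+------------------------------------------------------------------+
//| e.g. v={30,10,20}: ArgSort(v)={1,2,0}, the indices that would    |
//| sort v ascending, so v[1]=10 <= v[2]=20 <= v[0]=30               |
//+------------------------------------------------------------------+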
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::Reverse(vector<T> &v)
{
vector<T> v_temp = v;
for (ulong i=0, j=v.Size()-1; i<v.Size(); i++, j--)
v[i] = v_temp[j];
ZeroMemory(v_temp);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::Reverse(matrix<T> &mat)
{
matrix<T> temp_mat = mat;
for (ulong i=0, j=mat.Rows()-1; i<mat.Rows(); i++, j--)
mat.Row(mat.Row(j), i);
}
//+------------------------------------------------------------------+
//| Hadamard product --> is a binary operation that takes two |
//| matrices of the same dimensions and produces another matrix |
//| of the same dimension as the operands. | This operation is |
//| widely known as element wise multiplication |
//+------------------------------------------------------------------+
matrix MatrixExtend::HadamardProduct(matrix &a,matrix &b)
{
matrix c = {};
if (a.Rows() != b.Rows() || a.Cols() != b.Cols())
{
Print("Cannot calculate Hadamard product | matrix a and b are not having the same size ");
return c;
}
//---
return a*b;
}
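//+------------------------------------------------------------------+
//| e.g. {{1,2},{3,4}} element-wise {{5,6},{7,8}} = {{5,12},{21,32}} |
//+------------------------------------------------------------------+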
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
string MatrixExtend::CalcTimeElapsed(double seconds)
{
string time_str = "";
uint minutes=0, hours=0;
if(seconds >= 3600)
{
hours = (uint)(seconds/3600.0);
minutes = (uint)(fmod(seconds,3600.0)/60.0);
time_str = StringFormat("%d Hours %d Minutes and %.3f Seconds ",hours,minutes,fmod(seconds,60.0));
}
else if(seconds >= 60)
{
minutes = (uint)(seconds/60.0);
time_str = StringFormat("%d Minutes and %.3f Seconds ",minutes,fmod(seconds,60.0));
}
else
time_str = StringFormat("%.3f Seconds ",seconds);
return time_str;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::DBtoMatrix(int db_handle, string table_name,string &column_names[],int total=WHOLE_ARRAY)
{
matrix matrix_ = {};
#ifdef DEBUG_MODE
Print("---> loading database ");
#endif
if(db_handle == INVALID_HANDLE)
{
printf("db handle failed, Err = %d",GetLastError());
DatabaseClose(db_handle);
return matrix_;
}
//---
string sql = "SELECT * FROM "+table_name;
int request = DatabasePrepare(db_handle,sql);
ulong cols = DatabaseColumnsCount(request), rows =0;
ArrayResize(column_names,(int)cols);
//---
matrix_.Resize(cols,0);
double time_start = GetMicrosecondCount()/(double)1e6, time_stop=0; //Elapsed time
double row_start = 0, row_stop =0;
for (int j=0; DatabaseRead(request) && !IsStopped(); j++)
{
row_start = GetMicrosecondCount()/(double)1e6;
rows = (ulong)j+1;
matrix_.Resize(cols,rows);
for (ulong k=0; k<cols; k++)
{
DatabaseColumnDouble(request,(int)k,matrix_[k][j]);
if (j==0) DatabaseColumnName(request,(int)k,column_names[k]);
}
if (total != WHOLE_ARRAY)
if (j >= total) break;
#ifdef DEBUG_MODE
row_stop =GetMicrosecondCount()/(double)1e6;
printf("Row ----> %d | Elapsed %s",j,CalcTimeElapsed(row_stop-row_start));
#endif
}
//---
DatabaseFinalize(request);
DatabaseClose(db_handle);
matrix_ = matrix_.Transpose(); //very crucial step
#ifdef DEBUG_MODE
time_stop = GetMicrosecondCount()/(double)1e6;
printf("---> finished reading DB size=(%dx%d) | Time Elapsed %s",rows,cols,CalcTimeElapsed(time_stop-time_start));
ArrayPrint(column_names);
for (ulong i=0; i<5; i++) Print(matrix_.Row(i));
#endif
return matrix_;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::NormalizeDouble_(vector<T> &v,int digits=3)
{
for (ulong i=0; i<v.Size(); i++)
v[i] = NormalizeDouble(v[i], digits);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
void MatrixExtend::NormalizeDouble_(matrix<T> &mat,int digits=3)
{
for (ulong i=0; i<mat.Rows(); i++)
for (ulong j=0; j<mat.Cols(); j++)
mat[i][j] = NormalizeDouble(mat[i][j], digits);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::PrintShort(matrix &matrix_, ulong rows=5,int digits=5)
{
vector v = {};
for (ulong i=0; i<rows; i++)
{
v = matrix_.Row(i);
NormalizeDouble_(v, digits);
Print(v);
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void MatrixExtend::Swap(double &var1,double &var2)
{
double temp_1 = var1, temp2=var2;
var1 = temp2;
var2 = temp_1;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int MatrixExtend::CopyBufferVector(int handle,int buff_num,int start_pos,int count,vector &v)
{
double buff_arr[];
int ret = CopyBuffer(handle, buff_num, start_pos, count, buff_arr);
v = ArrayToVector(buff_arr);
return (ret);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
string MatrixExtend::Stringfy(vector &v, int digits = 2)
{
string str = "";
for (ulong i=0; i<v.Size(); i++)
str += " "+DoubleToString(v[i], digits) + " ";
return (str);
}
//+------------------------------------------------------------------+
//| a function to convert the seconds to hours and minutes, useful   |
//| in measuring the time taken for operations that take a long      |
//| time to complete, such as reading and writing to a large csv file|
//+------------------------------------------------------------------+
string MatrixExtend::ConvertTime(double seconds)
{
string time_str = "";
uint minutes = 0, hours = 0;
if (seconds >= 60)
{
minutes = (uint)(seconds / 60.0) ;
seconds = fmod(seconds, 60.0); //remaining seconds after extracting whole minutes
time_str = StringFormat("%d Minutes and %.3f Seconds", minutes, seconds);
}
if (minutes >= 60)
{
hours = (uint)(minutes / 60.0);
minutes = minutes % 60;
time_str = StringFormat("%d Hours and %d Minutes", hours, minutes);
}
if (time_str == "")
{
time_str = StringFormat("%.3f Seconds", seconds);
}
return time_str;
}
//+------------------------------------------------------------------+
//| Obtains a part of the matrix starting from a start_index row to |
//| end_index row Inclusive |
//+------------------------------------------------------------------+
matrix MatrixExtend::Get(const matrix &mat, ulong start_index, ulong end_index)
{
matrix ret_mat(MathAbs(end_index-start_index+1), mat.Cols());
if (start_index >= mat.Rows())
{
Print(__FUNCTION__," Error | start_index (",start_index,") is greater than or Equal to matrix Rows (",mat.Rows(),")");
return ret_mat;
}
if (end_index > mat.Rows())
{
Print(__FUNCTION__," Error | end_index (",end_index,") is greater than (",mat.Rows(),")");
return ret_mat;
}
if (start_index > end_index)
{
Print(__FUNCTION__," Error | start_index shouldn't be greater than end_index ???");
return ret_mat;
}
for (ulong i=start_index, count =0; i<=end_index; i++, count++)
for (ulong col=0; col<mat.Cols(); col++)
ret_mat[count][col] = mat[i][col];
return ret_mat;
}
//+------------------------------------------------------------------+
//| Obtains a part of the vector starting from a start_index row to |
//| end_index row Inclusive |
//+------------------------------------------------------------------+
vector MatrixExtend::Get(const vector &v, ulong start_index, ulong end_index)
{
vector ret_vec(MathAbs(end_index-start_index+1));
if (start_index >= v.Size())
{
Print(__FUNCTION__,"Error | start_index (",start_index,") is greater than or Equal to matrix Rows (",v.Size(),")");
return ret_vec;
}
if (end_index > v.Size())
{
Print(__FUNCTION__,"Error | end_index (",start_index,") is greater than (",v.Size(),")");
return ret_vec;
}
if (start_index > end_index)
{
Print(__FUNCTION__,"Error | start_index shouldn't be greater than end_index ???");
return ret_vec;
}
for (ulong i=start_index, count=0; i<=end_index; i++, count++)
ret_vec[count] = v[i];
return ret_vec;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::Sign(matrix &x)
{
matrix ret_matrix = x;
for (ulong i=0; i<x.Cols(); i++)
ret_matrix.Col(Sign(x.Col(i)) ,i);
return ret_matrix;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector MatrixExtend::Sign(vector &x)
{
vector v(x.Size());
for (ulong i=0; i<x.Size(); i++)
v[i] = Sign(x[i]);
return v;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix MatrixExtend::eye(uint num_features)
{
matrix ret_matrix(num_features, num_features);
ret_matrix.Fill(0.0);
vector diag(num_features); //the vector must be sized before filling, otherwise Fill is a no-op
diag.Fill(1.0);
ret_matrix.Diag(diag, 0);
return ret_matrix;
}
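//+------------------------------------------------------------------+
//| e.g. eye(3) returns the 3x3 identity matrix                      |
//| [[1,0,0],[0,1,0],[0,0,1]]                                        |
//+------------------------------------------------------------------+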
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool MatrixExtend::write_bin(vector &v,string file)
{
FileDelete(file);
int handle = FileOpen(file,FILE_READ|FILE_WRITE|FILE_BIN,",");
if (handle == INVALID_HANDLE)
{
printf("Invalid handle Err=%d",GetLastError());
DebugBreak();
return false;
}
double arr[];
ArrayResize(arr, (int)v.Size());
for (uint i=0; i<arr.Size(); i++)
arr[i] = v[i];
FileWriteArray(handle, arr);
FileClose(handle);
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| End of file: MALE5/MatrixExtend.mqh                              |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| Tensors.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CMatrix
{
public:
matrix Matrix;
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CTensors
{
CMatrix* matrices[];
public:
CTensors(uint DIM); //For one dimension tensor
~CTensors(void);
uint SIZE;
bool Add(matrix<double> &mat_ , ulong POS);
bool Append(matrix<double> &mat_);
matrix<double> Get(ulong POS);
void Print_();
void Fill(double value);
void MemoryClear();
string shape(); //returns the shape of the tensor
};
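//+------------------------------------------------------------------+
//| Usage sketch (illustrative only):                                |
//|                                                                  |
//|   CTensors *tensor = new CTensors(2); //two matrix slots         |
//|   matrix m1(3,3), m2(3,3); m1.Fill(1.0); m2.Fill(2.0);           |
//|   tensor.Add(m1, 0); tensor.Add(m2, 1);                          |
//|   Print(tensor.shape()); //"(2, 3, 3)" when sizes match          |
//|   delete tensor;                                                 |
//+------------------------------------------------------------------+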
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTensors::CTensors(uint DIM)
{
SIZE = DIM;
ArrayResize(matrices, SIZE);
for (uint i=0; i<SIZE; i++)
matrices[i] = new CMatrix;
for (uint i=0; i<SIZE; i++)
if (CheckPointer(matrices[i]) == POINTER_INVALID)
{
printf("Can't create a tensor, Invalid pointer Err %d ",GetLastError());
return;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTensors::~CTensors(void)
{
for (uint i=0; i<SIZE; i++)
if (CheckPointer(matrices[i]) != POINTER_INVALID)
delete matrices[i];
ArrayFree(matrices);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CTensors::Add(matrix<double> &mat_ , ulong POS)
{
if (POS >= SIZE) //valid indices are 0..SIZE-1
{
Print(__FUNCTION__," Index Error POS =",POS," greater than TENSOR_DIM ",SIZE);
return (false);
}
this.matrices[POS].Matrix = mat_;
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CTensors::Append(matrix<double> &mat_)
{
if (ArrayResize(matrices, SIZE+1)<0)
return false;
SIZE = matrices.Size();
matrices[SIZE-1] = new CMatrix();
matrices[SIZE-1].Matrix = mat_; //Add the new matrix to the newly created tensor index
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensors::Print_(void)
{
for (ulong i=0; i<SIZE; i++)
Print("TENSOR INDEX [",i,"] matrix-size=(",this.matrices[i].Matrix.Rows(),"x",this.matrices[i].Matrix.Cols(),")\n",this.matrices[i].Matrix);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix<double> CTensors::Get(ulong POS)
{
matrix<double> mat={};
if (POS >= SIZE) //valid indices are 0..SIZE-1
{
Print(__FUNCTION__," Index Error POS =",POS," greater than TENSOR_DIM ",SIZE);
return (mat);
}
matrix temp = this.matrices[POS].Matrix;
mat.Assign(temp);
return (mat);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensors::Fill(double value)
{
for (ulong i=0; i<SIZE; i++)
this.matrices[i].Matrix.Fill(value);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensors::MemoryClear(void)
{
for (ulong i=0; i<SIZE; i++)
{
this.matrices[i].Matrix.Resize(1,0);
ZeroMemory(this.matrices[i].Matrix);
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
string CTensors::shape(void)
{
printf("Warning: %s assumes all matrices in the tensor have the same size",__FUNCTION__);
return StringFormat("(%d, %d, %d)",this.SIZE,this.matrices[0].Matrix.Rows(),this.matrices[0].Matrix.Cols());
}
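//+------------------------------------------------------------------+
//| Usage sketch (illustrative only; the 2x2 matrix values are       |
//| made up, not part of the library):                               |
//|                                                                  |
//|   CTensors *tensor = new CTensors(2);                            |
//|   matrix<double> m = {{1,2},{3,4}};                              |
//|   tensor.Add(m, 0);       // place a matrix at index 0           |
//|   tensor.Append(m);       // grow the tensor to size 3           |
//|   Print(tensor.shape());  // "(3, 2, 2)" once all slots match    |
//|   delete tensor;                                                 |
//+------------------------------------------------------------------+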
//+------------------------------------------------------------------+
//| |
//| Tensors for Vector type of data |
//| |
//+------------------------------------------------------------------+
class CVectors
{
public:
vector Vector;
};
//---
class CTensorsVectors
{
CVectors *vectors[];
private:
uint SIZE;
public:
CTensorsVectors(uint DIM);
~CTensorsVectors(void);
bool Add(vector &v, ulong POS);
void Print_(void);
vector Get(ulong POS);
void Fill(double value);
void MemoryClear();
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTensorsVectors::CTensorsVectors(uint DIM)
{
SIZE = DIM;
ArrayResize(vectors, SIZE);
for (uint i=0; i<SIZE; i++)
vectors[i] = new CVectors;
for (uint i=0; i<SIZE; i++)
if (CheckPointer(vectors[i]) == POINTER_INVALID)
{
printf("Can't create a tensor, Invalid pointer Err %d ",GetLastError());
return;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CTensorsVectors::~CTensorsVectors(void)
{
for (uint i=0; i<SIZE; i++)
if (CheckPointer(vectors[i]) != POINTER_INVALID)
delete vectors[i];
ArrayFree(vectors);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CTensorsVectors::Add(vector &v, ulong POS)
{
if (POS >= SIZE)
{
Print(__FUNCTION__," Index Error POS =",POS," out of range for TENSOR_DIM ",SIZE);
return (false);
}
this.vectors[POS].Vector = v;
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensorsVectors::Print_(void)
{
for (ulong i=0; i<SIZE; i++)
Print("TENSOR INDEX [",i,"] vector-size =(",this.vectors[i].Vector.Size(),")\n",this.vectors[i].Vector);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CTensorsVectors::Get(ulong POS)
{
if (POS >= SIZE)
{
Print(__FUNCTION__," Index Error POS =",POS," out of range for TENSOR_DIM ",SIZE);
vector v = {};
return (v);
}
return (this.vectors[POS].Vector);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensorsVectors::Fill(double value)
{
for (ulong i=0; i<SIZE; i++)
this.vectors[i].Vector.Fill(value);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CTensorsVectors::MemoryClear(void)
{
for (ulong i=0; i<SIZE; i++)
{
this.vectors[i].Vector.Resize(1,0);
ZeroMemory(this.vectors[i].Vector);
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
| MALE5/Tensors.mqh/0 | {
"file_path": "MALE5/Tensors.mqh",
"repo_id": "MALE5",
"token_count": 4342
} | 36 |
//+------------------------------------------------------------------+
//| K-means test.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
#property strict
#property script_show_inputs
//+------------------------------------------------------------------+
#include "KMeans.mqh";
CKMeans *clustering;
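// NOTE: the CGraphic instance 'graph' used by ScatterPlotsMatrix() below is
// assumed to be declared in KMeans.mqh; it is not declared in this file.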
bool ChartShow = true;
enum plot_enum
{ CLUSTER_PLOT, ELBOW_PLOT };
input plot_enum PlotOnChart = CLUSTER_PLOT;
input int input_clusters = 3;
input int MATRIXDIMENSION = 1;
input group "ELBOW METHOD";
input int init_clusters = 1;
input int k_clusters = 10;
input group "BARS";
input int bars = 20;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
matrix DMatrix = {};
DMatrix.Resize(bars, MATRIXDIMENSION); //the number of columns determines the dimension of the dataset; a 1D dataset won't be visualized properly
vector column_v = {};
ulong start = 0;
for (ulong i=0; i<(ulong)MATRIXDIMENSION; i++)
{
column_v.CopyRates(Symbol(),PERIOD_CURRENT,COPY_RATES_CLOSE,start,bars);
DMatrix.Col(column_v,i);
start += bars;
}
//---
MeanNormalization(DMatrix);
matrix clusterd_mat={}, centroids_mat = {};
clustering = new CKMeans(DMatrix,input_clusters);
clustering.KMeansClustering(clusterd_mat,centroids_mat,k_clusters,false);
Print("clustered matrix\n",clusterd_mat,"\ncentroids_mat\n",centroids_mat);
bool elbow_show = false;
if (PlotOnChart == CLUSTER_PLOT)
{
ObjectDelete(0,"graph"); ObjectDelete(0,"elbow");
ScatterPlotsMatrix("graph",clusterd_mat,"cluster 1");
}
else elbow_show = true;
Sleep(100);
clustering.ElbowMethod(init_clusters,k_clusters,elbow_show);
delete(clustering);
}
//+------------------------------------------------------------------+
bool ScatterPlotsMatrix(
string obj_name,
matrix &_matrix,
string legend,
string x_axis_label = "x-axis",
string y_axis_label = "y-axis",
color clr = clrDodgerBlue,
bool points_fill = true
)
{
if (!graph.Create(0,obj_name,0,30,70,600,640))
{
printf("Failed to Create graphical object on the Main chart Err = %d",GetLastError());
return(false);
}
ChartSetInteger(0,CHART_SHOW,ChartShow);
double x_arr[], y_arr[];
vector x = {}, y = {};
y = _matrix.Row(0); x = _matrix.Row(1);
clustering.FilterZero(x);
clustering.FilterZero(y);
vectortoArray(x, x_arr);
vectortoArray(y, y_arr);
//--- additional curves
//graph.CurveAdd(y_arr,y_arr,clrBlack,CURVE_POINTS,y_axis_label);
for (ulong i=0; i<_matrix.Rows(); i++)
{
x = _matrix.Row(i);
clustering.FilterZero(x); vectortoArray(x,x_arr);
graph.CurveAdd(x_arr,CURVE_POINTS," cluster "+string(i+1));
}
//---
graph.XAxis().Name(x_axis_label);
graph.XAxis().NameSize(13);
graph.YAxis().Name(y_axis_label);
graph.YAxis().NameSize(13);
graph.FontSet("Lucida Console",13);
graph.CurvePlotAll();
graph.Update();
return(true);
}
//+------------------------------------------------------------------+
void vectortoArray(vector &v, double &Arr[])
{
ArrayResize(Arr,(int)v.Size());
for (int i=0; i<(int)v.Size(); i++)
{ Arr[i] = v[i]; }
}
//+------------------------------------------------------------------+
void MeanNormalization(matrix &mat)
{
vector v = {};
for (ulong i=0; i<mat.Cols(); i++)
{
v = mat.Col(i);
MeanNormalization(v);
mat.Col(v,i);
}
}
//+------------------------------------------------------------------+
void MeanNormalization(vector &v)
{
double mean = v.Mean(),
max = v.Max(),
min = v.Min();
for (ulong i=0; i<v.Size(); i++)
v[i] = (v[i] - mean) / (max - min);
}
//+------------------------------------------------------------------+
| Data-Mining-MQL5/K-means test.mq5/0 | {
"file_path": "Data-Mining-MQL5/K-means test.mq5",
"repo_id": "Data-Mining-MQL5",
"token_count": 2185
} | 0 |
{% extends "base_template.html" %}
{% block content %}
<h1>Renew: {{book.title}}</h1>
<p>Borrower: {{bookinstance.borrower}}</p>
<p {% if bookinstance.is_overdue %} class="text-danger" {% endif %}>Due date: {{ bookinstance.due_back }}</p>
<form action="" method="post">
{% csrf_token %}
<table>
{{ form.as_table }}
</table>
<input type="submit" value="submit">
</form>
{% endblock %} | Django-locallibrary/LocalLibrary/catalog/Templates/renew_book_librarian.html/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/renew_book_librarian.html",
"repo_id": "Django-locallibrary",
"token_count": 253
} | 1 |
# Generated by Django 3.2.3 on 2021-08-28 04:48
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.TextField(max_length=100)),
('last_name', models.TextField(max_length=100)),
('date_of_birth', models.DateField(blank=True, null=True)),
('date_of_death', models.DateField(blank=True, null=True, verbose_name='Died')),
],
options={
'ordering': ['last_name', 'first_name'],
},
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('summary', models.TextField(help_text='Enter a brief description of the book', max_length=1000)),
('isbn', models.CharField(help_text='13 Character <a href="https://isbn-international.org/content/what-isbn">ISBN Number</a>', max_length=13, unique=True, verbose_name='ISBN')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.author')),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter a book genre (e.g. Science, Fiction, ...)', max_length=200)),
],
),
migrations.CreateModel(
name='BookInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, help_text='Unique ID for this particular book across the whole library', primary_key=True, serialize=False)),
('imprint', models.CharField(max_length=200)),
('due_back', models.DateField(blank=True, null=True)),
('status', models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On Loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book Availability', max_length=1)),
('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.RESTRICT, to='catalog.book')),
],
options={
'ordering': ['due_back'],
},
),
migrations.AddField(
model_name='book',
name='genre',
field=models.ManyToManyField(help_text='select a genre for the book', to='catalog.Genre'),
),
]
| Django-locallibrary/LocalLibrary/catalog/migrations/0001_initial.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/migrations/0001_initial.py",
"repo_id": "Django-locallibrary",
"token_count": 1367
} | 2 |
"""Run the EasyInstall command"""
if __name__ == '__main__':
from setuptools.command.easy_install import main
main()
| Django-locallibrary/env/Lib/site-packages/easy_install.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/easy_install.py",
"repo_id": "Django-locallibrary",
"token_count": 42
} | 3 |
from __future__ import absolute_import, division
import contextlib
import itertools
import logging
import sys
import time
from pip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterator, IO
logger = logging.getLogger(__name__)
class SpinnerInterface(object):
def spin(self):
# type: () -> None
raise NotImplementedError()
def finish(self, final_status):
# type: (str) -> None
raise NotImplementedError()
class InteractiveSpinner(SpinnerInterface):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
# type: (str, IO[str], str, float) -> None
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
# type: (str) -> None
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
# type: () -> None
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
# type: (str) -> None
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(SpinnerInterface):
def __init__(self, message, min_update_interval_seconds=60):
# type: (str, float) -> None
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
# type: (str) -> None
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
# type: () -> None
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
# type: (str) -> None
if self._finished:
return
self._update(
"finished with status '{final_status}'".format(**locals()))
self._finished = True
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
# type: (float) -> None
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0 # type: float
def ready(self):
# type: () -> bool
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
# type: () -> None
self._last_update = time.time()
@contextlib.contextmanager
def open_spinner(message):
# type: (str) -> Iterator[SpinnerInterface]
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message) # type: SpinnerInterface
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
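# Usage sketch (illustrative; do_work() is a hypothetical generator, not a
# pip API):
#
#     with open_spinner("Building wheel") as spinner:
#         for _ in do_work():
#             spinner.spin()
#     # on normal exit prints "... done"; "canceled"/"error" on failure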
@contextlib.contextmanager
def hidden_cursor(file):
# type: (IO[str]) -> Iterator[None]
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/spinners.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/spinners.py",
"repo_id": "Django-locallibrary",
"token_count": 2239
} | 4 |
from __future__ import absolute_import
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.req_command import SessionCommandMixin
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import InstallationError
from pip._internal.req import parse_requirements
from pip._internal.req.constructors import (
install_req_from_line,
install_req_from_parsed_requirement,
)
from pip._internal.utils.misc import protect_pip_from_modification_on_windows
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List
class UninstallCommand(Command, SessionCommandMixin):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
session = self.get_default_session(options)
reqs_to_uninstall = {}
for name in args:
req = install_req_from_line(
name, isolated=options.isolated_mode,
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
for filename in options.requirements:
for parsed_req in parse_requirements(
filename,
options=options,
session=session):
req = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
if not reqs_to_uninstall:
raise InstallationError(
'You must give at least one requirement to {self.name} (see '
'"pip help {self.name}")'.format(**locals())
)
protect_pip_from_modification_on_windows(
modifying_pip="pip" in reqs_to_uninstall
)
for req in reqs_to_uninstall.values():
uninstall_pathset = req.uninstall(
auto_confirm=options.yes, verbose=self.verbosity > 0,
)
if uninstall_pathset:
uninstall_pathset.commit()
return SUCCESS
| Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/uninstall.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/uninstall.py",
"repo_id": "Django-locallibrary",
"token_count": 1461
} | 5 |
"""
Requirements file parsing
"""
from __future__ import absolute_import
import optparse
import os
import re
import shlex
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.cli import cmdoptions
from pip._internal.exceptions import (
InstallationError,
RequirementsFileParseError,
)
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.encoding import auto_decode
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import get_url_scheme
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import (
Any, Callable, Dict, Iterator, List, NoReturn, Optional, Text, Tuple,
)
from pip._internal.index.package_finder import PackageFinder
from pip._internal.network.session import PipSession
ReqFileLines = Iterator[Tuple[int, Text]]
LineParser = Callable[[Text], Tuple[str, Values]]
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s+)#.*$')
# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
# variable name consisting of only uppercase letters, digits or the '_'
# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
# 2013 Edition.
ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')
SUPPORTED_OPTIONS = [
cmdoptions.index_url,
cmdoptions.extra_index_url,
cmdoptions.no_index,
cmdoptions.constraints,
cmdoptions.requirements,
cmdoptions.editable,
cmdoptions.find_links,
cmdoptions.no_binary,
cmdoptions.only_binary,
cmdoptions.prefer_binary,
cmdoptions.require_hashes,
cmdoptions.pre,
cmdoptions.trusted_host,
cmdoptions.use_new_feature,
] # type: List[Callable[..., optparse.Option]]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options,
cmdoptions.hash,
] # type: List[Callable[..., optparse.Option]]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
class ParsedRequirement(object):
def __init__(
self,
requirement, # type:str
is_editable, # type: bool
comes_from, # type: str
constraint, # type: bool
options=None, # type: Optional[Dict[str, Any]]
line_source=None, # type: Optional[str]
):
# type: (...) -> None
self.requirement = requirement
self.is_editable = is_editable
self.comes_from = comes_from
self.options = options
self.constraint = constraint
self.line_source = line_source
class ParsedLine(object):
def __init__(
self,
filename, # type: str
lineno, # type: int
comes_from, # type: Optional[str]
args, # type: str
opts, # type: Values
constraint, # type: bool
):
# type: (...) -> None
self.filename = filename
self.lineno = lineno
self.comes_from = comes_from
self.opts = opts
self.constraint = constraint
if args:
self.is_requirement = True
self.is_editable = False
self.requirement = args
elif opts.editables:
self.is_requirement = True
self.is_editable = True
# We don't support multiple -e on one line
self.requirement = opts.editables[0]
else:
self.is_requirement = False
def parse_requirements(
filename, # type: str
session, # type: PipSession
finder=None, # type: Optional[PackageFinder]
comes_from=None, # type: Optional[str]
options=None, # type: Optional[optparse.Values]
constraint=False, # type: bool
):
# type: (...) -> Iterator[ParsedRequirement]
"""Parse a requirements file and yield ParsedRequirement instances.
:param filename: Path or url of requirements file.
:param session: PipSession instance.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param constraint: If true, parsing a constraint file rather than
requirements file.
"""
line_parser = get_line_parser(finder)
parser = RequirementsFileParser(session, line_parser, comes_from)
for parsed_line in parser.parse(filename, constraint):
parsed_req = handle_line(
parsed_line,
options=options,
finder=finder,
session=session
)
if parsed_req is not None:
yield parsed_req
def preprocess(content):
# type: (Text) -> ReqFileLines
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
"""
lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = expand_env_variables(lines_enum)
return lines_enum
def handle_requirement_line(
line, # type: ParsedLine
options=None, # type: Optional[optparse.Values]
):
# type: (...) -> ParsedRequirement
# preserve for the nested code path
line_comes_from = '{} {} (line {})'.format(
'-c' if line.constraint else '-r', line.filename, line.lineno,
)
assert line.is_requirement
if line.is_editable:
# For editable requirements, we don't support per-requirement
# options, so just return the parsed requirement.
return ParsedRequirement(
requirement=line.requirement,
is_editable=line.is_editable,
comes_from=line_comes_from,
constraint=line.constraint,
)
else:
if options:
# Disable wheels if the user has specified build options
cmdoptions.check_install_build_global(options, line.opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
req_options[dest] = line.opts.__dict__[dest]
line_source = 'line {} of {}'.format(line.lineno, line.filename)
return ParsedRequirement(
requirement=line.requirement,
is_editable=line.is_editable,
comes_from=line_comes_from,
constraint=line.constraint,
options=req_options,
line_source=line_source,
)
def handle_option_line(
opts, # type: Values
filename, # type: str
lineno, # type: int
finder=None, # type: Optional[PackageFinder]
options=None, # type: Optional[optparse.Values]
session=None, # type: Optional[PipSession]
):
# type: (...) -> None
if options:
# percolate options upward
if opts.require_hashes:
options.require_hashes = opts.require_hashes
if opts.features_enabled:
options.features_enabled.extend(
f for f in opts.features_enabled
if f not in options.features_enabled
)
# set finder options
if finder:
find_links = finder.find_links
index_urls = finder.index_urls
if opts.index_url:
index_urls = [opts.index_url]
if opts.no_index is True:
index_urls = []
if opts.extra_index_urls:
index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
find_links.append(value)
search_scope = SearchScope(
find_links=find_links,
index_urls=index_urls,
)
finder.search_scope = search_scope
if opts.pre:
finder.set_allow_all_prereleases()
if opts.prefer_binary:
finder.set_prefer_binary()
if session:
for host in opts.trusted_hosts or []:
source = 'line {} of {}'.format(lineno, filename)
session.add_trusted_host(host, source=source)
def handle_line(
line, # type: ParsedLine
options=None, # type: Optional[optparse.Values]
finder=None, # type: Optional[PackageFinder]
session=None, # type: Optional[PipSession]
):
# type: (...) -> Optional[ParsedRequirement]
"""Handle a single parsed requirements line; This can result in
creating/yielding requirements, or updating the finder.
:param line: The parsed line to be processed.
:param options: CLI options.
:param finder: The finder - updated by non-requirement lines.
:param session: The session - updated by non-requirement lines.
Returns a ParsedRequirement object if the line is a requirement line,
otherwise returns None.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
(although our docs imply only one is supported), and all are parsed and
affect the finder.
"""
if line.is_requirement:
parsed_req = handle_requirement_line(line, options)
return parsed_req
else:
handle_option_line(
line.opts,
line.filename,
line.lineno,
finder,
options,
session,
)
return None
class RequirementsFileParser(object):
def __init__(
self,
session, # type: PipSession
line_parser, # type: LineParser
comes_from, # type: Optional[str]
):
# type: (...) -> None
self._session = session
self._line_parser = line_parser
self._comes_from = comes_from
def parse(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
"""Parse a given file, yielding parsed lines.
"""
for line in self._parse_and_recurse(filename, constraint):
yield line
def _parse_and_recurse(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
for line in self._parse_file(filename, constraint):
if (
not line.is_requirement and
(line.opts.requirements or line.opts.constraints)
):
# parse a nested requirements file
if line.opts.requirements:
req_path = line.opts.requirements[0]
nested_constraint = False
else:
req_path = line.opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_path = os.path.join(
os.path.dirname(filename), req_path,
)
for inner_line in self._parse_and_recurse(
req_path, nested_constraint,
):
yield inner_line
else:
yield line
def _parse_file(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
_, content = get_file_content(
filename, self._session, comes_from=self._comes_from
)
lines_enum = preprocess(content)
for line_number, line in lines_enum:
try:
args_str, opts = self._line_parser(line)
except OptionParsingError as e:
# add offending line
msg = 'Invalid requirement: {}\n{}'.format(line, e.msg)
raise RequirementsFileParseError(msg)
yield ParsedLine(
filename,
line_number,
self._comes_from,
args_str,
opts,
constraint,
)
def get_line_parser(finder):
# type: (Optional[PackageFinder]) -> LineParser
def parse_line(line):
# type: (Text) -> Tuple[str, Values]
# Build new parser for each line since it accumulates appendable
# options.
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
# Prior to 2.7.3, shlex cannot deal with unicode entries
if sys.version_info < (2, 7, 3):
# https://github.com/python/mypy/issues/1174
options_str = options_str.encode('utf8') # type: ignore
# https://github.com/python/mypy/issues/1174
opts, _ = parser.parse_args(
shlex.split(options_str), defaults) # type: ignore
return args_str, opts
return parse_line
def break_args_options(line):
# type: (Text) -> Tuple[str, Text]
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options) # type: ignore
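# Illustrative example: the requirement line
#     'SomePackage==1.0 --hash=sha256:abcd'
# is split into args 'SomePackage==1.0' and options '--hash=sha256:abcd',
# so markers in the args part never pass through shlex.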
class OptionParsingError(Exception):
def __init__(self, msg):
# type: (str) -> None
self.msg = msg
def build_parser():
# type: () -> optparse.OptionParser
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
# type: (Any, str) -> NoReturn
raise OptionParsingError(msg)
# NOTE: mypy disallows assigning to a method
# https://github.com/python/mypy/issues/2427
parser.exit = parser_exit # type: ignore
return parser
def join_lines(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line = [] # type: List[Text]
for line_number, line in lines_enum:
if not line.endswith('\\') or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = ' ' + line
if new_line:
new_line.append(line)
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip('\\'))
# last line contains \
if new_line:
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line)
# TODO: handle space after '\'.
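# Illustrative example: the two physical lines
#     'SomePackage \'
#     '    --hash=sha256:abcd'
# are joined into one logical line 'SomePackage     --hash=sha256:abcd'
# reported at the first line's number.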
def ignore_comments(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""
Strips comments and filter empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line_number, line
def expand_env_variables(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line
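# Illustrative example: with MY_INDEX_URL=https://example.com/simple in the
# environment, the line '--index-url ${MY_INDEX_URL}' becomes
# '--index-url https://example.com/simple'; lower-case or unbraced forms
# such as $my_var are left untouched.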
def get_file_content(url, session, comes_from=None):
# type: (str, PipSession, Optional[str]) -> Tuple[str, Text]
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files.
:param url: File path or url.
:param session: PipSession instance.
:param comes_from: Origin description of requirements.
"""
scheme = get_url_scheme(url)
if scheme in ['http', 'https']:
# FIXME: catch some errors
resp = session.get(url)
raise_for_status(resp)
return resp.url, resp.text
elif scheme == 'file':
if comes_from and comes_from.startswith('http'):
raise InstallationError(
'Requirements file {} references URL {}, '
'which is local'.format(comes_from, url)
)
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: {}'.format(exc)
)
return url, content
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_file.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_file.py",
"repo_id": "Django-locallibrary",
"token_count": 8417
} | 6 |
from distutils.errors import DistutilsArgError
from distutils.fancy_getopt import FancyGetopt
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, List
_options = [
("exec-prefix=", None, ""),
("home=", None, ""),
("install-base=", None, ""),
("install-data=", None, ""),
("install-headers=", None, ""),
("install-lib=", None, ""),
("install-platlib=", None, ""),
("install-purelib=", None, ""),
("install-scripts=", None, ""),
("prefix=", None, ""),
("root=", None, ""),
("user", None, ""),
]
# typeshed doesn't permit Tuple[str, None, str], see python/typeshed#3469.
_distutils_getopt = FancyGetopt(_options) # type: ignore
def parse_distutils_args(args):
# type: (List[str]) -> Dict[str, str]
"""Parse provided arguments, returning an object that has the
matched arguments.
Any unknown arguments are ignored.
"""
result = {}
for arg in args:
try:
_, match = _distutils_getopt.getopt(args=[arg])
except DistutilsArgError:
# We don't care about any other options, which here may be
# considered unrecognized since our option list is not
# exhaustive.
pass
else:
result.update(match.__dict__)
return result
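# Illustrative example (flag values are whatever FancyGetopt stores, e.g. 1
# for boolean switches):
#
#     parse_distutils_args(["--user", "--prefix=/opt/py"])
#     # -> something like {'user': 1, 'prefix': '/opt/py'}
#
# Unknown arguments such as '--unknown' are silently ignored.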
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/distutils_args.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/distutils_args.py",
"repo_id": "Django-locallibrary",
"token_count": 539
} | 7 |
from __future__ import absolute_import
import errno
import itertools
import logging
import os.path
import tempfile
from contextlib import contextmanager
from pip._vendor.contextlib2 import ExitStack
from pip._vendor.six import ensure_text
from pip._internal.utils.misc import enum, rmtree
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Dict, Iterator, Optional, TypeVar, Union
_T = TypeVar('_T', bound='TempDirectory')
logger = logging.getLogger(__name__)
# Kinds of temporary directories. Only needed for ones that are
# globally-managed.
tempdir_kinds = enum(
BUILD_ENV="build-env",
EPHEM_WHEEL_CACHE="ephem-wheel-cache",
REQ_BUILD="req-build",
)
_tempdir_manager = None # type: Optional[ExitStack]
@contextmanager
def global_tempdir_manager():
# type: () -> Iterator[None]
global _tempdir_manager
with ExitStack() as stack:
old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
try:
yield
finally:
_tempdir_manager = old_tempdir_manager
class TempDirectoryTypeRegistry(object):
"""Manages temp directory behavior
"""
def __init__(self):
# type: () -> None
self._should_delete = {} # type: Dict[str, bool]
def set_delete(self, kind, value):
# type: (str, bool) -> None
"""Indicate whether a TempDirectory of the given kind should be
auto-deleted.
"""
self._should_delete[kind] = value
def get_delete(self, kind):
# type: (str) -> bool
"""Get configured auto-delete flag for a given TempDirectory type,
default True.
"""
return self._should_delete.get(kind, True)
_tempdir_registry = None # type: Optional[TempDirectoryTypeRegistry]
@contextmanager
def tempdir_registry():
# type: () -> Iterator[TempDirectoryTypeRegistry]
"""Provides a scoped global tempdir registry that can be used to dictate
whether directories should be deleted.
"""
global _tempdir_registry
old_tempdir_registry = _tempdir_registry
_tempdir_registry = TempDirectoryTypeRegistry()
try:
yield _tempdir_registry
finally:
_tempdir_registry = old_tempdir_registry
class _Default(object):
pass
_default = _Default()
class TempDirectory(object):
"""Helper class that owns and cleans up a temporary directory.
This class can be used as a context manager or as an OO representation of a
temporary directory.
Attributes:
path
Location to the created temporary directory
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
Methods:
cleanup()
Deletes the temporary directory
When used as a context manager, if the delete attribute is True, on
exiting the context the temporary directory is deleted.
"""
def __init__(
self,
path=None, # type: Optional[str]
delete=_default, # type: Union[bool, None, _Default]
kind="temp", # type: str
globally_managed=False, # type: bool
):
super(TempDirectory, self).__init__()
if delete is _default:
if path is not None:
# If we were given an explicit directory, resolve delete option
# now.
delete = False
else:
# Otherwise, we wait until cleanup and see what
# tempdir_registry says.
delete = None
if path is None:
path = self._create(kind)
self._path = path
self._deleted = False
self.delete = delete
self.kind = kind
if globally_managed:
assert _tempdir_manager is not None
_tempdir_manager.enter_context(self)
@property
def path(self):
# type: () -> str
assert not self._deleted, (
"Attempted to access deleted path: {}".format(self._path)
)
return self._path
def __repr__(self):
# type: () -> str
return "<{} {!r}>".format(self.__class__.__name__, self.path)
def __enter__(self):
# type: (_T) -> _T
return self
def __exit__(self, exc, value, tb):
# type: (Any, Any, Any) -> None
if self.delete is not None:
delete = self.delete
elif _tempdir_registry:
delete = _tempdir_registry.get_delete(self.kind)
else:
delete = True
if delete:
self.cleanup()
def _create(self, kind):
# type: (str) -> str
"""Create a temporary directory and store its path in self.path
"""
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
path = os.path.realpath(
tempfile.mkdtemp(prefix="pip-{}-".format(kind))
)
logger.debug("Created temporary directory: %s", path)
return path
def cleanup(self):
# type: () -> None
"""Remove the temporary directory created and reset state
"""
self._deleted = True
if os.path.exists(self._path):
# Make sure to pass unicode on Python 2 to make the contents also
# use unicode, ensuring non-ASCII names can be represented.
rmtree(ensure_text(self._path))
class AdjacentTempDirectory(TempDirectory):
"""Helper class that creates a temporary directory adjacent to a real one.
Attributes:
original
The original directory to create a temp directory for.
path
After calling create() or entering, contains the full
path to the temporary directory.
delete
Whether the directory should be deleted when exiting
(when used as a contextmanager)
"""
# The characters that may be used to name the temp directory
# We always prepend a ~ and then rotate through these until
# a usable name is found.
# pkg_resources raises a different error for .dist-info folder
# with leading '-' and invalid metadata
LEADING_CHARS = "-~.=%0123456789"
def __init__(self, original, delete=None):
# type: (str, Optional[bool]) -> None
self.original = original.rstrip('/\\')
super(AdjacentTempDirectory, self).__init__(delete=delete)
@classmethod
def _generate_names(cls, name):
# type: (str) -> Iterator[str]
"""Generates a series of temporary names.
The algorithm replaces the leading characters in the name
with ones that are valid filesystem characters, but are not
valid package names (for both Python and pip definitions of
package).
"""
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i - 1):
new_name = '~' + ''.join(candidate) + name[i:]
if new_name != name:
yield new_name
# If we make it this far, we will have to make a longer name
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(
cls.LEADING_CHARS, i):
new_name = '~' + ''.join(candidate) + name
if new_name != name:
yield new_name
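# Illustrative example: for an original directory named 'pkg' this yields
# same-length candidates first ('~kg', '~-g', '~~g', ...) and only then
# longer ones ('~pkg', '~-pkg', ...), keeping the adjacent name as close
# to the original length as possible.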
def _create(self, kind):
# type: (str) -> str
root, name = os.path.split(self.original)
for candidate in self._generate_names(name):
path = os.path.join(root, candidate)
try:
os.mkdir(path)
except OSError as ex:
# Continue if the name exists already
if ex.errno != errno.EEXIST:
raise
else:
path = os.path.realpath(path)
break
else:
# Final fallback on the default behavior.
path = os.path.realpath(
tempfile.mkdtemp(prefix="pip-{}-".format(kind))
)
logger.debug("Created temporary directory: %s", path)
return path
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/temp_dir.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/temp_dir.py",
"repo_id": "Django-locallibrary",
"token_count": 3526
} | 8 |
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import re
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path,
is_console_interactive,
rmtree,
split_auth_from_netloc,
)
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import VersionControl, vcs
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.subprocess import CommandArgs
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
@classmethod
def should_add_vcs_url_prefix(cls, remote_url):
return True
@staticmethod
def get_base_rev_args(rev):
return ['-r', rev]
@classmethod
def get_revision(cls, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, _ in os.walk(location):
if cls.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(cls.dirname)
entries_fn = os.path.join(base, cls.dirname, 'entries')
if not os.path.exists(entries_fn):
# FIXME: should we warn?
continue
dirurl, localrev = cls._get_svn_url_rev(base)
if base == location:
base = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
@classmethod
def get_netloc_and_auth(cls, netloc, scheme):
"""
This override allows the auth information to be passed to svn via the
--username and --password options instead of via the URL.
"""
if scheme == 'ssh':
# The --username and --password options can't be used for
# svn+ssh URLs, so keep the auth information in the URL.
return super(Subversion, cls).get_netloc_and_auth(netloc, scheme)
return split_auth_from_netloc(netloc)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
# Hotfix the URL scheme: 'svn+' was stripped from 'svn+ssh://' above, so re-add it.
url, rev, user_pass = super(Subversion, cls).get_url_rev_and_auth(url)
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev, user_pass
@staticmethod
def make_rev_args(username, password):
# type: (Optional[str], Optional[HiddenText]) -> CommandArgs
extra_args = [] # type: CommandArgs
if username:
extra_args += ['--username', username]
if password:
extra_args += ['--password', password]
return extra_args
@classmethod
def get_remote_url(cls, location):
# In cases where the source is in a subdirectory, not alongside
# setup.py we have to look up in the location until we find a real
# setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
return cls._get_svn_url_rev(location)[0]
@classmethod
def _get_svn_url_rev(cls, location):
from pip._internal.exceptions import SubProcessError
entries_path = os.path.join(location, cls.dirname, 'entries')
if os.path.exists(entries_path):
with open(entries_path) as f:
data = f.read()
else: # subversion >= 1.7 does not have the 'entries' file
data = ''
if (data.startswith('8') or
data.startswith('9') or
data.startswith('10')):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError(
'Badly formatted data: {data!r}'.format(**locals()))
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
# Note that using get_remote_call_options is not necessary here
# because `svn info` is being run against a local directory.
# We don't need to worry about making sure interactive mode
# is being used to prompt for passwords, because passwords
# are only potentially needed for remote server requests.
xml = cls.run_command(
['info', '--xml', location],
)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [
int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
]
except SubProcessError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
def __init__(self, use_interactive=None):
# type: (bool) -> None
if use_interactive is None:
use_interactive = is_console_interactive()
self.use_interactive = use_interactive
# This member is used to cache the fetched version of the current
# ``svn`` client.
# Special value definitions:
# None: Not evaluated yet.
# Empty tuple: Could not parse version.
self._vcs_version = None # type: Optional[Tuple[int, ...]]
super(Subversion, self).__init__()
def call_vcs_version(self):
# type: () -> Tuple[int, ...]
"""Query the version of the currently installed Subversion client.
:return: A tuple containing the parts of the version information or
``()`` if the version returned from ``svn`` could not be parsed.
:raises: BadCommand: If ``svn`` is not installed.
"""
# Example versions:
# svn, version 1.10.3 (r1842928)
# compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
# svn, version 1.7.14 (r1542130)
# compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
# svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)
# compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2
version_prefix = 'svn, version '
version = self.run_command(['--version'])
if not version.startswith(version_prefix):
return ()
version = version[len(version_prefix):].split()[0]
version_list = version.partition('-')[0].split('.')
try:
parsed_version = tuple(map(int, version_list))
except ValueError:
return ()
return parsed_version
def get_vcs_version(self):
# type: () -> Tuple[int, ...]
"""Return the version of the currently installed Subversion client.
If the version of the Subversion client has already been queried,
a cached value will be used.
:return: A tuple containing the parts of the version information or
``()`` if the version returned from ``svn`` could not be parsed.
:raises: BadCommand: If ``svn`` is not installed.
"""
if self._vcs_version is not None:
# Use cached version, if available.
# If parsing the version failed previously (empty tuple),
# do not attempt to parse it again.
return self._vcs_version
vcs_version = self.call_vcs_version()
self._vcs_version = vcs_version
return vcs_version
def get_remote_call_options(self):
# type: () -> CommandArgs
"""Return options to be used on calls to Subversion that contact the server.
These options are applicable for the following ``svn`` subcommands used
in this class.
- checkout
- export
- switch
- update
:return: A list of command line arguments to pass to ``svn``.
"""
if not self.use_interactive:
# --non-interactive switch is available since Subversion 0.14.4.
# Subversion < 1.8 runs in interactive mode by default.
return ['--non-interactive']
svn_version = self.get_vcs_version()
# By default, Subversion >= 1.8 runs in non-interactive mode if
# stdin is not a TTY. Since that is how pip invokes SVN, in
# call_subprocess(), pip must pass --force-interactive to ensure
# the user can be prompted for a password, if required.
# SVN added the --force-interactive option in SVN 1.8. Since
# e.g. RHEL/CentOS 7, which is supported until 2024, ships with
# SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip
# can't safely add the option if the SVN version is < 1.8 (or unknown).
if svn_version >= (1, 8):
return ['--force-interactive']
return []
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the svn repository at the url to the destination location"""
url, rev_options = self.get_url_rev_options(url)
logger.info('Exporting svn repository %s to %s', url, location)
with indent_log():
if os.path.exists(location):
# Subversion doesn't like to check out over an existing
# directory --force fixes this, but was only added in svn 1.5
rmtree(location)
cmd_args = make_command(
'export', self.get_remote_call_options(),
rev_options.to_args(), url, location,
)
self.run_command(cmd_args)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
cmd_args = make_command(
'checkout', '-q', self.get_remote_call_options(),
rev_options.to_args(), url, dest,
)
self.run_command(cmd_args)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
cmd_args = make_command(
'switch', self.get_remote_call_options(), rev_options.to_args(),
url, dest,
)
self.run_command(cmd_args)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
cmd_args = make_command(
'update', self.get_remote_call_options(), rev_options.to_args(),
dest,
)
self.run_command(cmd_args)
vcs.register(Subversion)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/subversion.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/subversion.py",
"repo_id": "Django-locallibrary",
"token_count": 5596
} | 9 |
from __future__ import division
from datetime import datetime
from pip._vendor.cachecontrol.cache import BaseCache
class RedisCache(BaseCache):
def __init__(self, conn):
self.conn = conn
def get(self, key):
return self.conn.get(key)
def set(self, key, value, expires=None):
if not expires:
self.conn.set(key, value)
else:
expires = expires - datetime.utcnow()
self.conn.setex(key, int(expires.total_seconds()), value)
def delete(self, key):
self.conn.delete(key)
def clear(self):
"""Helper for clearing all the keys in a database. Use with
caution!"""
for key in self.conn.keys():
self.conn.delete(key)
def close(self):
"""Redis uses connection pooling, no need to close the connection."""
pass
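# Usage sketch (illustrative; uses the standalone 'redis', 'requests' and
# 'cachecontrol' packages rather than pip's vendored copies):
#
#     import redis
#     import requests
#     from cachecontrol import CacheControl
#
#     sess = CacheControl(requests.Session(),
#                         cache=RedisCache(redis.Redis(host="localhost")))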
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py",
"repo_id": "Django-locallibrary",
"token_count": 357
} | 10 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'char_to_order_map': KOI8R_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "KOI8-R",
'language': 'Russian',
}
Win1251CyrillicModel = {
'char_to_order_map': win1251_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "windows-1251",
'language': 'Russian',
}
Latin5CyrillicModel = {
'char_to_order_map': latin5_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "ISO-8859-5",
'language': 'Russian',
}
MacCyrillicModel = {
'char_to_order_map': macCyrillic_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "MacCyrillic",
'language': 'Russian',
}
Ibm866Model = {
'char_to_order_map': IBM866_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "IBM866",
'language': 'Russian',
}
Ibm855Model = {
'char_to_order_map': IBM855_char_to_order_map,
'precedence_matrix': RussianLangModel,
'typical_positive_ratio': 0.976601,
'keep_english_letter': False,
'charset_name': "IBM855",
'language': 'Russian',
}
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/langcyrillicmodel.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/langcyrillicmodel.py",
"repo_id": "Django-locallibrary",
"token_count": 12803
} | 11 |
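These model dictionaries are not used directly; each one parameterizes a single-byte charset prober. A minimal sketch of how they are consumed (hedged: `SingleByteCharSetProber` is part of the same vendored chardet package, and confidence scores on inputs this short are not meaningful):

from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
from pip._vendor.chardet.langcyrillicmodel import Win1251CyrillicModel

prober = SingleByteCharSetProber(Win1251CyrillicModel)
prober.feed(b"\xef\xf0\xe8\xe2\xe5\xf2")  # "привет" encoded as windows-1251
print(prober.charset_name, prober.get_confidence())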
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.4.3'
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/colorama/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/colorama/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 80
} | 12 |
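A minimal usage sketch for the names re-exported above; init() is the call that makes ANSI escape sequences render on legacy Windows consoles:

from pip._vendor.colorama import init, deinit, Fore, Style

init()  # wrap stdout/stderr so ANSI codes are translated on Windows
print(Fore.RED + "something went wrong" + Style.RESET_ALL)
deinit()  # restore the original streams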
version = (1, 0, 0)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/_version.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/_version.py",
"repo_id": "Django-locallibrary",
"token_count": 10
} | 13 |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
from pip._vendor.urllib3.response import HTTPResponse
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
from pip._vendor.urllib3.util.retry import Retry
from pip._vendor.urllib3.exceptions import ClosedPoolError
from pip._vendor.urllib3.exceptions import ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import MaxRetryError
from pip._vendor.urllib3.exceptions import NewConnectionError
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
from pip._vendor.urllib3.exceptions import ProtocolError
from pip._vendor.urllib3.exceptions import ReadTimeoutError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
from pip._vendor.urllib3.exceptions import ResponseError
from pip._vendor.urllib3.exceptions import LocationValueError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
InvalidURL)
from .auth import _basic_auth_str
try:
from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS URLs by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
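        # Chunked transfer encoding is used whenever a body is present but no
        # Content-Length header could be computed (e.g. a generator body).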
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
            else:
                # Send the request with chunked transfer encoding.
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
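                    # Manually frame each chunk: hex length, CRLF, payload,
                    # CRLF, then the zero-length chunk terminating the body.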
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7, use buffering of HTTP responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 3.3+
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/adapters.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/adapters.py",
"repo_id": "Django-locallibrary",
"token_count": 9374
} | 14 |
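A short sketch of the adapter machinery described above, written with the public (non-vendored) package names; the retry policy and timeout values are illustrative only:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retries = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503])
session.mount("https://", HTTPAdapter(max_retries=retries))

# (connect timeout, read timeout) tuple, as accepted by send() above
response = session.get("https://example.com/", timeout=(3.05, 27))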
from __future__ import absolute_import
from . import ssl_match_hostname
__all__ = ("ssl_match_hostname",)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 36
} | 15 |
from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
# Use time.monotonic if available.
current_time = getattr(time, "monotonic", time.time)
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed. Omitting the parameter will default the
connect timeout to the system default, probably `the global default
timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time (in seconds) to wait between consecutive
read operations for a response from the server. Omitting the parameter
will default the read timeout to the system default, probably `the
global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, "connect")
self._read = self._validate_timeout(read, "read")
self.total = self._validate_timeout(total, "total")
self._start_connect = None
def __repr__(self):
return "%s(connect=%r, read=%r, total=%r)" % (
type(self).__name__,
self._connect,
self._read,
self.total,
)
# __str__ provided for backwards compatibility
__str__ = __repr__
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If it is a numeric value less than or equal to
zero, or the type is not an integer, float, or None.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
if isinstance(value, bool):
raise ValueError(
"Timeout cannot be a boolean value. It must "
"be an int, float or None."
)
try:
float(value)
except (TypeError, ValueError):
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
try:
if value <= 0:
raise ValueError(
"Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than or equal to 0." % (name, value)
)
except TypeError:
# Python 3
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read, total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time in seconds.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError(
"Can't get connect duration for timer that has not started."
)
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (
self.total is not None
and self.total is not self.DEFAULT_TIMEOUT
and self._read is not None
and self._read is not self.DEFAULT_TIMEOUT
):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(), self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py",
"repo_id": "Django-locallibrary",
"token_count": 3799
} | 16 |
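A minimal sketch exercising the Timeout object defined above (values are illustrative):

from pip._vendor.urllib3.util.timeout import Timeout

t = Timeout(connect=2.0, read=7.0, total=10.0)
print(t.connect_timeout)  # 2.0 -- min(connect, total)
t.start_connect()         # start the clock used by get_connect_duration()
print(t.read_timeout)     # max(0, min(total - elapsed, read))
fresh = t.clone()         # per-request copy with no start time recorded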
Metadata-Version: 2.1
Name: setuptools
Version: 49.2.1
Summary: Easily download, build, install, upgrade, and uninstall Python packages
Home-page: https://github.com/pypa/setuptools
Author: Python Packaging Authority
Author-email: [email protected]
License: UNKNOWN
Project-URL: Documentation, https://setuptools.readthedocs.io/
Keywords: CPAN PyPI distutils eggs package management
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Archiving :: Packaging
Classifier: Topic :: System :: Systems Administration
Classifier: Topic :: Utilities
Requires-Python: >=3.5
Description-Content-Type: text/x-rst; charset=UTF-8
Provides-Extra: certs
Requires-Dist: certifi (==2016.9.26) ; extra == 'certs'
Provides-Extra: docs
Requires-Dist: sphinx ; extra == 'docs'
Requires-Dist: jaraco.packaging (>=6.1) ; extra == 'docs'
Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs'
Provides-Extra: ssl
Requires-Dist: wincertstore (==0.2) ; (sys_platform == "win32") and extra == 'ssl'
Provides-Extra: tests
Requires-Dist: mock ; extra == 'tests'
Requires-Dist: pytest-flake8 ; extra == 'tests'
Requires-Dist: virtualenv (>=13.0.0) ; extra == 'tests'
Requires-Dist: pytest-virtualenv (>=1.2.7) ; extra == 'tests'
Requires-Dist: pytest (>=3.7) ; extra == 'tests'
Requires-Dist: wheel ; extra == 'tests'
Requires-Dist: coverage (>=4.5.1) ; extra == 'tests'
Requires-Dist: pytest-cov (>=2.5.1) ; extra == 'tests'
Requires-Dist: pip (>=19.1) ; extra == 'tests'
Requires-Dist: futures ; (python_version == "2.7") and extra == 'tests'
Requires-Dist: flake8-2020 ; (python_version >= "3.6") and extra == 'tests'
Requires-Dist: paver ; (python_version >= "3.6") and extra == 'tests'
.. image:: https://img.shields.io/pypi/v/setuptools.svg
:target: `PyPI link`_
.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
:target: `PyPI link`_
.. _PyPI link: https://pypi.org/project/setuptools
.. image:: https://dev.azure.com/jaraco/setuptools/_apis/build/status/pypa.setuptools?branchName=master
:target: https://dev.azure.com/jaraco/setuptools/_build/latest?definitionId=1&branchName=master
.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20CI&logo=travis&logoColor=white
:target: https://travis-ci.org/pypa/setuptools
.. image:: https://img.shields.io/appveyor/ci/pypa/setuptools/master.svg?label=Windows%20CI&logo=appveyor&logoColor=white
:target: https://ci.appveyor.com/project/pypa/setuptools/branch/master
.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
:target: https://setuptools.readthedocs.io
.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
:target: https://codecov.io/gh/pypa/setuptools
.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
:target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
See the `Installation Instructions
<https://packaging.python.org/installing/>`_ in the Python Packaging
User's Guide for instructions on installing, upgrading, and uninstalling
Setuptools.
Questions and comments should be directed to the `distutils-sig
mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
Bug reports and especially tested patches may be
submitted directly to the `bug tracker
<https://github.com/pypa/setuptools/issues>`_.
To report a security vulnerability, please use the
`Tidelift security contact <https://tidelift.com/security>`_.
Tidelift will coordinate the fix and disclosure.
For Enterprise
==============
Available as part of the Tidelift Subscription.
Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
Code of Conduct
===============
Everyone interacting in the setuptools project's codebases, issue trackers,
chat rooms, and mailing lists is expected to follow the
`PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
| Django-locallibrary/env/Lib/site-packages/setuptools-49.2.1.dist-info/METADATA/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools-49.2.1.dist-info/METADATA",
"repo_id": "Django-locallibrary",
"token_count": 1592
} | 17 |
"""distutils.command.install_data
Implements the Distutils 'install_data' command, for installing
platform-independent data files."""
# contributed by Bastian Kleineidam
import os
from distutils.core import Command
from distutils.util import change_root, convert_path
class install_data(Command):
description = "install data files"
user_options = [
('install-dir=', 'd',
"base directory for installing data files "
"(default: installation base dir)"),
('root=', None,
"install everything relative to this alternate root directory"),
('force', 'f', "force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.outfiles = []
self.root = None
self.force = 0
self.data_files = self.distribution.data_files
self.warn_dir = 1
def finalize_options(self):
self.set_undefined_options('install',
('install_data', 'install_dir'),
('root', 'root'),
('force', 'force'),
)
def run(self):
self.mkpath(self.install_dir)
for f in self.data_files:
if isinstance(f, str):
# it's a simple file, so copy it
f = convert_path(f)
if self.warn_dir:
self.warn("setup script did not provide a directory for "
"'%s' -- installing right in '%s'" %
(f, self.install_dir))
(out, _) = self.copy_file(f, self.install_dir)
self.outfiles.append(out)
else:
# it's a tuple with path to install to and a list of files
dir = convert_path(f[0])
if not os.path.isabs(dir):
dir = os.path.join(self.install_dir, dir)
elif self.root:
dir = change_root(self.root, dir)
self.mkpath(dir)
if f[1] == []:
# If there are no files listed, the user must be
# trying to create an empty directory, so add the
# directory to the list of output files.
self.outfiles.append(dir)
else:
# Copy files, adding them to the list of output files.
for data in f[1]:
data = convert_path(data)
(out, _) = self.copy_file(data, dir)
self.outfiles.append(out)
def get_inputs(self):
return self.data_files or []
def get_outputs(self):
return self.outfiles
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_data.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_data.py",
"repo_id": "Django-locallibrary",
"token_count": 1431
} | 18 |
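A hypothetical setup.py fragment showing the two data_files forms that run() above handles, plus the empty-list case that only creates a directory:

from distutils.core import setup

setup(
    name="example",
    version="1.0",
    data_files=[
        "README.txt",                     # bare string: installed into install_dir
        ("share/example", ["data/a.dat",  # (directory, [files]) tuple
                           "data/b.dat"]),
        ("share/example/empty", []),      # empty list: just create the directory
    ],
)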
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
import os
import warnings
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
        the full name of the extension, including any packages -- i.e.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
optional : boolean
specifies that a build failure in the extension should not abort the
build process, but simply not install the failing extension.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__(self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
optional=None,
**kw # To catch unknown keywords
):
if not isinstance(name, str):
raise AssertionError("'name' must be a string")
if not (isinstance(sources, list) and
all(isinstance(v, str) for v in sources)):
raise AssertionError("'sources' must be a list of strings")
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
self.optional = optional
# If there are unknown keyword options, warn about them
if len(kw) > 0:
options = [repr(option) for option in kw]
options = ', '.join(sorted(options))
msg = "Unknown Extension options: %s" % options
warnings.warn(msg)
def __repr__(self):
return '<%s.%s(%r) at %#x>' % (
self.__class__.__module__,
self.__class__.__qualname__,
self.name,
id(self))
def read_setup_file(filename):
"""Reads a Setup file and returns Extension instances."""
from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
_variable_rx)
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while True:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
                switch = word[0:2]
                value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = value.find("=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
                        # take everything after '=' (equals+1); the original
                        # equals+2 off-by-one dropped the value's first char
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
return extensions
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/extension.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/extension.py",
"repo_id": "Django-locallibrary",
"token_count": 4727
} | 19 |
__all__ = [
'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
'bdist_wininst', 'upload_docs', 'build_clib', 'dist_info',
]
from distutils.command.bdist import bdist
import sys
from setuptools.command import install_scripts
if 'egg' not in bdist.format_commands:
bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
bdist.format_commands.append('egg')
del bdist, sys
| Django-locallibrary/env/Lib/site-packages/setuptools/command/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 218
} | 20 |
from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import fnmatch
import textwrap
import io
import distutils.errors
import itertools
import stat
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter, filterfalse
try:
from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
class Mixin2to3:
def run_2to3(self, files, doctests=True):
"do nothing"
def make_writable(target):
os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
class build_py(orig.build_py, Mixin2to3):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
def finalize_options(self):
orig.build_py.finalize_options(self)
self.package_data = self.distribution.package_data
self.exclude_package_data = (self.distribution.exclude_package_data or
{})
if 'data_files' in self.__dict__:
del self.__dict__['data_files']
self.__updated_files = []
self.__doctests_2to3 = []
def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and not self.packages:
return
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
def __getattr__(self, attr):
"lazily compute data files"
if attr == 'data_files':
self.data_files = self._get_data_files()
return self.data_files
return orig.build_py.__getattr__(self, attr)
def build_module(self, module, module_file, package):
if six.PY2 and isinstance(package, six.string_types):
# avoid errors on Python 2 when unicode is passed (#190)
package = package.split('.')
outfile, copied = orig.build_py.build_module(self, module, module_file,
package)
if copied:
self.__updated_files.append(outfile)
return outfile, copied
def _get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
self.analyze_manifest()
return list(map(self._get_pkg_data_files, self.packages or ()))
def _get_pkg_data_files(self, package):
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Strip directory from globbed filenames
filenames = [
os.path.relpath(file, src_dir)
for file in self.find_data_files(package, src_dir)
]
return package, src_dir, build_dir, filenames
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
patterns = self._get_platform_patterns(
self.package_data,
package,
src_dir,
)
globs_expanded = map(glob, patterns)
# flatten the expanded globs into an iterable of matches
globs_matches = itertools.chain.from_iterable(globs_expanded)
glob_files = filter(os.path.isfile, globs_matches)
files = itertools.chain(
self.manifest_files.get(package, []),
glob_files,
)
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
outf, copied = self.copy_file(srcfile, target)
make_writable(target)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
self.__doctests_2to3.append(outf)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
for path in ei_cmd.filelist.files:
d, f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d != prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith('.py') and f == oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d], []).append(path)
def get_data_files(self):
pass # Lazily compute data files in _get_data_files() function.
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = orig.build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg == package or pkg.startswith(package + '.'):
break
else:
return init_py
with io.open(init_py, 'rb') as f:
contents = f.read()
if b'declare_namespace' not in contents:
raise distutils.errors.DistutilsError(
"Namespace package problem: %s is a namespace package, but "
"its\n__init__.py does not call declare_namespace()! Please "
'fix it.\n(See the setuptools manual under '
'"Namespace Packages" for details.)\n"' % (package,)
)
return init_py
def initialize_options(self):
self.packages_checked = {}
orig.build_py.initialize_options(self)
def get_package_dir(self, package):
res = orig.build_py.get_package_dir(self, package)
if self.distribution.src_root is not None:
return os.path.join(self.distribution.src_root, res)
return res
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
files = list(files)
patterns = self._get_platform_patterns(
self.exclude_package_data,
package,
src_dir,
)
match_groups = (
fnmatch.filter(files, pattern)
for pattern in patterns
)
# flatten the groups of matches into an iterable of matches
matches = itertools.chain.from_iterable(match_groups)
bad = set(matches)
keepers = (
fn
for fn in files
if fn not in bad
)
# ditch dupes
return list(_unique_everseen(keepers))
@staticmethod
def _get_platform_patterns(spec, package, src_dir):
"""
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(
spec.get('', []),
spec.get(package, []),
)
return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
)
# from Python docs
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def assert_relative(path):
if not os.path.isabs(path):
return path
from distutils.errors import DistutilsSetupError
msg = textwrap.dedent("""
Error: setup script specifies an absolute path:
%s
setup() arguments must *always* be /-separated paths relative to the
setup.py directory, *never* absolute paths.
""").lstrip() % path
raise DistutilsSetupError(msg)
| Django-locallibrary/env/Lib/site-packages/setuptools/command/build_py.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/build_py.py",
"repo_id": "Django-locallibrary",
"token_count": 4414
} | 21 |
import os
import operator
import sys
import contextlib
import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages, evaluate_marker,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from .build_py import _unique_everseen
__metaclass__ = type
class ScanningLoader(TestLoader):
def __init__(self):
TestLoader.__init__(self)
self._visited = set()
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
if module in self._visited:
return None
self._visited.add(module)
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build (deprecated)"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = not six.PY2 and getattr(
self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we cannot do this in place:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3, in-place works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(_unique_everseen(paths))
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires)
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
er_d = dist.fetch_build_eggs(
v for k, v in dist.extras_require.items()
if k.startswith(':') and evaluate_marker(k[1:])
)
return itertools.chain(ir_d, tr_d, er_d)
def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
"entry point independent of test runner are encouraged to use "
"tox.",
log.WARN,
)
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
test = unittest.main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| Django-locallibrary/env/Lib/site-packages/setuptools/command/test.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/test.py",
"repo_id": "Django-locallibrary",
"token_count": 4464
} | 22 |
import glob
import os
import subprocess
import sys
from distutils import log
from distutils.errors import DistutilsError
import pkg_resources
from setuptools.command.easy_install import easy_install
from setuptools.extern import six
from setuptools.wheel import Wheel
from .py31compat import TemporaryDirectory
def _fixup_find_links(find_links):
"""Ensure find-links option end-up being a list of strings."""
if isinstance(find_links, six.string_types):
return find_links.split()
assert isinstance(find_links, (tuple, list))
return find_links
def _legacy_fetch_build_egg(dist, req):
"""Fetch an egg needed for building.
Legacy path using EasyInstall.
"""
tmp_dist = dist.__class__({'script_args': ['easy_install']})
opts = tmp_dist.get_option_dict('easy_install')
opts.clear()
opts.update(
(k, v)
for k, v in dist.get_option_dict('easy_install').items()
if k in (
# don't use any other settings
'find_links', 'site_dirs', 'index_url',
'optimize', 'allow_hosts',
))
if dist.dependency_links:
links = dist.dependency_links[:]
if 'find_links' in opts:
links = _fixup_find_links(opts['find_links'][1]) + links
opts['find_links'] = ('setup', links)
install_dir = dist.get_egg_cache_dir()
cmd = easy_install(
tmp_dist, args=["x"], install_dir=install_dir,
exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
return cmd.easy_install(req)
def fetch_build_egg(dist, req):
"""Fetch an egg needed for building.
Use pip/wheel to fetch/build a wheel."""
# Check pip is available.
try:
pkg_resources.get_distribution('pip')
except pkg_resources.DistributionNotFound:
dist.announce(
'WARNING: The pip package is not available, falling back '
'to EasyInstall for handling setup_requires/test_requires; '
'this is deprecated and will be removed in a future version.',
log.WARN
)
return _legacy_fetch_build_egg(dist, req)
# Warn if wheel is not available.
try:
pkg_resources.get_distribution('wheel')
except pkg_resources.DistributionNotFound:
dist.announce('WARNING: The wheel package is not available.', log.WARN)
# Ignore environment markers; if supplied, it is required.
req = strip_marker(req)
# Take easy_install options into account, but do not override relevant
# pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
# take precedence.
opts = dist.get_option_dict('easy_install')
if 'allow_hosts' in opts:
raise DistutilsError('the `allow-hosts` option is not supported '
'when using pip to install requirements.')
if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:
quiet = False
else:
quiet = True
if 'PIP_INDEX_URL' in os.environ:
index_url = None
elif 'index_url' in opts:
index_url = opts['index_url'][1]
else:
index_url = None
if 'find_links' in opts:
find_links = _fixup_find_links(opts['find_links'][1])[:]
else:
find_links = []
if dist.dependency_links:
find_links.extend(dist.dependency_links)
eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
environment = pkg_resources.Environment()
for egg_dist in pkg_resources.find_distributions(eggs_dir):
if egg_dist in req and environment.can_add(egg_dist):
return egg_dist
with TemporaryDirectory() as tmpdir:
cmd = [
sys.executable, '-m', 'pip',
'--disable-pip-version-check',
'wheel', '--no-deps',
'-w', tmpdir,
]
if quiet:
cmd.append('--quiet')
if index_url is not None:
cmd.extend(('--index-url', index_url))
if find_links is not None:
for link in find_links:
cmd.extend(('--find-links', link))
# If requirement is a PEP 508 direct URL, directly pass
# the URL to pip, as `req @ url` does not work on the
# command line.
if req.url:
cmd.append(req.url)
else:
cmd.append(str(req))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
raise DistutilsError(str(e)) from e
wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
dist_location = os.path.join(eggs_dir, wheel.egg_name())
wheel.install_as_egg(dist_location)
dist_metadata = pkg_resources.PathMetadata(
dist_location, os.path.join(dist_location, 'EGG-INFO'))
dist = pkg_resources.Distribution.from_filename(
dist_location, metadata=dist_metadata)
return dist
def strip_marker(req):
"""
Return a new requirement without the environment marker to avoid
calling pip with something like `babel; extra == "i18n"`, which
would always be ignored.
"""
# create a copy to avoid mutating the input
req = pkg_resources.Requirement.parse(str(req))
req.marker = None
return req
| Django-locallibrary/env/Lib/site-packages/setuptools/installer.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/installer.py",
"repo_id": "Django-locallibrary",
"token_count": 2277
} | 23 |
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('setuptools').version
except Exception:
__version__ = 'unknown'
| Django-locallibrary/env/Lib/site-packages/setuptools/version.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/version.py",
"repo_id": "Django-locallibrary",
"token_count": 48
} | 24 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 18 17:59:53 2022
@author: Omega Joctan
"""
# Gradient descent implementation in Python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
data = pd.read_csv(r'C:\Users\Omega Joctan\AppData\Roaming\MetaQuotes\Terminal\892B47EBC091D6EF95E3961284A76097\MQL5\Files\Salary_Data.csv')
X = data["YearsExperience"].values.reshape(-1,1)
Y = data["Salary"].values.reshape(-1,1)
#data_scaled = np.array(scaler.fit_transform(data))
X = np.array(scaler.fit_transform(X))
Y = np.array(scaler.fit_transform(Y))
print(X,f"\n Scaled Mean {X.mean(axis=0)} std {X.std(axis=0)}")
plt.scatter(X, Y)
plt.xlabel("years experience")
plt.ylabel("Salary")
plt.show()
# Building the model
m = 0
c = 0
L = 0.1 # The learning Rate
epochs = 10000 # The number of iterations to perform gradient descent
n = float(len(X)) # Number of elements in X
# Performing Gradient Descent
for i in range(epochs):
Y_pred = m*X + c # The current predicted value of Y
D_m = (-2/n) * sum(X * (Y - Y_pred)) # Derivative wrt m
D_c = (-2/n) * sum(Y - Y_pred) # Derivative wrt c
m = m - L * D_m # Update m
c = c - L * D_c # Update c
print (m, c)
plt.figure(figsize=(12,6))
plt.title("Gradient Descent best model")
plt.scatter(X,Y)
plt.xlabel("Years Of Experince")
plt.ylabel("Salary")
plt.plot(X,m*X+c,label="Best model",c="red")
plt.legend(loc="upper right")
plt.show()
data = pd.read_csv(r'C:\Users\Omega Joctan\AppData\Roaming\MetaQuotes\Terminal\892B47EBC091D6EF95E3961284A76097\MQL5\Files\titanic.csv')
print(data.head(5))
y = data.iloc[:,1]
x = data.iloc[:,2]
#print(x,"\n Y's ", y)
X = data["Pclass"].values.reshape(-1,1)
Y = data["Survived"].values.reshape(-1,1)
n = float(len(X))
e = np.e  # Euler's number (use numpy's constant rather than a truncated literal)
def Sigmoid(x):
return 1/(1+(e**-x))
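# Note: the loop below reuses the squared-error gradient formulas with the
# sigmoid applied to the prediction; this follows the article's simplified
# derivation rather than the cross-entropy gradient of logistic regression.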
m = 0
c = 0 # reset the coefficients; otherwise this fit would silently start from the linear model above
for i in range(epochs):
Y_pred = Sigmoid(m*X + c) # The current predicted value of Y
D_m = (-2/n) * sum(X * (Y - Y_pred)) # Derivative wrt m
D_c = (-2/n) * sum(Y - Y_pred) # Derivative wrt c
if D_m == 0 and D_c == 0:
break
m = m - L * D_m # Update m
c = c - L * D_c # Update c
print (m, c)
sns.catplot( x="Sex", y = "Survived", kind="bar", hue="Pclass" , data=data)
sns.catplot(x="Pclass",y = "Survived", kind="swarm", hue="Sex", data=data)
Yp = np.sort(Sigmoid(m*X+c))
plt.figure(figsize=(13,9))
plt.scatter(X,Y, c = "blue")
plt.scatter(X,Yp, c="red",label ="sigmoid")
plt.legend(loc="upper right")
plt.show()
| Gradient-Descent-MQL5/gradient descent.py/0 | {
"file_path": "Gradient-Descent-MQL5/gradient descent.py",
"repo_id": "Gradient-Descent-MQL5",
"token_count": 1234
} | 25 |
## Decision Trees in MQL5: Classification and Regression
Decision trees are powerful machine learning algorithms that use a tree-like structure to make predictions. They work by splitting the data based on features (independent variables) into increasingly homogeneous subsets, ultimately reaching leaves that represent the final prediction. The MALE5 library implements both **classification** and **regression** decision trees for MQL5 through the `tree.mqh` include file.
**Decision Tree Theory (Basic Overview):**
1. **Start with the entire dataset at the root node.**
2. **Choose the feature and threshold that best splits the data into two subsets such that each subset is more homogeneous concerning the target variable (dependent variable).**
* For classification, this often involves maximizing information gain or minimizing Gini impurity.
* For regression, it involves maximizing variance reduction between the parent node and child nodes (see the formulas after this list).
3. **Repeat step 2 for each child node recursively until a stopping criterion is met (e.g., reaching a maximum depth, minimum samples per node, or sufficient homogeneity).**
4. **Assign a prediction value to each leaf node.**
* For classification, this is the most frequent class in the leaf node.
* For regression, this is the average value of the target variable in the leaf node.
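For reference, the two split criteria mentioned in step 2 are the standard textbook definitions (stated generically here, not as MALE5-specific formulas), where `p_i` is the proportion of class `i` in a node and `n_c` counts samples in a child node:

```latex
G = 1 - \sum_{i} p_i^{2}
\qquad
\Delta\mathrm{Var} = \mathrm{Var}(\mathrm{parent}) - \sum_{c \in \mathrm{children}} \frac{n_c}{n_{\mathrm{parent}}}\,\mathrm{Var}(c)
```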
**CDecisionTreeClassifier Class:**
This class implements a decision tree for classification tasks. It offers the following functionalities:
* `CDecisionTreeClassifier(uint min_samples_split=2, uint max_depth=2, mode mode_=MODE_GINI)` Constructor, allows setting hyperparameters (minimum samples per split, maximum tree depth, and splitting criterion).
* `~CDecisionTreeClassifier(void)` Destructor.
* `void fit(const matrix &x, const vector &y)` Trains the model on the provided data (`x` - independent variables, `y` - class labels).
* `void print_tree(Node *tree, string indent=" ", string padl="")` Prints the tree structure in a readable format.
* `double predict(const vector &x)` Predicts the class label for a new data point (`x`).
* `vector predict(const matrix &x)` Predicts class labels for multiple new data points (`x`).
**CDecisionTreeRegressor Class:**
This class inherits from `CDecisionTreeClassifier` and specializes in regression tasks. It overrides specific functions and implements different splitting criteria:
* `CDecisionTreeRegressor(uint min_samples_split=2, uint max_depth=2)` Constructor, allows setting hyperparameters (minimum samples per split and maximum tree depth).
* `~CDecisionTreeRegressor(void)` Destructor.
* `void fit(matrix &x, vector &y)` Trains the model on the provided data (`x` - independent variables, `y` - continuous values).
* `double predict(const vector &x)` Predicts the continuous value for a new data point (`x`).
**Additional Notes:**
* Both classes use internal helper functions for building the tree, calculating splitting criteria (information gain, Gini impurity, variance reduction), and making predictions.
* The `check_is_fitted` function ensures the model is trained before allowing predictions.
* Choosing appropriate hyperparameters (especially maximum depth) is crucial to avoid overfitting the model.
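Below is a minimal usage sketch for the classifier. The include path and the toy dataset are illustrative assumptions; the constructor and method signatures follow the list above:

```mql5
#include <MALE5\Decision Tree\tree.mqh> // adjust to where MALE5 lives in your MQL5\Include folder

void OnStart()
  {
//--- toy training data: two features per row, binary class labels (illustrative only)
   matrix x = {{1, 2}, {2, 3}, {3, 4}, {4, 5}};
   vector y = {0, 0, 1, 1};

   CDecisionTreeClassifier tree(2, 3, MODE_GINI); // min_samples_split, max_depth, splitting criterion
   tree.fit(x, y);                                // build the tree from x and y

   vector preds = tree.predict(x);                // batch prediction for a matrix of inputs
   Print("predictions: ", preds);
  }
```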
**Reference**
[Data Science and Machine Learning (Part 16): A Refreshing Look at Decision Trees](https://www.mql5.com/en/articles/13862) | MALE5/Decision Tree/README.md/0 | {
"file_path": "MALE5/Decision Tree/README.md",
"repo_id": "MALE5",
"token_count": 830
} | 26 |
//+------------------------------------------------------------------+
//| Ridge Regression.mqh |
//| Copyright 2022, MetaQuotes Ltd. |
//| https://www.mql5.com |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
#include <MALE5\preprocessing.mqh>
#include <MALE5\MatrixExtend.mqh>
#include "Linear Regression.mqh"
//+------------------------------------------------------------------+
class CRidgeregression
{
protected:
matrix XMatrix; //matrix of independent variables
matrix YMatrix;
vector yVector; // Vector of target variables
matrix Id_matrix; //Identity matrix
matrix Betas;
ulong n; //No of samples
ulong k; //No of regressors
public:
CRidgeregression(matrix &_matrix);
~CRidgeregression(void);
double RSS;
double Lr_accuracy;
vector L2Norm(double lambda); //Ridge regression
};
//+------------------------------------------------------------------+
CRidgeregression::CRidgeregression(matrix &_matrix)
{
n = _matrix.Rows();
k = _matrix.Cols();
MatrixExtend::XandYSplitMatrices(_matrix,XMatrix,yVector);
YMatrix = MatrixExtend::VectorToMatrix(yVector);
//---
Id_matrix.Resize(k,k);
Id_matrix.Identity();
}
//+------------------------------------------------------------------+
CRidgeregression::~CRidgeregression(void)
{
ZeroMemory(XMatrix);
ZeroMemory(yVector);
ZeroMemory(YMatrix); //was zeroing yVector twice; clear the target matrix instead
ZeroMemory(Id_matrix);
}
//+------------------------------------------------------------------+
vector CRidgeregression::L2Norm(double lambda)
{
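//--- Closed-form ridge solution: Betas = inv(X'X + lambda*I) * X'y, computed on the
//--- design matrix (MatrixExtend::DesignMatrix is assumed to prepend the intercept column)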
matrix design = MatrixExtend::DesignMatrix(XMatrix);
matrix XT = design.Transpose();
matrix XTX = XT.MatMul(design);
matrix lambdaxI = lambda * Id_matrix;
matrix sum_matrix = XTX + lambdaxI;
matrix Inverse_sum = sum_matrix.Inv();
matrix XTy = XT.MatMul(YMatrix);
Betas = Inverse_sum.MatMul(XTy);
#ifdef DEBUG_MODE
//Print("Betas\n",Betas);
#endif
return(MatrixExtend::MatrixToVector(Betas));
}
//+------------------------------------------------------------------+
| MALE5/Linear Models/Ridge Regression.mqh/0 | {
"file_path": "MALE5/Linear Models/Ridge Regression.mqh",
"repo_id": "MALE5",
"token_count": 1263
} | 27 |
"""
ASGI config for LocalLibrary project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LocalLibrary.settings')
application = get_asgi_application()
| Django-locallibrary/LocalLibrary/LocalLibrary/asgi.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/LocalLibrary/asgi.py",
"repo_id": "Django-locallibrary",
"token_count": 131
} | 0 |
{% extends "base_template.html" %}
{% block content %}
<h2>Error 403</h2>
<h3>You have no permission to access this page</h3>
{% endblock %} | Django-locallibrary/LocalLibrary/catalog/Templates/403.html/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/403.html",
"repo_id": "Django-locallibrary",
"token_count": 73
} | 1 |
from django.test import TestCase
| Django-locallibrary/LocalLibrary/catalog/Tests/test_models.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/Tests/test_models.py",
"repo_id": "Django-locallibrary",
"token_count": 9
} | 2 |
# Generated by Django 3.2.3 on 2021-12-06 12:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0003_auto_20210914_0632'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'ordering': ['due_back'], 'permissions': (('can_mark_returned', 'Set book as returned'),)},
),
]
| Django-locallibrary/LocalLibrary/catalog/migrations/0004_alter_bookinstance_options.py/0 | {
"file_path": "Django-locallibrary/LocalLibrary/catalog/migrations/0004_alter_bookinstance_options.py",
"repo_id": "Django-locallibrary",
"token_count": 177
} | 3 |
@font-face {
font-family: 'Roboto';
src: url('../fonts/Roboto-Bold-webfont.woff');
font-weight: 700;
font-style: normal;
}
@font-face {
font-family: 'Roboto';
src: url('../fonts/Roboto-Regular-webfont.woff');
font-weight: 400;
font-style: normal;
}
@font-face {
font-family: 'Roboto';
src: url('../fonts/Roboto-Light-webfont.woff');
font-weight: 300;
font-style: normal;
}
| Django-locallibrary/LocalLibrary/staticfiles/admin/css/fonts.css/0 | {
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/css/fonts.css",
"repo_id": "Django-locallibrary",
"token_count": 192
} | 4 |
Metadata-Version: 2.1
Name: pip
Version: 20.2.3
Summary: The PyPA recommended tool for installing Python packages.
Home-page: https://pip.pypa.io/
Author: The pip developers
Author-email: [email protected]
License: MIT
Project-URL: Documentation, https://pip.pypa.io
Project-URL: Source, https://github.com/pypa/pip
Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
Keywords: distutils easy_install egg setuptools wheel virtualenv
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Software Development :: Build Tools
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*
pip - The Python Package Installer
==================================
.. image:: https://img.shields.io/pypi/v/pip.svg
:target: https://pypi.org/project/pip/
.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
:target: https://pip.pypa.io/en/latest
pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
Please take a look at our documentation for how to install and use pip:
* `Installation`_
* `Usage`_
We release updates regularly, with a new version every 3 months. Find more details in our documentation:
* `Release notes`_
* `Release process`_
In 2020, we're working on improvements to the heart of pip. Please `learn more and take our survey`_ to help us do it right.
If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
* `Issue tracking`_
* `Discourse channel`_
* `User IRC`_
If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
* `GitHub page`_
* `Development documentation`_
* `Development mailing list`_
* `Development IRC`_
Code of Conduct
---------------
Everyone interacting in the pip project's codebases, issue trackers, chat
rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
.. _package installer: https://packaging.python.org/guides/tool-recommendations/
.. _Python Package Index: https://pypi.org
.. _Installation: https://pip.pypa.io/en/stable/installing.html
.. _Usage: https://pip.pypa.io/en/stable/
.. _Release notes: https://pip.pypa.io/en/stable/news.html
.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
.. _GitHub page: https://github.com/pypa/pip
.. _Development documentation: https://pip.pypa.io/en/latest/development
.. _learn more and take our survey: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
.. _Issue tracking: https://github.com/pypa/pip/issues
.. _Discourse channel: https://discuss.python.org/c/packaging
.. _Development mailing list: https://mail.python.org/mailman3/lists/distutils-sig.python.org/
.. _User IRC: https://webchat.freenode.net/?channels=%23pypa
.. _Development IRC: https://webchat.freenode.net/?channels=%23pypa-dev
.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
| Django-locallibrary/env/Lib/site-packages/pip-20.2.3.dist-info/METADATA/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip-20.2.3.dist-info/METADATA",
"repo_id": "Django-locallibrary",
"token_count": 1132
} | 5 |
from __future__ import absolute_import
import logging
import os
import textwrap
import pip._internal.utils.filesystem as filesystem
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, PipError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, List
logger = logging.getLogger(__name__)
class CacheCommand(Command):
"""
Inspect and manage pip's wheel cache.
Subcommands:
- dir: Show the cache directory.
- info: Show information about the cache.
- list: List filenames of packages stored in the cache.
- remove: Remove one or more packages from the cache.
- purge: Remove all items from the cache.
``<pattern>`` can be a glob expression or a package name.
"""
ignore_require_venv = True
usage = """
%prog dir
%prog info
%prog list [<pattern>]
%prog remove <pattern>
%prog purge
"""
def run(self, options, args):
# type: (Values, List[Any]) -> int
handlers = {
"dir": self.get_cache_dir,
"info": self.get_cache_info,
"list": self.list_cache_items,
"remove": self.remove_cache_items,
"purge": self.purge_cache,
}
if not options.cache_dir:
logger.error("pip cache commands can not "
"function since cache is disabled.")
return ERROR
# Determine action
if not args or args[0] not in handlers:
logger.error(
"Need an action (%s) to perform.",
", ".join(sorted(handlers)),
)
return ERROR
action = args[0]
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def get_cache_dir(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
logger.info(options.cache_dir)
def get_cache_info(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
num_packages = len(self._find_wheels(options, '*'))
cache_location = self._wheels_cache_dir(options)
cache_size = filesystem.format_directory_size(cache_location)
message = textwrap.dedent("""
Location: {location}
Size: {size}
Number of wheels: {package_count}
""").format(
location=cache_location,
package_count=num_packages,
size=cache_size,
).strip()
logger.info(message)
def list_cache_items(self, options, args):
# type: (Values, List[Any]) -> None
if len(args) > 1:
raise CommandError('Too many arguments')
if args:
pattern = args[0]
else:
pattern = '*'
files = self._find_wheels(options, pattern)
if not files:
logger.info('Nothing cached.')
return
results = []
for filename in files:
wheel = os.path.basename(filename)
size = filesystem.format_file_size(filename)
results.append(' - {} ({})'.format(wheel, size))
logger.info('Cache contents:\n')
logger.info('\n'.join(sorted(results)))
def remove_cache_items(self, options, args):
# type: (Values, List[Any]) -> None
if len(args) > 1:
raise CommandError('Too many arguments')
if not args:
raise CommandError('Please provide a pattern')
files = self._find_wheels(options, args[0])
if not files:
raise CommandError('No matching packages')
for filename in files:
os.unlink(filename)
logger.debug('Removed %s', filename)
logger.info('Files removed: %s', len(files))
def purge_cache(self, options, args):
# type: (Values, List[Any]) -> None
if args:
raise CommandError('Too many arguments')
return self.remove_cache_items(options, ['*'])
def _wheels_cache_dir(self, options):
# type: (Values) -> str
return os.path.join(options.cache_dir, 'wheels')
def _find_wheels(self, options, pattern):
# type: (Values, str) -> List[str]
wheel_dir = self._wheels_cache_dir(options)
# The wheel filename format, as specified in PEP 427, is:
# {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
#
# Additionally, non-alphanumeric values in the distribution are
# normalized to underscores (_), meaning hyphens can never occur
# before `-{version}`.
#
# Given that information:
# - If the pattern we're given contains a hyphen (-), the user is
# providing at least the version. Thus, we can just append `*.whl`
# to match the rest of it.
# - If the pattern we're given doesn't contain a hyphen (-), the
# user is only providing the name. Thus, we append `-*.whl` to
# match the hyphen before the version, followed by anything else.
#
# PEP 427: https://www.python.org/dev/peps/pep-0427/
pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
return filesystem.find_files(wheel_dir, pattern)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/cache.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/cache.py",
"repo_id": "Django-locallibrary",
"token_count": 2436
} | 6 |
from pip._internal.distributions.sdist import SourceDistribution
from pip._internal.distributions.wheel import WheelDistribution
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.req.req_install import InstallRequirement
def make_distribution_for_install_requirement(install_req):
# type: (InstallRequirement) -> AbstractDistribution
"""Returns a Distribution for the given InstallRequirement
"""
# Editable requirements will always be source distributions. They use the
# legacy logic until we create a modern standard for them.
if install_req.editable:
return SourceDistribution(install_req)
# If it's a wheel, it's a WheelDistribution
if install_req.is_wheel:
return WheelDistribution(install_req)
# Otherwise, a SourceDistribution
return SourceDistribution(install_req)
| Django-locallibrary/env/Lib/site-packages/pip/_internal/distributions/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/distributions/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 287
} | 7 |
"""Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import logging
import re
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
from pip._internal.utils.urls import url_to_path
if MYPY_CHECK_RUNNING:
from typing import (
FrozenSet, Iterable, List, Optional, Set, Text, Tuple, Union,
)
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.version import _BaseVersion
from pip._internal.index.collector import LinkCollector
from pip._internal.models.search_scope import SearchScope
from pip._internal.req import InstallRequirement
from pip._internal.utils.hashes import Hashes
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = (
Tuple[int, int, int, _BaseVersion, BuildTag, Optional[int]]
)
__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder']
logger = logging.getLogger(__name__)
def _check_link_requires_python(
link, # type: Link
version_info, # type: Tuple[int, int, int]
ignore_requires_python=False, # type: bool
):
# type: (...) -> bool
"""
Return whether the given Python version is compatible with a link's
"Requires-Python" value.
:param version_info: A 3-tuple of ints representing the Python
major-minor-micro version to check.
:param ignore_requires_python: Whether to ignore the "Requires-Python"
value if the given Python version isn't compatible.
"""
try:
is_compatible = check_requires_python(
link.requires_python, version_info=version_info,
)
except specifiers.InvalidSpecifier:
logger.debug(
"Ignoring invalid Requires-Python (%r) for link: %s",
link.requires_python, link,
)
else:
if not is_compatible:
version = '.'.join(map(str, version_info))
if not ignore_requires_python:
logger.debug(
'Link requires a different Python (%s not in: %r): %s',
version, link.requires_python, link,
)
return False
logger.debug(
'Ignoring failed Requires-Python check (%s not in: %r) '
'for link: %s',
version, link.requires_python, link,
)
return True
class LinkEvaluator(object):
"""
Responsible for evaluating links for a particular project.
"""
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
def __init__(
self,
project_name, # type: str
canonical_name, # type: str
formats, # type: FrozenSet[str]
target_python, # type: TargetPython
allow_yanked, # type: bool
ignore_requires_python=None, # type: Optional[bool]
):
# type: (...) -> None
"""
:param project_name: The user supplied package name.
:param canonical_name: The canonical package name.
:param formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
:param target_python: The target Python interpreter to use when
evaluating link compatibility. This is used, for example, to
check wheel compatibility, as well as when checking the Python
version, e.g. the Python version embedded in a link filename
(or egg fragment) and against an HTML link's optional PEP 503
"data-requires-python" attribute.
:param allow_yanked: Whether files marked as yanked (in the sense
of PEP 592) are permitted to be candidates for install.
:param ignore_requires_python: Whether to ignore incompatible
PEP 503 "data-requires-python" values in HTML links. Defaults
to False.
"""
if ignore_requires_python is None:
ignore_requires_python = False
self._allow_yanked = allow_yanked
self._canonical_name = canonical_name
self._ignore_requires_python = ignore_requires_python
self._formats = formats
self._target_python = target_python
self.project_name = project_name
def evaluate_link(self, link):
# type: (Link) -> Tuple[bool, Optional[Text]]
"""
Determine whether a link is a candidate for installation.
:return: A tuple (is_candidate, result), where `result` is (1) a
version string if `is_candidate` is True, and (2) if
`is_candidate` is False, an optional string to log the reason
the link fails to qualify.
"""
version = None
if link.is_yanked and not self._allow_yanked:
reason = link.yanked_reason or '<none given>'
# Mark this as a unicode string to prevent "UnicodeEncodeError:
# 'ascii' codec can't encode character" in Python 2 when
# the reason contains non-ascii characters.
return (False, u'yanked for reason: {}'.format(reason))
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
return (False, 'not a file')
if ext not in SUPPORTED_EXTENSIONS:
return (False, 'unsupported archive format: {}'.format(ext))
if "binary" not in self._formats and ext == WHEEL_EXTENSION:
reason = 'No binaries permitted for {}'.format(
self.project_name)
return (False, reason)
if "macosx10" in link.path and ext == '.zip':
return (False, 'macosx10 one')
if ext == WHEEL_EXTENSION:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
return (False, 'invalid wheel filename')
if canonicalize_name(wheel.name) != self._canonical_name:
reason = 'wrong project name (not {})'.format(
self.project_name)
return (False, reason)
supported_tags = self._target_python.get_tags()
if not wheel.supported(supported_tags):
# Include the wheel's tags in the reason string to
# simplify troubleshooting compatibility issues.
file_tags = wheel.get_formatted_file_tags()
reason = (
"none of the wheel's tags match: {}".format(
', '.join(file_tags)
)
)
return (False, reason)
version = wheel.version
# This should be up by the self.ok_binary check, but see issue 2700.
if "source" not in self._formats and ext != WHEEL_EXTENSION:
reason = 'No sources permitted for {}'.format(self.project_name)
return (False, reason)
if not version:
version = _extract_version_from_fragment(
egg_info, self._canonical_name,
)
if not version:
reason = 'Missing project version for {}'.format(self.project_name)
return (False, reason)
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != self._target_python.py_version:
return (False, 'Python version is incorrect')
supports_python = _check_link_requires_python(
link, version_info=self._target_python.py_version_info,
ignore_requires_python=self._ignore_requires_python,
)
if not supports_python:
# Return None for the reason text to suppress calling
# _log_skipped_link().
return (False, None)
logger.debug('Found link %s, version: %s', link, version)
return (True, version)
def filter_unallowed_hashes(
candidates, # type: List[InstallationCandidate]
hashes, # type: Hashes
project_name, # type: str
):
# type: (...) -> List[InstallationCandidate]
"""
Filter out candidates whose hashes aren't allowed, and return a new
list of candidates.
If at least one candidate has an allowed hash, then all candidates with
either an allowed hash or no hash specified are returned. Otherwise,
the given candidates are returned.
Including the candidates with no hash specified when there is a match
allows a warning to be logged if there is a more preferred candidate
with no hash specified. Returning all candidates in the case of no
matches lets pip report the hash of the candidate that would otherwise
have been installed (e.g. permitting the user to more easily update
their requirements file with the desired hash).
"""
if not hashes:
logger.debug(
'Given no hashes to check %s links for project %r: '
'discarding no candidates',
len(candidates),
project_name,
)
# Make sure we're not returning back the given value.
return list(candidates)
matches_or_no_digest = []
# Collect the non-matches for logging purposes.
non_matches = []
match_count = 0
for candidate in candidates:
link = candidate.link
if not link.has_hash:
pass
elif link.is_hash_allowed(hashes=hashes):
match_count += 1
else:
non_matches.append(candidate)
continue
matches_or_no_digest.append(candidate)
if match_count:
filtered = matches_or_no_digest
else:
# Make sure we're not returning back the given value.
filtered = list(candidates)
if len(filtered) == len(candidates):
discard_message = 'discarding no candidates'
else:
discard_message = 'discarding {} non-matches:\n {}'.format(
len(non_matches),
'\n '.join(str(candidate.link) for candidate in non_matches)
)
logger.debug(
'Checked %s links for project %r against %s hashes '
'(%s matches, %s no digest): %s',
len(candidates),
project_name,
hashes.digest_count,
match_count,
len(matches_or_no_digest) - match_count,
discard_message
)
return filtered
class CandidatePreferences(object):
"""
Encapsulates some of the preferences for filtering and sorting
InstallationCandidate objects.
"""
def __init__(
self,
prefer_binary=False, # type: bool
allow_all_prereleases=False, # type: bool
):
# type: (...) -> None
"""
:param allow_all_prereleases: Whether to allow all pre-releases.
"""
self.allow_all_prereleases = allow_all_prereleases
self.prefer_binary = prefer_binary
class BestCandidateResult(object):
"""A collection of candidates, returned by `PackageFinder.find_best_candidate`.
This class is only intended to be instantiated by CandidateEvaluator's
`compute_best_candidate()` method.
"""
def __init__(
self,
candidates, # type: List[InstallationCandidate]
applicable_candidates, # type: List[InstallationCandidate]
best_candidate, # type: Optional[InstallationCandidate]
):
# type: (...) -> None
"""
:param candidates: A sequence of all available candidates found.
:param applicable_candidates: The applicable candidates.
:param best_candidate: The most preferred candidate found, or None
if no applicable candidates were found.
"""
assert set(applicable_candidates) <= set(candidates)
if best_candidate is None:
assert not applicable_candidates
else:
assert best_candidate in applicable_candidates
self._applicable_candidates = applicable_candidates
self._candidates = candidates
self.best_candidate = best_candidate
def iter_all(self):
# type: () -> Iterable[InstallationCandidate]
"""Iterate through all candidates.
"""
return iter(self._candidates)
def iter_applicable(self):
# type: () -> Iterable[InstallationCandidate]
"""Iterate through the applicable candidates.
"""
return iter(self._applicable_candidates)
class CandidateEvaluator(object):
"""
Responsible for filtering and sorting candidates for installation based
on what tags are valid.
"""
@classmethod
def create(
cls,
project_name, # type: str
target_python=None, # type: Optional[TargetPython]
prefer_binary=False, # type: bool
allow_all_prereleases=False, # type: bool
specifier=None, # type: Optional[specifiers.BaseSpecifier]
hashes=None, # type: Optional[Hashes]
):
# type: (...) -> CandidateEvaluator
"""Create a CandidateEvaluator object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:param hashes: An optional collection of allowed hashes.
"""
if target_python is None:
target_python = TargetPython()
if specifier is None:
specifier = specifiers.SpecifierSet()
supported_tags = target_python.get_tags()
return cls(
project_name=project_name,
supported_tags=supported_tags,
specifier=specifier,
prefer_binary=prefer_binary,
allow_all_prereleases=allow_all_prereleases,
hashes=hashes,
)
def __init__(
self,
project_name, # type: str
supported_tags, # type: List[Tag]
specifier, # type: specifiers.BaseSpecifier
prefer_binary=False, # type: bool
allow_all_prereleases=False, # type: bool
hashes=None, # type: Optional[Hashes]
):
# type: (...) -> None
"""
:param supported_tags: The PEP 425 tags supported by the target
Python in order of preference (most preferred first).
"""
self._allow_all_prereleases = allow_all_prereleases
self._hashes = hashes
self._prefer_binary = prefer_binary
self._project_name = project_name
self._specifier = specifier
self._supported_tags = supported_tags
def get_applicable_candidates(
self,
candidates, # type: List[InstallationCandidate]
):
# type: (...) -> List[InstallationCandidate]
"""
Return the applicable candidates from a list of candidates.
"""
# Using None infers from the specifier instead.
allow_prereleases = self._allow_all_prereleases or None
specifier = self._specifier
versions = {
str(v) for v in specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
(str(c.version) for c in candidates),
prereleases=allow_prereleases,
)
}
# Again, converting version to str to deal with debundling.
applicable_candidates = [
c for c in candidates if str(c.version) in versions
]
filtered_applicable_candidates = filter_unallowed_hashes(
candidates=applicable_candidates,
hashes=self._hashes,
project_name=self._project_name,
)
return sorted(filtered_applicable_candidates, key=self._sort_key)
def _sort_key(self, candidate):
# type: (InstallationCandidate) -> CandidateSortingKey
"""
Function to pass as the `key` argument to a call to sorted() to sort
InstallationCandidates by preference.
Returns a tuple such that tuples sorting as greater using Python's
default comparison operator are more preferred.
The preference is as follows:
First and foremost, candidates with allowed (matching) hashes are
always preferred over candidates without matching hashes. This is
because e.g. if the only candidate with an allowed hash is yanked,
we still want to use that candidate.
Second, excepting hash considerations, candidates that have been
yanked (in the sense of PEP 592) are always less preferred than
candidates that haven't been yanked. Then:
If not finding wheels, they are sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self._supported_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version would have to be considered equal.
"""
valid_tags = self._supported_tags
support_num = len(valid_tags)
build_tag = () # type: BuildTag
binary_preference = 0
link = candidate.link
if link.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(link.filename)
if not wheel.supported(valid_tags):
raise UnsupportedWheel(
"{} is not a supported wheel for this platform. It "
"can't be sorted.".format(wheel.filename)
)
if self._prefer_binary:
binary_preference = 1
pri = -(wheel.support_index_min(valid_tags))
if wheel.build_tag is not None:
match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
has_allowed_hash = int(link.is_hash_allowed(self._hashes))
yank_value = -1 * int(link.is_yanked) # -1 for yanked.
return (
has_allowed_hash, yank_value, binary_preference, candidate.version,
build_tag, pri,
)
def sort_best_candidate(
self,
candidates, # type: List[InstallationCandidate]
):
# type: (...) -> Optional[InstallationCandidate]
"""
Return the best candidate per the instance's sort order, or None if
no candidate is acceptable.
"""
if not candidates:
return None
best_candidate = max(candidates, key=self._sort_key)
return best_candidate
def compute_best_candidate(
self,
candidates, # type: List[InstallationCandidate]
):
# type: (...) -> BestCandidateResult
"""
Compute and return a `BestCandidateResult` instance.
"""
applicable_candidates = self.get_applicable_candidates(candidates)
best_candidate = self.sort_best_candidate(applicable_candidates)
return BestCandidateResult(
candidates,
applicable_candidates=applicable_candidates,
best_candidate=best_candidate,
)
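# Illustrative sketch, not part of pip: the tuples produced by
# CandidateEvaluator._sort_key compare lexicographically, so the
# has_allowed_hash flag (first element) dominates every later field,
# including a newer version. The sort keys below are hypothetical,
# hand-built values.
def _example_sort_key_ordering():
    hashed_old = (1, 0, 0, parse_version("1.0"), (), -5)
    unhashed_new = (0, 0, 0, parse_version("2.0"), (), -1)
    # The allowed-hash candidate wins despite the older version.
    assert max([hashed_old, unhashed_new]) == hashed_old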
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(
self,
link_collector, # type: LinkCollector
target_python, # type: TargetPython
allow_yanked, # type: bool
format_control=None, # type: Optional[FormatControl]
candidate_prefs=None, # type: CandidatePreferences
ignore_requires_python=None, # type: Optional[bool]
):
# type: (...) -> None
"""
This constructor is primarily meant to be used by the create() class
method and from tests.
:param format_control: A FormatControl object, used to control
the selection of source packages / binary packages when consulting
the index and links.
:param candidate_prefs: Options to use when creating a
CandidateEvaluator object.
"""
if candidate_prefs is None:
candidate_prefs = CandidatePreferences()
format_control = format_control or FormatControl(set(), set())
self._allow_yanked = allow_yanked
self._candidate_prefs = candidate_prefs
self._ignore_requires_python = ignore_requires_python
self._link_collector = link_collector
self._target_python = target_python
self.format_control = format_control
# These are boring links that have already been logged somehow.
self._logged_links = set() # type: Set[Link]
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
@classmethod
def create(
cls,
link_collector, # type: LinkCollector
selection_prefs, # type: SelectionPreferences
target_python=None, # type: Optional[TargetPython]
):
# type: (...) -> PackageFinder
"""Create a PackageFinder.
:param selection_prefs: The candidate selection preferences, as a
SelectionPreferences object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
"""
if target_python is None:
target_python = TargetPython()
candidate_prefs = CandidatePreferences(
prefer_binary=selection_prefs.prefer_binary,
allow_all_prereleases=selection_prefs.allow_all_prereleases,
)
return cls(
candidate_prefs=candidate_prefs,
link_collector=link_collector,
target_python=target_python,
allow_yanked=selection_prefs.allow_yanked,
format_control=selection_prefs.format_control,
ignore_requires_python=selection_prefs.ignore_requires_python,
)
@property
def target_python(self):
# type: () -> TargetPython
return self._target_python
@property
def search_scope(self):
# type: () -> SearchScope
return self._link_collector.search_scope
@search_scope.setter
def search_scope(self, search_scope):
# type: (SearchScope) -> None
self._link_collector.search_scope = search_scope
@property
def find_links(self):
# type: () -> List[str]
return self._link_collector.find_links
@property
def index_urls(self):
# type: () -> List[str]
return self.search_scope.index_urls
@property
def trusted_hosts(self):
# type: () -> Iterable[str]
for host_port in self._link_collector.session.pip_trusted_origins:
yield build_netloc(*host_port)
@property
def allow_all_prereleases(self):
# type: () -> bool
return self._candidate_prefs.allow_all_prereleases
def set_allow_all_prereleases(self):
# type: () -> None
self._candidate_prefs.allow_all_prereleases = True
@property
def prefer_binary(self):
# type: () -> bool
return self._candidate_prefs.prefer_binary
def set_prefer_binary(self):
# type: () -> None
self._candidate_prefs.prefer_binary = True
def make_link_evaluator(self, project_name):
# type: (str) -> LinkEvaluator
canonical_name = canonicalize_name(project_name)
formats = self.format_control.get_allowed_formats(canonical_name)
return LinkEvaluator(
project_name=project_name,
canonical_name=canonical_name,
formats=formats,
target_python=self._target_python,
allow_yanked=self._allow_yanked,
ignore_requires_python=self._ignore_requires_python,
)
def _sort_links(self, links):
# type: (Iterable[Link]) -> List[Link]
"""
Return the elements of links in order, non-egg links first and
egg links second, while eliminating duplicates.
"""
eggs, no_eggs = [], []
seen = set() # type: Set[Link]
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _log_skipped_link(self, link, reason):
# type: (Link, Text) -> None
if link not in self._logged_links:
# Mark this as a unicode string to prevent "UnicodeEncodeError:
# 'ascii' codec can't encode character" in Python 2 when
# the reason contains non-ascii characters.
# Also, put the link at the end so the reason is more visible
# and because the link string is usually very long.
logger.debug(u'Skipping link: %s: %s', reason, link)
self._logged_links.add(link)
def get_install_candidate(self, link_evaluator, link):
# type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]
"""
If the link is a candidate for install, convert it to an
InstallationCandidate and return it. Otherwise, return None.
"""
is_candidate, result = link_evaluator.evaluate_link(link)
if not is_candidate:
if result:
self._log_skipped_link(link, reason=result)
return None
return InstallationCandidate(
name=link_evaluator.project_name,
link=link,
# Convert the Text result to str since InstallationCandidate
# accepts str.
version=str(result),
)
def evaluate_links(self, link_evaluator, links):
# type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]
"""
Convert links that are candidates to InstallationCandidate objects.
"""
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, link)
if candidate is not None:
candidates.append(candidate)
return candidates
def process_project_url(self, project_url, link_evaluator):
# type: (Link, LinkEvaluator) -> List[InstallationCandidate]
logger.debug(
'Fetching project page and analyzing links: %s', project_url,
)
html_page = self._link_collector.fetch_page(project_url)
if html_page is None:
return []
page_links = list(parse_links(html_page))
with indent_log():
package_links = self.evaluate_links(
link_evaluator,
links=page_links,
)
return package_links
def find_all_candidates(self, project_name):
# type: (str) -> List[InstallationCandidate]
"""Find all available InstallationCandidate for project_name
This checks index_urls and find_links.
All versions found are returned as an InstallationCandidate list.
See LinkEvaluator.evaluate_link() for details on which files
are accepted.
"""
collected_links = self._link_collector.collect_links(project_name)
link_evaluator = self.make_link_evaluator(project_name)
find_links_versions = self.evaluate_links(
link_evaluator,
links=collected_links.find_links,
)
page_versions = []
for project_url in collected_links.project_urls:
package_links = self.process_project_url(
project_url, link_evaluator=link_evaluator,
)
page_versions.extend(package_links)
file_versions = self.evaluate_links(
link_evaluator,
links=collected_links.files,
)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.link.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return file_versions + find_links_versions + page_versions
def make_candidate_evaluator(
self,
project_name, # type: str
specifier=None, # type: Optional[specifiers.BaseSpecifier]
hashes=None, # type: Optional[Hashes]
):
# type: (...) -> CandidateEvaluator
"""Create a CandidateEvaluator object to use.
"""
candidate_prefs = self._candidate_prefs
return CandidateEvaluator.create(
project_name=project_name,
target_python=self._target_python,
prefer_binary=candidate_prefs.prefer_binary,
allow_all_prereleases=candidate_prefs.allow_all_prereleases,
specifier=specifier,
hashes=hashes,
)
def find_best_candidate(
self,
project_name, # type: str
specifier=None, # type: Optional[specifiers.BaseSpecifier]
hashes=None, # type: Optional[Hashes]
):
# type: (...) -> BestCandidateResult
"""Find matches for the given project and specifier.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:return: A `BestCandidateResult` instance.
"""
candidates = self.find_all_candidates(project_name)
candidate_evaluator = self.make_candidate_evaluator(
project_name=project_name,
specifier=specifier,
hashes=hashes,
)
return candidate_evaluator.compute_best_candidate(candidates)
def find_requirement(self, req, upgrade):
# type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a InstallationCandidate if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
req.name, specifier=req.specifier, hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
installed_version = None # type: Optional[_BaseVersion]
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
def _format_versions(cand_iter):
# type: (Iterable[InstallationCandidate]) -> str
# This repeated parse_version and str() conversion is needed to
# handle different vendoring sources from pip and pkg_resources.
# If we stop using the pkg_resources provided specifier and start
# using our own, we can drop the cast to str().
return ", ".join(sorted(
{str(c.version) for c in cand_iter},
key=parse_version,
)) or "none"
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
_format_versions(best_candidate_result.iter_all()),
)
raise DistributionNotFound(
'No matching distribution found for {}'.format(
req)
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
# We have an existing version, and it's the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
_format_versions(best_candidate_result.iter_applicable()),
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
_format_versions(best_candidate_result.iter_applicable()),
)
return best_candidate
def _find_name_version_sep(fragment, canonical_name):
# type: (str, str) -> int
"""Find the separator's index based on the package's canonical name.
:param fragment: A <package>+<version> filename "fragment" (stem) or
egg fragment.
:param canonical_name: The package's canonical name.
This function is needed since the canonicalized name does not necessarily
have the same length as the egg info's name part. An example::
>>> fragment = 'foo__bar-1.0'
>>> canonical_name = 'foo-bar'
>>> _find_name_version_sep(fragment, canonical_name)
8
"""
# Project name and version must be separated by one single dash. Find all
# occurrences of dashes; if the string in front of it matches the canonical
# name, this is the one separating the name and version parts.
for i, c in enumerate(fragment):
if c != "-":
continue
if canonicalize_name(fragment[:i]) == canonical_name:
return i
raise ValueError("{} does not match {}".format(fragment, canonical_name))
def _extract_version_from_fragment(fragment, canonical_name):
# type: (str, str) -> Optional[str]
"""Parse the version string from a <package>+<version> filename
"fragment" (stem) or egg fragment.
:param fragment: The string to parse. E.g. foo-2.1
:param canonical_name: The canonicalized name of the package this
belongs to.
"""
try:
version_start = _find_name_version_sep(fragment, canonical_name) + 1
except ValueError:
return None
version = fragment[version_start:]
if not version:
return None
return version
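# Illustrative sketch, not part of pip: canonicalize_name() makes the
# separator search tolerant of '_' vs '-' and case differences in the
# name part of a fragment. The fragments below are hypothetical.
def _example_extract_version():
    assert _extract_version_from_fragment("Foo_Bar-2.1", "foo-bar") == "2.1"
    # A fragment for a different project yields None.
    assert _extract_version_from_fragment("unrelated-2.1", "foo-bar") is None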
| Django-locallibrary/env/Lib/site-packages/pip/_internal/index/package_finder.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/index/package_finder.py",
"repo_id": "Django-locallibrary",
"token_count": 15758
} | 8 |
""" PEP 610 """
import json
import re
from pip._vendor import six
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Iterable, Optional, Type, TypeVar, Union
)
T = TypeVar("T")
DIRECT_URL_METADATA_NAME = "direct_url.json"
ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")
__all__ = [
"DirectUrl",
"DirectUrlValidationError",
"DirInfo",
"ArchiveInfo",
"VcsInfo",
]
class DirectUrlValidationError(Exception):
pass
def _get(d, expected_type, key, default=None):
# type: (Dict[str, Any], Type[T], str, Optional[T]) -> Optional[T]
"""Get value from dictionary and verify expected type."""
if key not in d:
return default
value = d[key]
if six.PY2 and expected_type is str:
expected_type = six.string_types # type: ignore
if not isinstance(value, expected_type):
raise DirectUrlValidationError(
"{!r} has unexpected type for {} (expected {})".format(
value, key, expected_type
)
)
return value
def _get_required(d, expected_type, key, default=None):
# type: (Dict[str, Any], Type[T], str, Optional[T]) -> T
value = _get(d, expected_type, key, default)
if value is None:
raise DirectUrlValidationError("{} must have a value".format(key))
return value
def _exactly_one_of(infos):
# type: (Iterable[Optional[InfoType]]) -> InfoType
infos = [info for info in infos if info is not None]
if not infos:
raise DirectUrlValidationError(
"missing one of archive_info, dir_info, vcs_info"
)
if len(infos) > 1:
raise DirectUrlValidationError(
"more than one of archive_info, dir_info, vcs_info"
)
assert infos[0] is not None
return infos[0]
def _filter_none(**kwargs):
# type: (Any) -> Dict[str, Any]
"""Make dict excluding None values."""
return {k: v for k, v in kwargs.items() if v is not None}
class VcsInfo(object):
name = "vcs_info"
def __init__(
self,
vcs, # type: str
commit_id, # type: str
requested_revision=None, # type: Optional[str]
resolved_revision=None, # type: Optional[str]
resolved_revision_type=None, # type: Optional[str]
):
self.vcs = vcs
self.requested_revision = requested_revision
self.commit_id = commit_id
self.resolved_revision = resolved_revision
self.resolved_revision_type = resolved_revision_type
@classmethod
def _from_dict(cls, d):
# type: (Optional[Dict[str, Any]]) -> Optional[VcsInfo]
if d is None:
return None
return cls(
vcs=_get_required(d, str, "vcs"),
commit_id=_get_required(d, str, "commit_id"),
requested_revision=_get(d, str, "requested_revision"),
resolved_revision=_get(d, str, "resolved_revision"),
resolved_revision_type=_get(d, str, "resolved_revision_type"),
)
def _to_dict(self):
# type: () -> Dict[str, Any]
return _filter_none(
vcs=self.vcs,
requested_revision=self.requested_revision,
commit_id=self.commit_id,
resolved_revision=self.resolved_revision,
resolved_revision_type=self.resolved_revision_type,
)
class ArchiveInfo(object):
name = "archive_info"
def __init__(
self,
hash=None, # type: Optional[str]
):
self.hash = hash
@classmethod
def _from_dict(cls, d):
# type: (Optional[Dict[str, Any]]) -> Optional[ArchiveInfo]
if d is None:
return None
return cls(hash=_get(d, str, "hash"))
def _to_dict(self):
# type: () -> Dict[str, Any]
return _filter_none(hash=self.hash)
class DirInfo(object):
name = "dir_info"
def __init__(
self,
editable=False, # type: bool
):
self.editable = editable
@classmethod
def _from_dict(cls, d):
# type: (Optional[Dict[str, Any]]) -> Optional[DirInfo]
if d is None:
return None
return cls(
editable=_get_required(d, bool, "editable", default=False)
)
def _to_dict(self):
# type: () -> Dict[str, Any]
return _filter_none(editable=self.editable or None)
if MYPY_CHECK_RUNNING:
InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
class DirectUrl(object):
def __init__(
self,
url, # type: str
info, # type: InfoType
subdirectory=None, # type: Optional[str]
):
self.url = url
self.info = info
self.subdirectory = subdirectory
def _remove_auth_from_netloc(self, netloc):
# type: (str) -> str
if "@" not in netloc:
return netloc
user_pass, netloc_no_user_pass = netloc.split("@", 1)
if (
isinstance(self.info, VcsInfo) and
self.info.vcs == "git" and
user_pass == "git"
):
return netloc
if ENV_VAR_RE.match(user_pass):
return netloc
return netloc_no_user_pass
@property
def redacted_url(self):
# type: () -> str
"""url with user:password part removed unless it is formed with
environment variables as specified in PEP 610, or it is ``git``
in the case of a git URL.
"""
purl = urllib_parse.urlsplit(self.url)
netloc = self._remove_auth_from_netloc(purl.netloc)
surl = urllib_parse.urlunsplit(
(purl.scheme, netloc, purl.path, purl.query, purl.fragment)
)
return surl
def validate(self):
# type: () -> None
self.from_dict(self.to_dict())
@classmethod
def from_dict(cls, d):
# type: (Dict[str, Any]) -> DirectUrl
return DirectUrl(
url=_get_required(d, str, "url"),
subdirectory=_get(d, str, "subdirectory"),
info=_exactly_one_of(
[
ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
DirInfo._from_dict(_get(d, dict, "dir_info")),
VcsInfo._from_dict(_get(d, dict, "vcs_info")),
]
),
)
def to_dict(self):
# type: () -> Dict[str, Any]
res = _filter_none(
url=self.redacted_url,
subdirectory=self.subdirectory,
)
res[self.info.name] = self.info._to_dict()
return res
@classmethod
def from_json(cls, s):
# type: (str) -> DirectUrl
return cls.from_dict(json.loads(s))
def to_json(self):
# type: () -> str
return json.dumps(self.to_dict(), sort_keys=True)
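# Illustrative sketch, not part of pip: redacted_url strips user:password
# credentials from the URL, and to_json() nests the info object under its
# "archive_info" key. The URL and hash value here are hypothetical.
def _example_direct_url_redaction():
    direct_url = DirectUrl(
        url="https://user:secret@example.com/pkg.tar.gz",
        info=ArchiveInfo(hash="sha256=abc123"),
    )
    assert direct_url.redacted_url == "https://example.com/pkg.tar.gz"
    assert json.loads(direct_url.to_json())["archive_info"] == {
        "hash": "sha256=abc123"
    }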
| Django-locallibrary/env/Lib/site-packages/pip/_internal/models/direct_url.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/models/direct_url.py",
"repo_id": "Django-locallibrary",
"token_count": 3296
} | 9 |
"""Legacy installation process, i.e. `setup.py install`.
"""
import logging
import os
import sys
from distutils.util import change_root
from pip._internal.exceptions import InstallationError
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.setuptools_build import make_setuptools_install_args
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Sequence
from pip._internal.build_env import BuildEnvironment
from pip._internal.models.scheme import Scheme
logger = logging.getLogger(__name__)
class LegacyInstallFailure(Exception):
def __init__(self):
# type: () -> None
self.parent = sys.exc_info()
def install(
install_options, # type: List[str]
global_options, # type: Sequence[str]
root, # type: Optional[str]
home, # type: Optional[str]
prefix, # type: Optional[str]
use_user_site, # type: bool
pycompile, # type: bool
scheme, # type: Scheme
setup_py_path, # type: str
isolated, # type: bool
req_name, # type: str
build_env, # type: BuildEnvironment
unpacked_source_directory, # type: str
req_description, # type: str
):
# type: (...) -> bool
header_dir = scheme.headers
with TempDirectory(kind="record") as temp_dir:
try:
record_filename = os.path.join(temp_dir.path, 'install-record.txt')
install_args = make_setuptools_install_args(
setup_py_path,
global_options=global_options,
install_options=install_options,
record_filename=record_filename,
root=root,
prefix=prefix,
header_dir=header_dir,
home=home,
use_user_site=use_user_site,
no_user_config=isolated,
pycompile=pycompile,
)
runner = runner_with_spinner_message(
"Running setup.py install for {}".format(req_name)
)
with indent_log(), build_env:
runner(
cmd=install_args,
cwd=unpacked_source_directory,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
# Signal to the caller that we didn't install the new package
return False
except Exception:
# Signal to the caller that we didn't install the new package
raise LegacyInstallFailure
# At this point, we have successfully installed the requirement.
# We intentionally do not use any encoding to read the file because
# setuptools writes the file using distutils.file_util.write_file,
# which does not specify an encoding.
with open(record_filename) as f:
record_lines = f.read().splitlines()
def prepend_root(path):
# type: (str) -> str
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
for line in record_lines:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
message = (
"{} did not indicate that it installed an "
".egg-info directory. Only setup.py projects "
"generating .egg-info directories are supported."
).format(req_description)
raise InstallationError(message)
new_lines = []
for line in record_lines:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(prepend_root(filename), egg_info_dir)
)
new_lines.sort()
ensure_dir(egg_info_dir)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
return True
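# Illustrative sketch, not part of pip: mirrors the nested prepend_root()
# helper above. With --root set, absolute paths from the record file are
# re-rooted via distutils' change_root(); relative paths are left alone.
# The POSIX paths below are hypothetical.
def _example_prepend_root():
    if os.name != "posix":
        return
    assert change_root("/tmp/destdir", "/usr/lib/python3/foo.py") == (
        "/tmp/destdir/usr/lib/python3/foo.py"
    )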
| Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/install/legacy.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/install/legacy.py",
"repo_id": "Django-locallibrary",
"token_count": 1846
} | 10 |
from __future__ import absolute_import
import contextlib
import errno
import hashlib
import logging
import os
from pip._vendor import contextlib2
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import TracebackType
from typing import Dict, Iterator, Optional, Set, Type, Union
from pip._internal.req.req_install import InstallRequirement
from pip._internal.models.link import Link
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def update_env_context_manager(**changes):
# type: (str) -> Iterator[None]
target = os.environ
# Save values from the target and change them.
non_existent_marker = object()
saved_values = {} # type: Dict[str, Union[object, str]]
for name, new_value in changes.items():
try:
saved_values[name] = target[name]
except KeyError:
saved_values[name] = non_existent_marker
target[name] = new_value
try:
yield
finally:
# Restore original values in the target.
for name, original_value in saved_values.items():
if original_value is non_existent_marker:
del target[name]
else:
assert isinstance(original_value, str) # for mypy
target[name] = original_value
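# Illustrative sketch, not part of pip: the context manager restores the
# previous environment on exit, deleting keys that did not exist before.
# _PIP_EXAMPLE_VAR is a hypothetical variable name.
def _example_update_env():
    assert "_PIP_EXAMPLE_VAR" not in os.environ
    with update_env_context_manager(_PIP_EXAMPLE_VAR="1"):
        assert os.environ["_PIP_EXAMPLE_VAR"] == "1"
    assert "_PIP_EXAMPLE_VAR" not in os.environ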
@contextlib.contextmanager
def get_requirement_tracker():
# type: () -> Iterator[RequirementTracker]
root = os.environ.get('PIP_REQ_TRACKER')
with contextlib2.ExitStack() as ctx:
if root is None:
root = ctx.enter_context(
TempDirectory(kind='req-tracker')
).path
ctx.enter_context(update_env_context_manager(PIP_REQ_TRACKER=root))
logger.debug("Initialized build tracking at %s", root)
with RequirementTracker(root) as tracker:
yield tracker
class RequirementTracker(object):
def __init__(self, root):
# type: (str) -> None
self._root = root
self._entries = set() # type: Set[InstallRequirement]
logger.debug("Created build tracker: %s", self._root)
def __enter__(self):
# type: () -> RequirementTracker
logger.debug("Entered build tracker: %s", self._root)
return self
def __exit__(
self,
exc_type, # type: Optional[Type[BaseException]]
exc_val, # type: Optional[BaseException]
exc_tb # type: Optional[TracebackType]
):
# type: (...) -> None
self.cleanup()
def _entry_path(self, link):
# type: (Link) -> str
hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
return os.path.join(self._root, hashed)
def add(self, req):
# type: (InstallRequirement) -> None
"""Add an InstallRequirement to build tracking.
"""
assert req.link
# Get the file to write information about this requirement.
entry_path = self._entry_path(req.link)
# Try reading from the file. If it exists and can be read from, a build
# is already in progress, so a LookupError is raised.
try:
with open(entry_path) as fp:
contents = fp.read()
except IOError as e:
# if the error is anything other than "file does not exist", raise.
if e.errno != errno.ENOENT:
raise
else:
message = '{} is already being built: {}'.format(
req.link, contents)
raise LookupError(message)
# If we're here, req should really not be building already.
assert req not in self._entries
# Start tracking this requirement.
with open(entry_path, 'w') as fp:
fp.write(str(req))
self._entries.add(req)
logger.debug('Added %s to build tracker %r', req, self._root)
def remove(self, req):
# type: (InstallRequirement) -> None
"""Remove an InstallRequirement from build tracking.
"""
assert req.link
# Delete the created file and the corresponding entries.
os.unlink(self._entry_path(req.link))
self._entries.remove(req)
logger.debug('Removed %s from build tracker %r', req, self._root)
def cleanup(self):
# type: () -> None
for req in set(self._entries):
self.remove(req)
logger.debug("Removed build tracker: %r", self._root)
@contextlib.contextmanager
def track(self, req):
# type: (InstallRequirement) -> Iterator[None]
self.add(req)
yield
self.remove(req)
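# Illustrative sketch, not part of pip: entries are keyed by the SHA-224
# hash of the link URL, so a second add() for the same link raises
# LookupError. _StubLink and _StubReq are hypothetical stand-ins for
# pip's Link and InstallRequirement.
def _example_requirement_tracker():
    import tempfile

    class _StubLink(object):
        url_without_fragment = "https://example.com/pkg-1.0.tar.gz"

    class _StubReq(object):
        link = _StubLink()

        def __str__(self):
            return "pkg==1.0"

    tracker = RequirementTracker(tempfile.mkdtemp())
    req = _StubReq()
    tracker.add(req)
    try:
        tracker.add(_StubReq())  # same link, so a build is "in progress"
    except LookupError:
        pass
    tracker.remove(req)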
| Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_tracker.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/req/req_tracker.py",
"repo_id": "Django-locallibrary",
"token_count": 1972
} | 11 |
import errno
import fnmatch
import os
import os.path
import random
import shutil
import stat
import sys
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
# NOTE: retrying is not annotated in typeshed as of 2017-07-17, which is
# why we ignore the type on this import.
from pip._vendor.retrying import retry # type: ignore
from pip._vendor.six import PY2
from pip._internal.utils.compat import get_path_uid
from pip._internal.utils.misc import format_size
from pip._internal.utils.typing import MYPY_CHECK_RUNNING, cast
if MYPY_CHECK_RUNNING:
from typing import Any, BinaryIO, Iterator, List, Union
class NamedTemporaryFileResult(BinaryIO):
@property
def file(self):
# type: () -> BinaryIO
pass
def check_path_owner(path):
# type: (str) -> bool
# If we don't have a way to check the effective uid of this process, then
# we'll just assume that we own the directory.
if sys.platform == "win32" or not hasattr(os, "geteuid"):
return True
assert os.path.isabs(path)
previous = None
while path != previous:
if os.path.lexists(path):
# Check if path is writable by current user.
if os.geteuid() == 0:
# Special handling for root user in order to handle properly
# cases where users use sudo without -H flag.
try:
path_uid = get_path_uid(path)
except OSError:
return False
return path_uid == 0
else:
return os.access(path, os.W_OK)
else:
previous, path = path, os.path.dirname(path)
return False # assume we don't own the path
def copy2_fixed(src, dest):
# type: (str, str) -> None
"""Wrap shutil.copy2() but map errors copying socket files to
SpecialFileError as expected.
See also https://bugs.python.org/issue37700.
"""
try:
shutil.copy2(src, dest)
except (OSError, IOError):
for f in [src, dest]:
try:
is_socket_file = is_socket(f)
except OSError:
# An error has already occurred. Another error here is not
# a problem and we can ignore it.
pass
else:
if is_socket_file:
raise shutil.SpecialFileError(
"`{f}` is a socket".format(**locals()))
raise
def is_socket(path):
# type: (str) -> bool
return stat.S_ISSOCK(os.lstat(path).st_mode)
@contextmanager
def adjacent_tmp_file(path, **kwargs):
# type: (str, **Any) -> Iterator[NamedTemporaryFileResult]
"""Return a file-like object pointing to a tmp file next to path.
The file is created securely and is ensured to be written to disk
after the context reaches its end.
kwargs will be passed to tempfile.NamedTemporaryFile to control
the way the temporary file will be opened.
"""
with NamedTemporaryFile(
delete=False,
dir=os.path.dirname(path),
prefix=os.path.basename(path),
suffix='.tmp',
**kwargs
) as f:
result = cast('NamedTemporaryFileResult', f)
try:
yield result
finally:
result.file.flush()
os.fsync(result.file.fileno())
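# Illustrative sketch, not part of pip: the usual crash-safe write pattern
# built on adjacent_tmp_file() plus the retrying replace() defined below.
# The target path is a hypothetical file in a fresh temporary directory.
def _example_adjacent_tmp_file():
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), "config.txt")
    with adjacent_tmp_file(target) as f:
        f.write(b"key = value\n")
    # Atomically move the fully written temp file into place.
    replace(f.name, target)
    with open(target, "rb") as fp:
        assert fp.read() == b"key = value\n"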
_replace_retry = retry(stop_max_delay=1000, wait_fixed=250)
if PY2:
@_replace_retry
def replace(src, dest):
# type: (str, str) -> None
try:
os.rename(src, dest)
except OSError:
os.remove(dest)
os.rename(src, dest)
else:
replace = _replace_retry(os.replace)
# test_writable_dir and _test_writable_dir_win are copied from Flit,
# with the author's agreement to also place them under pip's license.
def test_writable_dir(path):
# type: (str) -> bool
"""Check if a directory is writable.
Uses os.access() on POSIX, tries creating files on Windows.
"""
# If the directory doesn't exist, find the closest parent that does.
while not os.path.isdir(path):
parent = os.path.dirname(path)
if parent == path:
break # Should never get here, but infinite loops are bad
path = parent
if os.name == 'posix':
return os.access(path, os.W_OK)
return _test_writable_dir_win(path)
def _test_writable_dir_win(path):
# type: (str) -> bool
# os.access doesn't work on Windows: http://bugs.python.org/issue2528
# and we can't use tempfile: http://bugs.python.org/issue22107
basename = 'accesstest_deleteme_fishfingers_custard_'
alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
for _ in range(10):
name = basename + ''.join(random.choice(alphabet) for _ in range(6))
file = os.path.join(path, name)
try:
fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
# Python 2 doesn't support FileExistsError and PermissionError.
except OSError as e:
# exception FileExistsError
if e.errno == errno.EEXIST:
continue
# exception PermissionError
if e.errno == errno.EPERM or e.errno == errno.EACCES:
# This could be because there's a directory with the same name.
# But it's highly unlikely there's a directory called that,
# so we'll assume it's because the parent dir is not writable.
# This could as well be because the parent dir is not readable,
# due to non-privileged user access.
return False
raise
else:
os.close(fd)
os.unlink(file)
return True
# This should never be reached
raise EnvironmentError(
'Unexpected condition testing for writable directory'
)
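# Illustrative sketch, not part of pip: a fresh temporary directory reports
# as writable, and a nonexistent child path falls back to checking its
# closest existing parent. The paths below are hypothetical.
def _example_test_writable_dir():
    import tempfile
    d = tempfile.mkdtemp()
    assert test_writable_dir(d)
    assert test_writable_dir(os.path.join(d, "does", "not", "exist"))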
def find_files(path, pattern):
# type: (str, str) -> List[str]
"""Returns a list of absolute paths of files beneath path, recursively,
with filenames which match the UNIX-style shell glob pattern."""
result = [] # type: List[str]
for root, _, files in os.walk(path):
matches = fnmatch.filter(files, pattern)
result.extend(os.path.join(root, f) for f in matches)
return result
def file_size(path):
# type: (str) -> Union[int, float]
# If it's a symlink, return 0.
if os.path.islink(path):
return 0
return os.path.getsize(path)
def format_file_size(path):
# type: (str) -> str
return format_size(file_size(path))
def directory_size(path):
# type: (str) -> Union[int, float]
size = 0.0
for root, _dirs, files in os.walk(path):
for filename in files:
file_path = os.path.join(root, filename)
size += file_size(file_path)
return size
def format_directory_size(path):
# type: (str) -> str
return format_size(directory_size(path))
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/filesystem.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/filesystem.py",
"repo_id": "Django-locallibrary",
"token_count": 2997
} | 12 |
import os
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Text, Union
def get_url_scheme(url):
# type: (Union[str, Text]) -> Optional[Text]
if ':' not in url:
return None
return url.split(':', 1)[0].lower()
def path_to_url(path):
# type: (Union[str, Text]) -> str
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
def url_to_path(url):
# type: (str) -> str
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not {url!r})"
.format(**locals()))
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
if not netloc or netloc == 'localhost':
# According to RFC 8089, same as empty authority.
netloc = ''
elif sys.platform == 'win32':
# If we have a UNC path, prepend UNC share notation.
netloc = '\\\\' + netloc
else:
raise ValueError(
'non-local file URIs are not supported on this platform: {url!r}'
.format(**locals())
)
path = urllib_request.url2pathname(netloc + path)
return path
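# Illustrative sketch, not part of pip: path_to_url() and url_to_path()
# are inverses for local absolute paths. Shown with a hypothetical POSIX
# path; on Windows the URL gains a drive-letter form instead.
def _example_url_roundtrip():
    if sys.platform == "win32":
        return
    assert path_to_url("/tmp/pkg-1.0.tar.gz") == "file:///tmp/pkg-1.0.tar.gz"
    assert url_to_path("file:///tmp/pkg-1.0.tar.gz") == "/tmp/pkg-1.0.tar.gz"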
| Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/urls.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/urls.py",
"repo_id": "Django-locallibrary",
"token_count": 633
} | 13 |
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. This idea of this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# This error used to be silenced in earlier variants of this file, to instead
# raise the error when pip actually tries to use the missing module.
# Based on inputs in #5354, this was changed to explicitly raise the error.
# Re-raising the exception without modifying it is an intentional choice.
raise
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
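# Illustrative sketch, not part of pip: demonstrates the sys.modules
# aliasing that vendored() performs when DEBUNDLED is true, using the
# stdlib "json" module as a hypothetical stand-in for a dependency.
def _example_vendored_alias():
    import json
    alias = "{0}.{1}".format(__name__, "json")
    sys.modules[alias] = sys.modules["json"]
    assert sys.modules[alias] is json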
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("appdirs")
vendored("cachecontrol")
vendored("certifi")
vendored("colorama")
vendored("contextlib2")
vendored("distlib")
vendored("distro")
vendored("html5lib")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("six.moves.urllib.parse")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pep517")
vendored("pkg_resources")
vendored("progress")
vendored("retrying")
vendored("requests")
vendored("requests.exceptions")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
vendored("resolvelib")
vendored("toml")
vendored("toml.encoder")
vendored("toml.decoder")
vendored("urllib3")
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 1598
} | 14 |
from io import BytesIO
class CallbackFileWrapper(object):
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
"""
def __init__(self, fp, callback):
self.__buf = BytesIO()
self.__fp = fp
self.__callback = callback
def __getattr__(self, name):
# The vagaries of garbage collection mean that self.__fp is
# not always set. Using __getattribute__ with the mangled
# private name [0] lets us look up the attribute value and raise
# an AttributeError when it doesn't exist. This stops things from
# infinitely recursing calls to getattr in the case where
# self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__("_CallbackFileWrapper__fp")
return getattr(fp, name)
def __is_fp_closed(self):
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
return self.__fp.closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def _close(self):
if self.__callback:
self.__callback(self.__buf.getvalue())
# We assign this to None here, because otherwise we can get into
# really tricky problems where the CPython interpreter deadlocks
# because the callback is holding a reference to something which
# has a __del__ method. Setting this to None breaks the cycle
# and allows the garbage collector to do its thing normally.
self.__callback = None
def read(self, amt=None):
data = self.__fp.read(amt)
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
def _safe_read(self, amt):
data = self.__fp._safe_read(amt)
if amt == 2 and data == b"\r\n":
# urllib executes this read to toss the CRLF at the end
# of the chunk.
return data
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
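# Illustrative sketch, not part of cachecontrol: _FakeResponse is a
# hypothetical stand-in for the http response object being wrapped (its
# .fp becomes None once exhausted). Draining the wrapper fires the
# callback with the complete body.
def _example_callback_wrapper():
    captured = []

    class _FakeResponse(object):
        def __init__(self, data):
            self._buf = BytesIO(data)
            self.fp = self  # non-None while the stream is open

        def read(self, amt=None):
            chunk = self._buf.read(amt)
            if not chunk:
                self.fp = None  # mimics httplib closing the stream
            return chunk

    wrapped = CallbackFileWrapper(_FakeResponse(b"cached body"), captured.append)
    assert wrapped.read() == b"cached body"
    assert wrapped.read() == b""  # hitting EOF triggers the callback
    assert captured == [b"cached body"]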
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py",
"repo_id": "Django-locallibrary",
"token_count": 1022
} | 15 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .enums import LanguageFilter, ProbingState, MachineState
from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
ISO2022KR_SM_MODEL)
class EscCharSetProber(CharSetProber):
"""
This CharSetProber uses a "code scheme" approach for detecting encodings,
whereby easily recognizable escape or shift sequences are relied on to
identify these encodings.
"""
def __init__(self, lang_filter=None):
super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
self.coding_sm = []
if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
if self.lang_filter & LanguageFilter.JAPANESE:
self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
if self.lang_filter & LanguageFilter.KOREAN:
self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
self.active_sm_count = None
self._detected_charset = None
self._detected_language = None
self._state = None
self.reset()
def reset(self):
super(EscCharSetProber, self).reset()
for coding_sm in self.coding_sm:
if not coding_sm:
continue
coding_sm.active = True
coding_sm.reset()
self.active_sm_count = len(self.coding_sm)
self._detected_charset = None
self._detected_language = None
@property
def charset_name(self):
return self._detected_charset
@property
def language(self):
return self._detected_language
def get_confidence(self):
if self._detected_charset:
return 0.99
else:
return 0.00
def feed(self, byte_str):
for c in byte_str:
for coding_sm in self.coding_sm:
if not coding_sm or not coding_sm.active:
continue
coding_state = coding_sm.next_state(c)
if coding_state == MachineState.ERROR:
coding_sm.active = False
self.active_sm_count -= 1
if self.active_sm_count <= 0:
self._state = ProbingState.NOT_ME
return self.state
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
self._detected_charset = coding_sm.get_coding_state_machine()
self._detected_language = coding_sm.language
return self.state
return self.state
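# Illustrative sketch, not part of chardet (behavior assumed from the
# escape state machines above): the ISO-2022-JP escape sequence ESC $ B
# should be enough for the prober to commit to that charset.
def _example_esc_prober():
    prober = EscCharSetProber(lang_filter=LanguageFilter.ALL)
    prober.feed(b"\x1b$B")
    assert prober.charset_name == "ISO-2022-JP"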
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/escprober.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/escprober.py",
"repo_id": "Django-locallibrary",
"token_count": 1612
} | 16 |
[posix_prefix]
# Configuration directories. Some of these come straight out of the
# configure script. They are for implementing the other variables, not to
# be used directly in [resource_locations].
confdir = /etc
datadir = /usr/share
libdir = /usr/lib
statedir = /var
# User resource directory
local = ~/.local/{distribution.name}
stdlib = {base}/lib/python{py_version_short}
platstdlib = {platbase}/lib/python{py_version_short}
purelib = {base}/lib/python{py_version_short}/site-packages
platlib = {platbase}/lib/python{py_version_short}/site-packages
include = {base}/include/python{py_version_short}{abiflags}
platinclude = {platbase}/include/python{py_version_short}{abiflags}
data = {base}
[posix_home]
stdlib = {base}/lib/python
platstdlib = {base}/lib/python
purelib = {base}/lib/python
platlib = {base}/lib/python
include = {base}/include/python
platinclude = {base}/include/python
scripts = {base}/bin
data = {base}
[nt]
stdlib = {base}/Lib
platstdlib = {base}/Lib
purelib = {base}/Lib/site-packages
platlib = {base}/Lib/site-packages
include = {base}/Include
platinclude = {base}/Include
scripts = {base}/Scripts
data = {base}
[os2]
stdlib = {base}/Lib
platstdlib = {base}/Lib
purelib = {base}/Lib/site-packages
platlib = {base}/Lib/site-packages
include = {base}/Include
platinclude = {base}/Include
scripts = {base}/Scripts
data = {base}
[os2_home]
stdlib = {userbase}/lib/python{py_version_short}
platstdlib = {userbase}/lib/python{py_version_short}
purelib = {userbase}/lib/python{py_version_short}/site-packages
platlib = {userbase}/lib/python{py_version_short}/site-packages
include = {userbase}/include/python{py_version_short}
scripts = {userbase}/bin
data = {userbase}
[nt_user]
stdlib = {userbase}/Python{py_version_nodot}
platstdlib = {userbase}/Python{py_version_nodot}
purelib = {userbase}/Python{py_version_nodot}/site-packages
platlib = {userbase}/Python{py_version_nodot}/site-packages
include = {userbase}/Python{py_version_nodot}/Include
scripts = {userbase}/Scripts
data = {userbase}
[posix_user]
stdlib = {userbase}/lib/python{py_version_short}
platstdlib = {userbase}/lib/python{py_version_short}
purelib = {userbase}/lib/python{py_version_short}/site-packages
platlib = {userbase}/lib/python{py_version_short}/site-packages
include = {userbase}/include/python{py_version_short}
scripts = {userbase}/bin
data = {userbase}
[osx_framework_user]
stdlib = {userbase}/lib/python
platstdlib = {userbase}/lib/python
purelib = {userbase}/lib/python/site-packages
platlib = {userbase}/lib/python/site-packages
include = {userbase}/include
scripts = {userbase}/bin
data = {userbase}
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg",
"repo_id": "Django-locallibrary",
"token_count": 987
} | 17 |
from __future__ import absolute_import, division, unicode_literals
from .py import Trie
__all__ = ["Trie"]
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 35
} | 18 |
"""A collection of modules for building different kinds of trees from HTML
documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1. A set of classes for various types of elements: Document, Doctype, Comment,
Element. These must implement the interface of ``base.treebuilders.Node``
(although comment nodes have a different signature for their constructor,
see ``treebuilders.etree.Comment``) Textual content may also be implemented
as another node type, or not, as your tree implementation requires.
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
* ``documentClass`` - the class to use for the bottommost node of a document
* ``elementClass`` - the class to use for HTML Elements
* ``commentClass`` - the class to use for comments
* ``doctypeClass`` - the class to use for doctypes
It also has one required method:
* ``getDocument`` - Returns the root node of the complete document tree
3. If you wish to run the unit tests, you must also create a ``testSerializer``
method on your treebuilder which accepts a node and returns a string
containing Node and its children serialized according to the format used in
the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of trees with built-in support
:arg treeType: the name of the tree type required (case-insensitive). Supported
values are:
* "dom" - A generic builder for DOM implementations, defaulting to a
xml.dom.minidom based implementation.
* "etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to xml.etree.cElementTree if
available and xml.etree.ElementTree if not.
* "lxml" - A etree-based builder for lxml.etree, handling limitations
of lxml's implementation.
:arg implementation: (Currently applies to the "etree" and "dom" tree
types). A module implementing the tree type e.g. xml.etree.ElementTree
or xml.etree.cElementTree.
:arg kwargs: Any additional options to pass to the TreeBuilder when
creating it.
Example:
>>> from html5lib.treebuilders import getTreeBuilder
>>> builder = getTreeBuilder('etree')
"""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 1193
} | 19 |
from .core import *
from .codec import *
def ToASCII(label):
return encode(label)
def ToUnicode(label):
return decode(label)
def nameprep(s):
raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/compat.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/idna/compat.py",
"repo_id": "Django-locallibrary",
"token_count": 80
} | 20 |
"""Fallback pure Python implementation of msgpack"""
from datetime import datetime as _DateTime
import sys
import struct
PY2 = sys.version_info[0] == 2
if PY2:
int_types = (int, long)
def dict_iteritems(d):
return d.iteritems()
else:
int_types = int
unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
if sys.version_info < (3, 5):
# Ugly hack...
RecursionError = RuntimeError
def _is_recursionerror(e):
return (
len(e.args) == 1
and isinstance(e.args[0], str)
and e.args[0].startswith("maximum recursion depth exceeded")
)
else:
def _is_recursionerror(e):
return True
if hasattr(sys, "pypy_version_info"):
# The io-based StringIO is slow on PyPy; PyPy's own StringBuilder is
# fastest, so prefer it here.
from __pypy__ import newlist_hint
try:
from __pypy__.builders import BytesBuilder as StringBuilder
except ImportError:
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b""):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
if isinstance(s, memoryview):
s = s.tobytes()
elif isinstance(s, bytearray):
s = bytes(s)
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from .exceptions import BufferFull, OutOfData, ExtraData, FormatError, StackError
from .ext import ExtType, Timestamp
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def _check_type_strict(obj, t, type=type, tuple=tuple):
if type(t) is tuple:
return type(obj) in t
else:
return type(obj) is t
def _get_data_from_buffer(obj):
view = memoryview(obj)
if view.itemsize != 1:
raise ValueError("cannot unpack from multi-byte object")
return view
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises ``ExtraData`` when *packed* contains extra bytes.
Raises ``ValueError`` when *packed* is incomplete.
Raises ``FormatError`` when *packed* is not valid msgpack.
Raises ``StackError`` when *packed* contains too deeply nested data.
Other exceptions can be raised during unpacking.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._unpack()
except OutOfData:
raise ValueError("Unpack failed: incomplete input")
except RecursionError as e:
if _is_recursionerror(e):
raise StackError
raise
if unpacker._got_extradata():
raise ExtraData(ret, unpacker._get_extradata())
return ret
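# Illustrative sketch, not part of msgpack: a hand-encoded fixmap.
# b"\x81\xa1a\x01" is fixmap(1) + fixstr "a" + positive fixint 1, which
# unpackb() decodes to {"a": 1} with the default raw=False.
def _example_unpackb():
    assert unpackb(b"\x81\xa1a\x01") == {"a": 1}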
if sys.version_info < (2, 7, 6):
def _unpack_from(f, b, o=0):
"""Explicit type cast for legacy struct.unpack_from"""
return struct.unpack_from(f, bytes(b), o)
else:
_unpack_from = struct.unpack_from
class Unpacker(object):
"""Streaming unpacker.
Arguments:
:param file_like:
File-like object having `.read(n)` method.
If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
:param int read_size:
Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
:param bool use_list:
If true, unpack msgpack array to Python list.
Otherwise, unpack to Python tuple. (default: True)
:param bool raw:
If true, unpack msgpack raw to Python bytes.
Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).
:param int timestamp:
Control how timestamp type is unpacked:
0 - Timestamp
1 - float (Seconds from the EPOCH)
2 - int (Nanoseconds from the EPOCH)
3 - datetime.datetime (UTC). Python 2 is not supported.
:param bool strict_map_key:
If true (default), only str or bytes are accepted for map (dict) keys.
:param callable object_hook:
When specified, it should be callable.
Unpacker calls it with a dict argument after unpacking msgpack map.
(See also simplejson)
:param callable object_pairs_hook:
When specified, it should be callable.
Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
(See also simplejson)
:param str unicode_errors:
The error handler for decoding unicode. (default: 'strict')
        This option should be used only when you have msgpack data which
        contains invalid UTF-8 strings.
:param int max_buffer_size:
        Limits size of data waiting to be unpacked. 0 means 2**31-1.
        The default value is 100*1024*1024 (100MiB).
        Raises `BufferFull` exception when it is insufficient.
        You should set this parameter when unpacking data from an untrusted source.
:param int max_str_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of str. (default: max_buffer_size)
:param int max_bin_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of bin. (default: max_buffer_size)
:param int max_array_len:
Limits max length of array.
(default: max_buffer_size)
:param int max_map_len:
Limits max length of map.
(default: max_buffer_size//2)
:param int max_ext_len:
Deprecated, use *max_buffer_size* instead.
Limits max size of ext type. (default: max_buffer_size)
Example of streaming deserialize from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
process(o)
Example of streaming deserialize from socket::
        unpacker = Unpacker(max_buffer_size=max_buffer_size)
while True:
buf = sock.recv(1024**2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
process(o)
Raises ``ExtraData`` when *packed* contains extra bytes.
Raises ``OutOfData`` when *packed* is incomplete.
Raises ``FormatError`` when *packed* is not valid msgpack.
    Raises ``StackError`` when *packed* contains too deeply nested data.
Other exceptions can be raised during unpacking.
"""
def __init__(
self,
file_like=None,
read_size=0,
use_list=True,
raw=False,
timestamp=0,
strict_map_key=True,
object_hook=None,
object_pairs_hook=None,
list_hook=None,
unicode_errors=None,
max_buffer_size=100 * 1024 * 1024,
ext_hook=ExtType,
max_str_len=-1,
max_bin_len=-1,
max_array_len=-1,
max_map_len=-1,
max_ext_len=-1,
):
if unicode_errors is None:
unicode_errors = "strict"
if file_like is None:
self._feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._feeding = False
#: array of bytes fed.
self._buffer = bytearray()
        #: The position we are currently reading from.
self._buff_i = 0
        # When Unpacker is used as an iterable, between the calls to next(),
        # the buffer is not "consumed" completely, for efficiency's sake.
        # Instead, it is done sloppily. To make sure we raise BufferFull at
        # the correct moments, we have to keep track of how sloppy we were.
        # Furthermore, when the buffer is incomplete (that is: in the case
        # we raise an OutOfData) we need to roll back the buffer to the
        # correct state, which _buf_checkpoint records.
self._buf_checkpoint = 0
if not max_buffer_size:
max_buffer_size = 2 ** 31 - 1
if max_str_len == -1:
max_str_len = max_buffer_size
if max_bin_len == -1:
max_bin_len = max_buffer_size
if max_array_len == -1:
max_array_len = max_buffer_size
if max_map_len == -1:
max_map_len = max_buffer_size // 2
if max_ext_len == -1:
max_ext_len = max_buffer_size
self._max_buffer_size = max_buffer_size
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 16 * 1024)
self._raw = bool(raw)
self._strict_map_key = bool(strict_map_key)
self._unicode_errors = unicode_errors
self._use_list = use_list
if not (0 <= timestamp <= 3):
raise ValueError("timestamp must be 0..3")
self._timestamp = timestamp
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
self._max_str_len = max_str_len
self._max_bin_len = max_bin_len
self._max_array_len = max_array_len
self._max_map_len = max_map_len
self._max_ext_len = max_ext_len
self._stream_offset = 0
if list_hook is not None and not callable(list_hook):
raise TypeError("`list_hook` is not callable")
if object_hook is not None and not callable(object_hook):
raise TypeError("`object_hook` is not callable")
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError("`object_pairs_hook` is not callable")
if object_hook is not None and object_pairs_hook is not None:
raise TypeError(
"object_pairs_hook and object_hook are mutually " "exclusive"
)
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
assert self._feeding
view = _get_data_from_buffer(next_bytes)
if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size:
raise BufferFull
# Strip buffer before checkpoint before reading file.
if self._buf_checkpoint > 0:
del self._buffer[: self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
self._buffer.extend(view)
def _consume(self):
""" Gets rid of the used parts of the buffer. """
self._stream_offset += self._buff_i - self._buf_checkpoint
self._buf_checkpoint = self._buff_i
def _got_extradata(self):
return self._buff_i < len(self._buffer)
def _get_extradata(self):
return self._buffer[self._buff_i :]
def read_bytes(self, n):
ret = self._read(n)
self._consume()
return ret
def _read(self, n):
# (int) -> bytearray
self._reserve(n)
i = self._buff_i
self._buff_i = i + n
return self._buffer[i : i + n]
def _reserve(self, n):
remain_bytes = len(self._buffer) - self._buff_i - n
# Fast path: buffer has n bytes already
if remain_bytes >= 0:
return
if self._feeding:
self._buff_i = self._buf_checkpoint
raise OutOfData
# Strip buffer before checkpoint before reading file.
if self._buf_checkpoint > 0:
del self._buffer[: self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Read from file
remain_bytes = -remain_bytes
while remain_bytes > 0:
to_read_bytes = max(self._read_size, remain_bytes)
read_data = self.file_like.read(to_read_bytes)
if not read_data:
break
assert isinstance(read_data, bytes)
self._buffer += read_data
remain_bytes -= len(read_data)
if len(self._buffer) < n + self._buff_i:
self._buff_i = 0 # rollback
raise OutOfData
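    # The first byte of a msgpack value selects its type in _read_header
    # below: 0x00-0x7f and 0xe0-0xff are fixint, 0x80-0x8f fixmap,
    # 0x90-0x9f fixarray, 0xa0-0xbf fixstr, and 0xc0-0xdf introduce the
    # nil, bool, bin, ext, float, int, str, array and map families with
    # explicit length fields (see the msgpack spec for the full table).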
def _read_header(self, execute=EX_CONSTRUCT):
typ = TYPE_IMMEDIATE
n = 0
obj = None
self._reserve(1)
b = self._buffer[self._buff_i]
self._buff_i += 1
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = -1 - (b ^ 0xFF)
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
typ = TYPE_RAW
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
elif b == 0xC0:
obj = None
elif b == 0xC2:
obj = False
elif b == 0xC3:
obj = True
elif b == 0xC4:
typ = TYPE_BIN
self._reserve(1)
n = self._buffer[self._buff_i]
self._buff_i += 1
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xC5:
typ = TYPE_BIN
self._reserve(2)
n = _unpack_from(">H", self._buffer, self._buff_i)[0]
self._buff_i += 2
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xC6:
typ = TYPE_BIN
self._reserve(4)
n = _unpack_from(">I", self._buffer, self._buff_i)[0]
self._buff_i += 4
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xC7: # ext 8
typ = TYPE_EXT
self._reserve(2)
L, n = _unpack_from("Bb", self._buffer, self._buff_i)
self._buff_i += 2
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xC8: # ext 16
typ = TYPE_EXT
self._reserve(3)
L, n = _unpack_from(">Hb", self._buffer, self._buff_i)
self._buff_i += 3
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xC9: # ext 32
typ = TYPE_EXT
self._reserve(5)
L, n = _unpack_from(">Ib", self._buffer, self._buff_i)
self._buff_i += 5
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xCA:
self._reserve(4)
obj = _unpack_from(">f", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xCB:
self._reserve(8)
obj = _unpack_from(">d", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xCC:
self._reserve(1)
obj = self._buffer[self._buff_i]
self._buff_i += 1
elif b == 0xCD:
self._reserve(2)
obj = _unpack_from(">H", self._buffer, self._buff_i)[0]
self._buff_i += 2
elif b == 0xCE:
self._reserve(4)
obj = _unpack_from(">I", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xCF:
self._reserve(8)
obj = _unpack_from(">Q", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xD0:
self._reserve(1)
obj = _unpack_from("b", self._buffer, self._buff_i)[0]
self._buff_i += 1
elif b == 0xD1:
self._reserve(2)
obj = _unpack_from(">h", self._buffer, self._buff_i)[0]
self._buff_i += 2
elif b == 0xD2:
self._reserve(4)
obj = _unpack_from(">i", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xD3:
self._reserve(8)
obj = _unpack_from(">q", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xD4: # fixext 1
typ = TYPE_EXT
if self._max_ext_len < 1:
raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len))
self._reserve(2)
n, obj = _unpack_from("b1s", self._buffer, self._buff_i)
self._buff_i += 2
elif b == 0xD5: # fixext 2
typ = TYPE_EXT
if self._max_ext_len < 2:
raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len))
self._reserve(3)
n, obj = _unpack_from("b2s", self._buffer, self._buff_i)
self._buff_i += 3
elif b == 0xD6: # fixext 4
typ = TYPE_EXT
if self._max_ext_len < 4:
raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len))
self._reserve(5)
n, obj = _unpack_from("b4s", self._buffer, self._buff_i)
self._buff_i += 5
elif b == 0xD7: # fixext 8
typ = TYPE_EXT
if self._max_ext_len < 8:
raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len))
self._reserve(9)
n, obj = _unpack_from("b8s", self._buffer, self._buff_i)
self._buff_i += 9
elif b == 0xD8: # fixext 16
typ = TYPE_EXT
if self._max_ext_len < 16:
raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len))
self._reserve(17)
n, obj = _unpack_from("b16s", self._buffer, self._buff_i)
self._buff_i += 17
elif b == 0xD9:
typ = TYPE_RAW
self._reserve(1)
n = self._buffer[self._buff_i]
self._buff_i += 1
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xDA:
typ = TYPE_RAW
self._reserve(2)
(n,) = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xDB:
typ = TYPE_RAW
self._reserve(4)
(n,) = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xDC:
typ = TYPE_ARRAY
self._reserve(2)
(n,) = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b == 0xDD:
typ = TYPE_ARRAY
self._reserve(4)
(n,) = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b == 0xDE:
self._reserve(2)
(n,) = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
typ = TYPE_MAP
elif b == 0xDF:
self._reserve(4)
(n,) = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
typ = TYPE_MAP
else:
raise FormatError("Unknown header: 0x%x" % b)
return typ, n, obj
def _unpack(self, execute=EX_CONSTRUCT):
typ, n, obj = self._read_header(execute)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise ValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise ValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._unpack(EX_SKIP)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._unpack(EX_CONSTRUCT))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._unpack(EX_SKIP)
self._unpack(EX_SKIP)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT))
for _ in xrange(n)
)
else:
ret = {}
for _ in xrange(n):
key = self._unpack(EX_CONSTRUCT)
if self._strict_map_key and type(key) not in (unicode, bytes):
raise ValueError(
"%s is not allowed for map key" % str(type(key))
)
if not PY2 and type(key) is str:
key = sys.intern(key)
ret[key] = self._unpack(EX_CONSTRUCT)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._raw:
obj = bytes(obj)
else:
obj = obj.decode("utf_8", self._unicode_errors)
return obj
if typ == TYPE_BIN:
return bytes(obj)
if typ == TYPE_EXT:
if n == -1: # timestamp
ts = Timestamp.from_bytes(bytes(obj))
if self._timestamp == 1:
return ts.to_unix()
elif self._timestamp == 2:
return ts.to_unix_nano()
elif self._timestamp == 3:
return ts.to_datetime()
else:
return ts
else:
return self._ext_hook(n, bytes(obj))
assert typ == TYPE_IMMEDIATE
return obj
def __iter__(self):
return self
def __next__(self):
try:
ret = self._unpack(EX_CONSTRUCT)
self._consume()
return ret
except OutOfData:
self._consume()
raise StopIteration
except RecursionError:
raise StackError
next = __next__
def skip(self):
self._unpack(EX_SKIP)
self._consume()
def unpack(self):
try:
ret = self._unpack(EX_CONSTRUCT)
except RecursionError:
raise StackError
self._consume()
return ret
def read_array_header(self):
ret = self._unpack(EX_READ_ARRAY_HEADER)
self._consume()
return ret
def read_map_header(self):
ret = self._unpack(EX_READ_MAP_HEADER)
self._consume()
return ret
def tell(self):
return self._stream_offset
class Packer(object):
"""
MessagePack Packer
Usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
Convert user type to builtin type that Packer supports.
        See also simplejson's document. A sketch of such a callable appears below.
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set to false, use `bytes()` to get the content and `.reset()` to clear the buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enables str8 type for unicode. (default: True)
:param bool strict_types:
If set to true, types will be checked to be exact. Derived classes
from serializable types will not be serialized and will be
treated as unsupported type and forwarded to default.
Additionally tuples will not be serialized as lists.
This is useful when trying to implement accurate serialization
for python types.
:param bool datetime:
If set to true, datetime with tzinfo is packed into Timestamp type.
Note that the tzinfo is stripped in the timestamp.
You can get UTC datetime with `timestamp=3` option of the Unpacker.
(Python 2 is not supported).
:param str unicode_errors:
The error handler for encoding unicode. (default: 'strict')
DO NOT USE THIS!! This option is kept for very specific usage.
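    Example of a *default* callable (a minimal sketch; the ``decimal``
    type is illustrative only)::
        import decimal
        def default(obj):
            if isinstance(obj, decimal.Decimal):
                return str(obj)
            raise TypeError("Cannot serialize %r" % (obj,))
        packer = Packer(default=default)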
"""
def __init__(
self,
default=None,
use_single_float=False,
autoreset=True,
use_bin_type=True,
strict_types=False,
datetime=False,
unicode_errors=None,
):
self._strict_types = strict_types
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._buffer = StringIO()
if PY2 and datetime:
raise ValueError("datetime is not supported in Python 2")
self._datetime = bool(datetime)
self._unicode_errors = unicode_errors or "strict"
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(
self,
obj,
nest_limit=DEFAULT_RECURSE_LIMIT,
check=isinstance,
check_type_strict=_check_type_strict,
):
default_used = False
if self._strict_types:
check = check_type_strict
list_types = list
else:
list_types = (list, tuple)
while True:
if nest_limit < 0:
raise ValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if check(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if check(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xFF:
return self._buffer.write(struct.pack("BB", 0xCC, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
if 0xFF < obj <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xCD, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
if 0xFFFF < obj <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xCE, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = True
continue
raise OverflowError("Integer value out of range")
if check(obj, (bytes, bytearray)):
n = len(obj)
if n >= 2 ** 32:
raise ValueError("%s is too large" % type(obj).__name__)
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, unicode):
obj = obj.encode("utf-8", self._unicode_errors)
n = len(obj)
if n >= 2 ** 32:
raise ValueError("String is too large")
self._pack_raw_header(n)
return self._buffer.write(obj)
if check(obj, memoryview):
n = len(obj) * obj.itemsize
if n >= 2 ** 32:
raise ValueError("Memoryview is too large")
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xCA, obj))
return self._buffer.write(struct.pack(">Bd", 0xCB, obj))
if check(obj, (ExtType, Timestamp)):
if check(obj, Timestamp):
code = -1
data = obj.to_bytes()
else:
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b"\xd4")
elif L == 2:
self._buffer.write(b"\xd5")
elif L == 4:
self._buffer.write(b"\xd6")
elif L == 8:
self._buffer.write(b"\xd7")
elif L == 16:
self._buffer.write(b"\xd8")
elif L <= 0xFF:
self._buffer.write(struct.pack(">BB", 0xC7, L))
elif L <= 0xFFFF:
self._buffer.write(struct.pack(">BH", 0xC8, L))
else:
self._buffer.write(struct.pack(">BI", 0xC9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if check(obj, list_types):
n = len(obj)
self._pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if check(obj, dict):
return self._pack_map_pairs(
len(obj), dict_iteritems(obj), nest_limit - 1
)
if self._datetime and check(obj, _DateTime):
obj = Timestamp.from_datetime(obj)
                default_used = True
continue
if not default_used and self._default is not None:
obj = self._default(obj)
                default_used = True
continue
raise TypeError("Cannot serialize %r" % (obj,))
def pack(self, obj):
try:
self._pack(obj)
except:
self._buffer = StringIO() # force reset
raise
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_map_pairs(self, pairs):
self._pack_map_pairs(len(pairs), pairs)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_array_header(self, n):
if n >= 2 ** 32:
raise ValueError
self._pack_array_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_map_header(self, n):
if n >= 2 ** 32:
raise ValueError
self._pack_map_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xFFFFFFFF:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b"\xd4")
elif L == 2:
self._buffer.write(b"\xd5")
elif L == 4:
self._buffer.write(b"\xd6")
elif L == 8:
self._buffer.write(b"\xd7")
elif L == 16:
self._buffer.write(b"\xd8")
elif L <= 0xFF:
self._buffer.write(b"\xc7" + struct.pack("B", L))
elif L <= 0xFFFF:
self._buffer.write(b"\xc8" + struct.pack(">H", L))
else:
self._buffer.write(b"\xc9" + struct.pack(">I", L))
self._buffer.write(struct.pack("B", typecode))
self._buffer.write(data)
def _pack_array_header(self, n):
if n <= 0x0F:
return self._buffer.write(struct.pack("B", 0x90 + n))
if n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xDC, n))
if n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xDD, n))
raise ValueError("Array is too large")
def _pack_map_header(self, n):
if n <= 0x0F:
return self._buffer.write(struct.pack("B", 0x80 + n))
if n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xDE, n))
if n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xDF, n))
raise ValueError("Dict is too large")
def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def _pack_raw_header(self, n):
if n <= 0x1F:
self._buffer.write(struct.pack("B", 0xA0 + n))
elif self._use_bin_type and n <= 0xFF:
self._buffer.write(struct.pack(">BB", 0xD9, n))
elif n <= 0xFFFF:
self._buffer.write(struct.pack(">BH", 0xDA, n))
elif n <= 0xFFFFFFFF:
self._buffer.write(struct.pack(">BI", 0xDB, n))
else:
raise ValueError("Raw is too large")
def _pack_bin_header(self, n):
if not self._use_bin_type:
return self._pack_raw_header(n)
elif n <= 0xFF:
return self._buffer.write(struct.pack(">BB", 0xC4, n))
elif n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xC5, n))
elif n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xC6, n))
else:
raise ValueError("Bin is too large")
def bytes(self):
"""Return internal buffer contents as bytes object"""
return self._buffer.getvalue()
def reset(self):
"""Reset internal buffer.
This method is useful only when autoreset=False.
"""
self._buffer = StringIO()
def getbuffer(self):
"""Return view of internal buffer."""
if USING_STRINGBUILDER or PY2:
return memoryview(self.bytes())
else:
return self._buffer.getbuffer()
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py",
"repo_id": "Django-locallibrary",
"token_count": 18884
} | 21 |
"""For neatly implementing static typing in packaging.
`mypy` - the static type analysis tool we use - uses the `typing` module, which
provides core functionality fundamental to mypy's functioning.
Generally, `typing` would be imported at runtime and used in that fashion -
it acts as a no-op at runtime and does not have any run-time overhead by
design.
As it turns out, `typing` is not vendorable - it uses separate sources for
Python 2/Python 3. Thus, this codebase cannot expect it to be present.
To work around this, mypy allows the typing import to be behind a False-y
optional to prevent it from running at runtime and type-comments can be used
to remove the need for the types to be accessible directly during runtime.
This module provides the False-y guard in a nicely named fashion so that a
curious maintainer can reach here to read this.
In packaging, all static-typing related imports should be guarded as follows:
from pip._vendor.packaging._typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import ...
Ref: https://github.com/python/mypy/issues/3216
"""
__all__ = ["TYPE_CHECKING", "cast"]
# The TYPE_CHECKING constant defined by the typing module is False at runtime
# but True while type checking.
if False: # pragma: no cover
from typing import TYPE_CHECKING
else:
TYPE_CHECKING = False
# typing's cast syntax requires calling typing.cast at runtime, but we don't
# want to import typing at runtime. Here, we inform the type checkers that
# we're importing `typing.cast` as `cast` and re-implement typing.cast's
# runtime behavior in a block that is ignored by type checkers.
if TYPE_CHECKING: # pragma: no cover
# not executed at runtime
from typing import cast
else:
# executed at runtime
def cast(type_, value): # noqa
return value
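# A usage sketch of the guarded-import pattern described above (the names
# are illustrative):
#
#     from pip._vendor.packaging._typing import TYPE_CHECKING, cast
#     if TYPE_CHECKING:
#         from typing import List
#     def as_str_list(value):
#         # type: (object) -> List[str]
#         return cast("List[str]", value)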
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_typing.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/packaging/_typing.py",
"repo_id": "Django-locallibrary",
"token_count": 510
} | 22 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle. There is
only one — the one from the certifi package.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
from pip._vendor.certifi import where
if __name__ == '__main__':
print(where())
| Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/certs.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/certs.py",
"repo_id": "Django-locallibrary",
"token_count": 136
} | 23 |
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb(Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, gztar, bztar, xztar, "
"ztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths "
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip' }
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = None
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create dumb built distributions "
"on platform %s" % os.name)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s", self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError(
"can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/bdist_dumb.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/bdist_dumb.py",
"repo_id": "Django-locallibrary",
"token_count": 2496
} | 24 |
"""distutils.command.install_lib
Implements the Distutils 'install_lib' command
(install all Python modules)."""
import os
import importlib.util
import sys
from distutils.core import Command
from distutils.errors import DistutilsOptionError
# Extension for Python source files.
PYTHON_SOURCE_EXTENSION = ".py"
class install_lib(Command):
description = "install all Python modules (extensions and pure Python)"
# The byte-compilation options are a tad confusing. Here are the
# possible scenarios:
# 1) no compilation at all (--no-compile --no-optimize)
# 2) compile .pyc only (--compile --no-optimize; default)
# 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
# 4) compile "opt-1" .pyc only (--no-compile --optimize)
# 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
# 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
#
# The UI for this is two options, 'compile' and 'optimize'.
# 'compile' is strictly boolean, and only decides whether to
# generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
# decides both whether to generate .pyc files and what level of
# optimization to use.
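    # For example (illustrative command lines):
    #   python setup.py install_lib --compile --optimize=0     -> scenario 2
    #   python setup.py install_lib --no-compile --optimize=1  -> scenario 4
    #   python setup.py install_lib --compile --optimize=2     -> scenario 5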
user_options = [
('install-dir=', 'd', "directory to install to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'compile', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
# let the 'install' command dictate our installation directory
self.install_dir = None
self.build_dir = None
self.force = 0
self.compile = None
self.optimize = None
self.skip_build = None
def finalize_options(self):
# Get all the information we need to install pure Python modules
# from the umbrella 'install' command -- build (source) directory,
# install (target) directory, and whether to compile .py files.
self.set_undefined_options('install',
('build_lib', 'build_dir'),
('install_lib', 'install_dir'),
('force', 'force'),
('compile', 'compile'),
('optimize', 'optimize'),
('skip_build', 'skip_build'),
)
if self.compile is None:
self.compile = True
if self.optimize is None:
self.optimize = False
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if self.optimize not in (0, 1, 2):
raise AssertionError
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# Make sure we have built everything we need first
self.build()
# Install everything: simply dump the entire contents of the build
# directory to the installation directory (that's the beauty of
# having a build directory!)
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
# -- Top-level worker functions ------------------------------------
# (called from 'run()')
def build(self):
if not self.skip_build:
if self.distribution.has_pure_modules():
self.run_command('build_py')
if self.distribution.has_ext_modules():
self.run_command('build_ext')
def install(self):
if os.path.isdir(self.build_dir):
outfiles = self.copy_tree(self.build_dir, self.install_dir)
else:
self.warn("'%s' does not exist -- no Python modules to install" %
self.build_dir)
return
return outfiles
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
# Get the "--root" directory supplied to the "install" command,
# and use it as a prefix to strip off the purported filename
# encoded in bytecode files. This is far from complete, but it
# should at least generate usable bytecode in RPM distributions.
install_root = self.get_finalized_command('install').root
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=install_root,
dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=install_root,
verbose=self.verbose, dry_run=self.dry_run)
# -- Utility methods -----------------------------------------------
def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
if not has_any:
return []
build_cmd = self.get_finalized_command(build_cmd)
build_files = build_cmd.get_outputs()
build_dir = getattr(build_cmd, cmd_option)
prefix_len = len(build_dir) + len(os.sep)
outputs = []
for file in build_files:
outputs.append(os.path.join(output_dir, file[prefix_len:]))
return outputs
def _bytecode_filenames(self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
# Since build_py handles package data installation, the
# list of outputs can contain more than just .py files.
# Make sure we only report bytecode for the .py files.
ext = os.path.splitext(os.path.normcase(py_file))[1]
if ext != PYTHON_SOURCE_EXTENSION:
continue
if self.compile:
bytecode_files.append(importlib.util.cache_from_source(
py_file, optimization=''))
if self.optimize > 0:
bytecode_files.append(importlib.util.cache_from_source(
py_file, optimization=self.optimize))
return bytecode_files
# -- External interface --------------------------------------------
# (called by outsiders)
def get_outputs(self):
"""Return the list of files that would be installed if this command
were actually run. Not affected by the "dry-run" flag or whether
modules have actually been built yet.
"""
pure_outputs = \
self._mutate_outputs(self.distribution.has_pure_modules(),
'build_py', 'build_lib',
self.install_dir)
if self.compile:
bytecode_outputs = self._bytecode_filenames(pure_outputs)
else:
bytecode_outputs = []
ext_outputs = \
self._mutate_outputs(self.distribution.has_ext_modules(),
'build_ext', 'build_lib',
self.install_dir)
return pure_outputs + bytecode_outputs + ext_outputs
def get_inputs(self):
"""Get the list of files that are input to this command, ie. the
files that get installed as they are named in the build tree.
The files in this list correspond one-to-one to the output
filenames returned by 'get_outputs()'.
"""
inputs = []
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
inputs.extend(build_py.get_outputs())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
inputs.extend(build_ext.get_outputs())
return inputs
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_lib.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install_lib.py",
"repo_id": "Django-locallibrary",
"token_count": 3716
} | 25 |
"""distutils.filelist
Provides the FileList class, used for poking about the filesystem
and building lists of files.
"""
import os, re
import fnmatch
import functools
from distutils.util import convert_path
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
from distutils import log
class FileList:
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
Instance attributes:
dir
directory from which files will be taken -- only used if
'allfiles' not supplied to constructor
files
list of filenames currently being built/filtered/manipulated
allfiles
complete list of files under consideration (ie. without any
filtering applied)
"""
def __init__(self, warn=None, debug_print=None):
# ignore argument to FileList, but keep them for backwards
# compatibility
self.allfiles = None
self.files = []
def set_allfiles(self, allfiles):
self.allfiles = allfiles
def findall(self, dir=os.curdir):
self.allfiles = findall(dir)
def debug_print(self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print(msg)
# -- List-like methods ---------------------------------------------
def append(self, item):
self.files.append(item)
def extend(self, items):
self.files.extend(items)
def sort(self):
# Not a strict lexical sort!
sortable_files = sorted(map(os.path.split, self.files))
self.files = []
for sort_tuple in sortable_files:
self.files.append(os.path.join(*sort_tuple))
# -- Other miscellaneous utility methods ---------------------------
def remove_duplicates(self):
# Assumes list has been sorted!
for i in range(len(self.files) - 1, 0, -1):
if self.files[i] == self.files[i - 1]:
del self.files[i]
# -- "File template" methods ---------------------------------------
def _parse_template_line(self, line):
words = line.split()
action = words[0]
patterns = dir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistutilsTemplateError(
"'%s' expects <pattern1> <pattern2> ..." % action)
patterns = [convert_path(w) for w in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistutilsTemplateError(
"'%s' expects <dir> <pattern1> <pattern2> ..." % action)
dir = convert_path(words[1])
patterns = [convert_path(w) for w in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistutilsTemplateError(
"'%s' expects a single <dir_pattern>" % action)
dir_pattern = convert_path(words[1])
else:
raise DistutilsTemplateError("unknown action '%s'" % action)
return (action, patterns, dir, dir_pattern)
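    # The lines parsed above come from a MANIFEST.in-style template;
    # a small sketch (file and directory names are illustrative):
    #     include README.rst
    #     recursive-include docs *.txt
    #     graft examples
    #     prune build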
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include_pattern(pattern, anchor=1):
log.warn("warning: no files found matching '%s'",
pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude_pattern(pattern, anchor=1):
log.warn(("warning: no previously-included files "
"found matching '%s'"), pattern)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.include_pattern(pattern, anchor=0):
log.warn(("warning: no files found matching '%s' "
"anywhere in distribution"), pattern)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude_pattern(pattern, anchor=0):
log.warn(("warning: no previously-included files matching "
"'%s' found anywhere in distribution"),
pattern)
elif action == 'recursive-include':
self.debug_print("recursive-include %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.include_pattern(pattern, prefix=dir):
log.warn(("warning: no files found matching '%s' "
"under directory '%s'"),
pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.exclude_pattern(pattern, prefix=dir):
log.warn(("warning: no previously-included files matching "
"'%s' found under directory '%s'"),
pattern, dir)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.include_pattern(None, prefix=dir_pattern):
log.warn("warning: no directories found matching '%s'",
dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.exclude_pattern(None, prefix=dir_pattern):
log.warn(("no previously-included directories found "
"matching '%s'"), dir_pattern)
else:
raise DistutilsInternalError(
"this cannot happen: invalid action '%s'" % action)
# -- Filtering/selection methods -----------------------------------
def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
are not quite the same as implemented by the 'fnmatch' module: '*'
and '?' match non-special characters, where "special" is platform-
dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found, False otherwise.
"""
# XXX docstring lying about what the special chars are?
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print("include_pattern: applying regex r'%s'" %
pattern_re.pattern)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.debug_print(" adding " + name)
self.files.append(name)
files_found = True
return files_found
def exclude_pattern (self, pattern,
anchor=1, prefix=None, is_regex=0):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'. Other parameters are the same as for
'include_pattern()', above.
The list 'self.files' is modified in place.
Return True if files are found, False otherwise.
"""
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print("exclude_pattern: applying regex r'%s'" %
pattern_re.pattern)
for i in range(len(self.files)-1, -1, -1):
if pattern_re.search(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
files_found = True
return files_found
# ----------------------------------------------------------------------
# Utility functions
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
def glob_to_re(pattern):
"""Translate a shell-like glob pattern to a regular expression; return
a string containing the regex. Differs from 'fnmatch.translate()' in
that '*' does not match "special characters" (which are
platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
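# For example, glob_to_re('*.py') rewrites the translated '*' so that it
# cannot cross os.sep; once anchored by translate_pattern() below, '*.py'
# matches 'foo.py' but not 'foo/bar.py'.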
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
"""Translate a shell-like wildcard pattern to a compiled regular
expression. Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
# ditch start and end characters
start, _, end = glob_to_re('_').partition('_')
if pattern:
pattern_re = glob_to_re(pattern)
assert pattern_re.startswith(start) and pattern_re.endswith(end)
else:
pattern_re = ''
if prefix is not None:
prefix_re = glob_to_re(prefix)
assert prefix_re.startswith(start) and prefix_re.endswith(end)
prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
pattern_re = r'%s\A%s%s.*%s%s' % (start, prefix_re, sep, pattern_re, end)
else: # no prefix -- respect anchor flag
if anchor:
pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
return re.compile(pattern_re)
| Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/filelist.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/filelist.py",
"repo_id": "Django-locallibrary",
"token_count": 5486
} | 26 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name")
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # PEP-345
| L("extra") # undocumented setuptools legacy
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself a list. In that case we want to skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, "implementation"):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
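# A usage sketch (the marker string is illustrative):
#     m = Marker('python_version >= "2.7" and os_name == "posix"')
#     m.evaluate()                   # environment of the running interpreter
#     m.evaluate({"os_name": "nt"})  # override part of the environment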
| Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/markers.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/markers.py",
"repo_id": "Django-locallibrary",
"token_count": 3403
} | 27 |
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
__metaclass__ = type
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
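# Illustrative sketch of the fallback branch on Windows (where
# os.path.normcase lowercases paths): samefile('C:/Tools/App.py',
# 'c:/tools/app.py') compares equal even if one path does not exist yet.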
if six.PY2:
def _to_bytes(s):
return s
def isascii(s):
try:
six.text_type(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_bytes(s):
return s.encode('utf8')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
def _one_liner(text):
return textwrap.dedent(text).strip().replace('\n', '; ')
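# For example (illustrative):
#   _one_liner("""
#       import sys
#       sys.__plen = len(sys.path)
#   """) == 'import sys; sys.__plen = len(sys.path)'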
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed"),
('user', None, "install in user site-package '%s'" % site.USER_SITE)
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version',
'user'
]
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = '{}.{}'.format(*sys.version_info)
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
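# Sample output (illustrative values):
#   setuptools 47.1.0 from /env/Lib/site-packages (Python 3.8)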
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
# derive from sys.version_info so Python 3.10+ is not truncated to "3.1"
'py_version_short': '{}.{}'.format(*sys.version_info[:2]),
'py_version_nodot': '{}{}'.format(*sys.version_info[:2]),
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
elif self.user:
log.warn("WARNING: The user site-packages directory is disabled.")
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
# materialize as a list: a bare map() iterator would be exhausted after
# the first membership test in the loop below
normpath = list(map(normalize_path, sys.path))
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError as e:
raise DistutilsOptionError(
"--optimize must be 0, 1, or 2"
) from e
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self, show_deprecation=True):
if show_deprecation:
self.announce(
"WARNING: The easy_install command is deprecated "
"and will be removed in a future version.",
log.WARN,
)
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
if not os.path.exists(instdir):
try:
os.makedirs(instdir)
except (OSError, IOError):
self.cant_write_to_target()
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir with easy_install
pythonpath = os.environ.get('PYTHONPATH', '')
log.warn(self.__no_default_msg, self.install_dir, pythonpath)
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
if self.multi_version and not os.path.exists(pth_file):
self.pth_file = None # don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip() # noqa
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip() # noqa
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip() # noqa
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
os.makedirs(dirname, exist_ok=True)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write(tmpl.format(**locals()))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
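# Probe sketch: the temporary .pth written above holds one line shaped like
#   import os; f = open('<instdir>/test-easy-install-<pid>.pth.ok', 'w'); f.write('OK'); f.close()
# (path illustrative); a child interpreter is then spawned, and the .ok file
# appears only if startup site processing actually executed the .pth line.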
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
finally:
os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
def easy_install(self, spec, deps=False):
with self._tmpdir() as tmpdir:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e)) from e
except VersionConflict as e:
raise DistutilsError(e.report()) from e
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
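# Decision sketch: an explicit --zip-ok / --always-unzip choice wins;
# otherwise eggs marked 'not-zip-safe', or carrying no 'zip-safe' flag at
# all, are unzipped as the safe default.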
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_bytes(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
if self.dry_run:
return
mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.whl'):
return [self.install_wheel(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
(m + " %s to %s") % (
os.path.basename(egg_path),
os.path.dirname(destination)
),
)
update_dist_caches(
destination,
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
destination = os.path.join(self.install_dir, wheel.egg_name())
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
self.execute(
wheel.install_as_egg,
(destination,),
("Installing %s to %s") % (
os.path.basename(wheel_path),
os.path.dirname(destination)
),
)
finally:
update_dist_caches(destination, fix_zipimporter_caches=False)
self.add_output(destination)
return self.egg_distribution(destination)
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip() # noqa
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""") # noqa
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip() # noqa
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError(
"Setup script exited with %s" % (v.args[0],)
) from v
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.
""").strip()
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
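# For example (illustrative): with PYTHONPATH='/a::/b', _pythonpath()
# yields '/a' and '/b'; the empty entry is dropped by filter(None, ...).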
def get_site_dirs():
"""
Return a list of 'site' dirs
"""
sitedirs = []
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
"lib",
"python{}.{}".format(*sys.version_info),
"site-packages",
),
os.path.join(prefix, "lib", "site-python"),
])
else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
home_sp = os.path.join(
home,
'Library',
'Python',
'{}.{}'.format(*sys.version_info),
'site-packages',
)
sitedirs.append(home_sp)
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(six.StringIO(config))
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
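# Layout sketch implied by the seeks above: the .exe prepends data before
# the embedded zip archive, ending with [config text, cfglen bytes]
# followed by a 12-byte footer struct('<iii') = (tag, cfglen, bmlen),
# where tag 0x1234567A/B marks valid wininst metadata.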
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if not six.PY2:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
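# Translation sketch (illustrative): with the defaults above,
#   'PURELIB/pkg/mod.py'   -> 'pkg/mod.py'
#   'SCRIPTS/tool.py'      -> 'EGG-INFO/scripts/tool.py'
# because each archive name is matched case-insensitively against the
# sorted prefix list and rewritten with the paired replacement.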
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
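# For example (illustrative): with basedir '/py/site-packages',
#   make_relative('/py/site-packages/foo-1.0.egg') -> './foo-1.0.egg'
#   make_relative('/elsewhere/foo-1.0.egg')        -> '/elsewhere/foo-1.0.egg'
# so saved .pth entries stay valid if the directory tree is relocated.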
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
prelude = _one_liner("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _one_liner("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
PthDistributions = RewritePthDistributions
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
if func in [os.unlink, os.remove] and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we cannot catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
The given updater is a callable taking a cache entry key and the original
entry (which has already been removed from the cache); it is expected to
update the entry and may return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances lying around, attempting to use them
# will fail because their zip archive directory information is unavailable,
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
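# For example (illustrative):
#   nt_quote_arg(r'C:\Program Files\Python38\python.exe')
#   -> '"C:\\Program Files\\Python38\\python.exe"'
# since list2cmdline wraps arguments containing spaces in double quotes.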
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
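# Illustrative outcomes: 'tool.py' passes on extension alone; text that
# compiles as Python passes the is_python() check; a '#!...python...'
# first line rescues scripts whose body this interpreter cannot compile;
# and '#!/bin/sh\necho hi' fails all three checks.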
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
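# For example (illustrative):
#   CommandSpec.from_string('"/opt/my python/bin/python" -E')
#   -> ['/opt/my python/bin/python', '-E']
# shlex.split honors the quoting, so the embedded space survives intact.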
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
return '#!' + cmdline + '\n'
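# Hedged usage sketch (not upstream code): build a '#!' header from a plain
# command string; the '/usr/bin/env python3' spelling is an assumption.
def _demo_command_spec():
    cmd = CommandSpec.from_string('/usr/bin/env python3')
    cmd.install_options('')  # empty script text -> no extra options
    return cmd.as_header()   # '#!/usr/bin/env python3\n'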
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class ScriptWriter:
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent(r"""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = %(spec)r
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn(
"Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
if wininst:
executable = "python.exe"
return cls.get_header(script_text, executable)
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
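# Hedged usage sketch (not upstream code): `dist` is assumed to be a
# pkg_resources.Distribution carrying console_scripts entry points.
def _demo_script_writer(dist):
    writer = ScriptWriter.best()
    header = writer.get_header()  # '#!' line for the current interpreter
    # each yielded tuple starts with the script name; Windows writers append
    # the contents, a mode flag and blocker entries after it
    for res in writer.get_args(dist, header):
        print('would write script:', res[0])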
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-Windows systems, always use the replaced header. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if six.PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
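# Hedged sketch (illustrative): current_umask reads the process umask without
# changing it -- it sets a throwaway value and immediately restores the old one.
def _demo_current_umask():
    assert current_umask() == current_umask()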
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
"""
Warning for EasyInstall deprecations, bypassing suppression.
"""
| Django-locallibrary/env/Lib/site-packages/setuptools/command/easy_install.py/0 | {
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/easy_install.py",
"repo_id": "Django-locallibrary",
"token_count": 39662
} | 28 |
from __future__ import absolute_import, unicode_literals
import ast
import io
import os
import sys
import warnings
import functools
import importlib
from collections import defaultdict
from functools import partial
from functools import wraps
import contextlib
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.extern.packaging.version import LegacyVersion, parse
from setuptools.extern.packaging.specifiers import SpecifierSet
from setuptools.extern.six import string_types, PY3
__metaclass__ = type
class StaticModule:
"""
Attempt to load a module statically: parse its source with ast and expose
its top-level assignments as attributes, without executing the module.
"""
def __init__(self, name):
spec = importlib.util.find_spec(name)
with open(spec.origin) as strm:
src = strm.read()
module = ast.parse(src)
vars(self).update(locals())
del self.self
def __getattr__(self, attr):
try:
return next(
ast.literal_eval(statement.value)
for statement in self.module.body
if isinstance(statement, ast.Assign)
for target in statement.targets
if isinstance(target, ast.Name) and target.id == attr
)
except Exception as e:
raise AttributeError(
"{self.name} has no attribute {attr}".format(**locals())
) from e
@contextlib.contextmanager
def patch_path(path):
"""
Add path to front of sys.path for the duration of the context.
"""
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
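# Hedged usage sketch (not upstream code): 'extra_dir' is an assumed path.
def _demo_patch_path():
    with patch_path('extra_dir'):
        assert sys.path[0] == 'extra_dir'
    assert 'extra_dir' not in sys.path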
def read_configuration(
filepath, find_others=False, ignore_option_errors=False):
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be in various places.
:param bool ignore_option_errors: Whether to silently ignore
options whose values could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False, exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution, _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise DistutilsFileError(
'Configuration file %s does not exist.' % filepath)
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
try:
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
if filepath not in filenames:
filenames.append(filepath)
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options,
ignore_option_errors=ignore_option_errors)
finally:
os.chdir(current_directory)
return configuration_to_dict(handlers)
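# Hedged usage sketch (not upstream code): read a project's setup.cfg into a
# plain dict; the 'setup.cfg' path and keys reflect a typical layout.
def _demo_read_configuration():
    conf = read_configuration('setup.cfg')
    return conf['metadata'].get('version'), conf['options'].get('packages')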
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = 'get_{key}'.format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
for option in handler.set_options:
value = _get_option(handler.target_obj, option)
config_dict[handler.section_prefix][option] = value
return config_dict
def parse_configuration(
distribution, command_options, ignore_option_errors=False):
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options whose values could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False, exceptions are propagated as expected.
:rtype: list
"""
options = ConfigOptionsHandler(
distribution, command_options, ignore_option_errors)
options.parse()
meta = ConfigMetadataHandler(
distribution.metadata, command_options, ignore_option_errors,
distribution.package_dir)
meta.parse()
return meta, options
class ConfigHandler:
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by subclasses.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already inhabited. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
'Only strings are accepted for the {0} field, '
'files are not accepted'.format(key))
return value
return parser
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
The directive is sandboxed and won't reach anything outside
the directory containing setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, string_types):
return value
if not value.startswith(include_directive):
return value
spec = value[len(include_directive):]
filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
return '\n'.join(
cls._read_file(path)
for path in filepaths
if (cls._assert_local(path) or True)
and os.path.isfile(path)
)
@staticmethod
def _assert_local(filepath):
if not filepath.startswith(os.getcwd()):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
@staticmethod
def _read_file(filepath):
with io.open(filepath, encoding='utf-8') as f:
return f.read()
@classmethod
def _parse_attr(cls, value, package_dir=None):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
parent_path = os.getcwd()
if package_dir:
if attrs_path[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[attrs_path[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(os.getcwd(), parts[0])
module_name = parts[1]
else:
module_name = custom_path
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(os.getcwd(), package_dir[''])
with patch_path(parent_path):
try:
# attempt to load value statically
return getattr(StaticModule(module_name), attr_name)
except Exception:
# fallback to simple import
module = importlib.import_module(module_name)
return getattr(module, attr_name)
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent, as a new option may appear at any time.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are translated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler
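# Hedged sketch (not upstream code) of the low-level value parsers above;
# the sample strings are illustrative.
def _demo_config_parsers():
    assert ConfigHandler._parse_list('a, b, c') == ['a', 'b', 'c']
    assert ConfigHandler._parse_list('a\nb') == ['a', 'b']
    assert ConfigHandler._parse_dict('x = 1\ny = 2') == {'x': '1', 'y': '2'}
    assert ConfigHandler._parse_bool('Yes') is True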
class ConfigMetadataHandler(ConfigHandler):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages, which also use the `metadata` section.
"""
def __init__(self, target_obj, options, ignore_option_errors=False,
package_dir=None):
super(ConfigMetadataHandler, self).__init__(target_obj, options,
ignore_option_errors)
self.package_dir = package_dir
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = self._parse_file
parse_dict = self._parse_dict
exclude_files_parser = self._exclude_files_parser
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': self._deprecated_config_handler(
parse_list,
"The requires parameter is deprecated, please use "
"install_requires for runtime dependencies.",
DeprecationWarning),
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': exclude_files_parser('license'),
'license_files': parse_list,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
'project_urls': parse_dict,
}
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_file(value)
if version != value:
version = version.strip()
# Be strict about versions loaded from file because it's easy to
# accidentally include newlines and other unintended content
if isinstance(parse(version), LegacyVersion):
tmpl = (
'Version loaded from {value} does not '
'comply with PEP 440: {version}'
)
raise DistutilsOptionError(tmpl.format(**locals()))
return version
version = self._parse_attr(value, self.package_dir)
if callable(version):
version = version()
if not isinstance(version, string_types):
if hasattr(version, '__iter__'):
version = '.'.join(map(str, version))
else:
version = '%s' % version
return version
class ConfigOptionsHandler(ConfigHandler):
section_prefix = 'options'
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_list_semicolon = partial(self._parse_list, separator=';')
parse_bool = self._parse_bool
parse_dict = self._parse_dict
return {
'zip_safe': parse_bool,
'use_2to3': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'use_2to3_fixers': parse_list,
'use_2to3_exclude_fixers': parse_list,
'convert_2to3_doctests': parse_list,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': parse_list,
'install_requires': parse_list_semicolon,
'setup_requires': parse_list_semicolon,
'tests_require': parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file,
'py_modules': parse_list,
'python_requires': SpecifierSet,
}
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directives = ['find:', 'find_namespace:']
trimmed_value = value.strip()
if trimmed_value not in find_directives:
return self._parse_list(value)
findns = trimmed_value == find_directives[1]
if findns and not PY3:
raise DistutilsOptionError(
'find_namespace: directive is unsupported on Python < 3.3')
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {}))
if findns:
from setuptools import find_namespace_packages as find_packages
else:
from setuptools import find_packages
return find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(
section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v])
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
root = parsed.get('*')
if root:
parsed[''] = root
del parsed['*']
return parsed
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(
section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parse_list = partial(self._parse_list, separator=';')
self['extras_require'] = self._parse_section_to_dict(
section_options, parse_list)
def parse_section_data_files(self, section_options):
"""Parses `data_files` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['data_files'] = [(k, v) for k, v in parsed.items()]
----- end of file: Django-locallibrary/env/Lib/site-packages/setuptools/config.py -----
"""
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the base classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
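# Hedged sketch (not upstream code): on CPython this is just inspect.getmro,
# so a small hierarchy demonstrates the ordering.
def _demo_get_mro():
    class Base: pass
    class Child(Base): pass
    return _get_mro(Child)  # (Child, Base, object) on CPython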
def get_unpatched(item):
lookup = (
get_unpatched_class if isinstance(item, six.class_types) else
get_unpatched_function if isinstance(item, types.FunctionType) else
lambda item: None
)
return lookup(item)
def get_unpatched_class(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
external_bases = (
cls
for cls in _get_mro(cls)
if not cls.__module__.startswith('setuptools')
)
base = next(external_bases)
if not base.__module__.startswith('distutils'):
msg = "distutils has already been patched by %r" % cls
raise AssertionError(msg)
return base
def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
has_issue_12885 = sys.version_info <= (3, 5, 3)
if has_issue_12885:
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = setuptools.findall
needs_warehouse = (
sys.version_info < (2, 7, 13)
or
(3, 4) < sys.version_info < (3, 4, 6)
or
(3, 5) < sys.version_info <= (3, 5, 3)
)
if needs_warehouse:
warehouse = 'https://upload.pypi.org/legacy/'
distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
_patch_distribution_metadata()
# Install Distribution throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = setuptools.dist.Distribution
# Install the patched Extension
distutils.core.Extension = setuptools.extension.Extension
distutils.extension.Extension = setuptools.extension.Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = (
setuptools.extension.Extension
)
patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata():
"""Patch write_pkg_file and read_pkg_file for higher metadata standards"""
for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
new_val = getattr(setuptools.dist, attr)
setattr(distutils.dist.DistributionMetadata, attr, new_val)
def patch_func(replacement, target_mod, func_name):
"""
Patch func_name in target_mod with replacement
Important - original must be resolved by name to avoid
patching an already patched function.
"""
original = getattr(target_mod, func_name)
# set the 'unpatched' attribute on the replacement to
# point to the original.
vars(replacement).setdefault('unpatched', original)
# replace the function in the original module
setattr(target_mod, func_name, replacement)
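# Hedged sketch (not upstream code): patch a throwaway module's function,
# then recover the original through the 'unpatched' attribute.
def _demo_patch_func():
    mod = types.ModuleType('demo_mod')
    mod.greet = lambda: 'original'
    patch_func(lambda: 'patched', mod, 'greet')
    assert mod.greet() == 'patched'
    assert get_unpatched_function(mod.greet)() == 'original'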
def get_unpatched_function(candidate):
return getattr(candidate, 'unpatched')
def patch_for_msvc_specialized_compiler():
"""
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
# import late to avoid circular imports on Python < 3.5
msvc = import_module('setuptools.msvc')
if platform.system() != 'Windows':
# Compilers are only available on Microsoft Windows
return
def patch_params(mod_name, func_name):
"""
Prepare the parameters for patch_func to patch indicated function.
"""
repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
repl_name = repl_prefix + func_name.lstrip('_')
repl = getattr(msvc, repl_name)
mod = import_module(mod_name)
if not hasattr(mod, func_name):
raise ImportError(func_name)
return repl, mod, func_name
# Python 2.7 to 3.4
msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
# Python 3.5+
msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
try:
# Patch distutils.msvc9compiler
patch_func(*msvc9('find_vcvarsall'))
patch_func(*msvc9('query_vcvarsall'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler._get_vc_env
patch_func(*msvc14('_get_vc_env'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler.gen_lib_options for Numpy
patch_func(*msvc14('gen_lib_options'))
except ImportError:
pass
----- end of file: Django-locallibrary/env/Lib/site-packages/setuptools/monkey.py -----
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
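# Illustrative helper (an assumption, not part of the upstream script): dump
# the parsed pyvenv.cfg values for a given environment directory.
function Show-PyVenvConfig([String]$ConfigDir) {
    $cfg = Get-PyVenvConfig -ConfigDir $ConfigDir
    foreach ($key in $cfg.Keys) {
        Write-Verbose "$key = $($cfg[$key])" -Verbose
    }
    return $cfg
}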
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
# SIG # Begin signature block
# (Base64-encoded Authenticode/PKCS#7 signature data omitted here; it serves
# only code-signing verification and runs to roughly 150 lines in the
# original file.)
# SIG # End signature block
----- end of file: Django-locallibrary/env/Scripts/Activate.ps1 -----
//+------------------------------------------------------------------+
//| MultipleMatRegression.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/users/omegajoctan"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#define DBL_MAX_MIN(val) if (val>DBL_MAX) Alert("Function ",__FUNCTION__,"\n Maximum Double value Allowed reached"); if (val<DBL_MIN && val>0) Alert("Function ",__FUNCTION__,"\n MInimum Double value Allowed reached")
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CMatrixRegression
{
protected:
double TestXDataSet[];
double TestYDataSet[];
double m_train_split;
double m_allyCopy[]; //original values of y copied
double m_allxCopy[]; //original values of x copied
int m_handle;
string m_filename;
string DataColumnNames[]; //store the column names from csv file
int rows_total;
int x_columns_chosen; //Number of x columns chosen
bool m_debug;
double m_yvalues[]; //y values or dependent values matrix
double m_allxvalues[]; //All x values design matrix
double xT[]; //store the transposed values
string m_XColsArray[]; //store the x columns chosen on the Init
string m_delimiter;
double Betas[]; //Array for storing the coefficients
protected:
bool fileopen();
void GetColumnDatatoArray(int from_column_number, double &toArr[]);
void GetColumnDatatoArray(int from_column_number, string &toArr[]);
void GetAllDataToArray(double& array[]);
int MatrixtypeSquare(int sizearr);
void MatrixInverse(double &Matrix[],double &output_mat[]);
void TrainTestSplit(double &Arr[], double& outTrain[], double& outTest[], int each_rowsize, int colsinArray=1);
public:
CMatrixRegression(void);
~CMatrixRegression(void);
void LrInit(int y_column, string x_columns="", string filename = NULL, string delimiter = ",",double train_size_split = 0.7, bool debugmode=true);
void MatrixDetectType(double &Matrix[],int rows,int &__r__,int &__c__);
void MatrixMultiply(double &A[],double &B[],double &output_arr[],int row1,int col1,int row2,int col2);
void MatrixUnTranspose(double &Matrix[],int torows, int tocolumns);
void Gauss_JordanInverse(double &Matrix[],double &output_Mat[],int mat_order);
void MatrixPrint(double &Matrix[], int rows, int cols,int digits=0);
void LinearRegMain();
double corrcoef(double &x[],double &y[]);
void corrcoeff();
double mean(double &data[]);
double r_squared(double &y[],double &y_predicted[]);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CMatrixRegression::CMatrixRegression(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CMatrixRegression::~CMatrixRegression(void)
{
ArrayFree(TestXDataSet);
ArrayFree(TestYDataSet);
ArrayFree(DataColumnNames);
ArrayFree(m_yvalues);
ArrayFree(m_allxvalues);
ArrayFree(m_XColsArray);
ArrayFree(Betas);
ArrayFree(xT);
ArrayFree(m_allxCopy);
ArrayFree(m_allyCopy);
}
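//+------------------------------------------------------------------+
//| Usage sketch (illustrative only; "NASDAQ.csv" and the column     |
//| numbers are assumptions, not shipped with this file):            |
//|                                                                  |
//|   CMatrixRegression lr;                                          |
//|   lr.LrInit(1,"2,3,4","NASDAQ.csv",",",0.7,true);                |
//|   lr.LinearRegMain();                                            |
//+------------------------------------------------------------------+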
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::LrInit(int y_column,string x_columns="",string filename=NULL,string delimiter=",",double train_size_split = 0.7, bool debugmode=true)
{
//--- pass some inputs to the global inputs since they are reusable
m_filename = filename;
m_debug = debugmode;
m_delimiter = delimiter;
m_train_split = train_size_split;
//---
ushort separator = StringGetCharacter(m_delimiter,0);
StringSplit(x_columns,separator,m_XColsArray);
x_columns_chosen = ArraySize(m_XColsArray);
ArrayResize(DataColumnNames,x_columns_chosen);
//---
if (m_debug)
{
Print("Init, number of X columns chosen =",x_columns_chosen);
ArrayPrint(m_XColsArray);
}
//--- collect the data and store into corresponding arrays
GetColumnDatatoArray(y_column,m_yvalues);
GetAllDataToArray(m_allxvalues);
// check for variance in the dataset by dividing the rows total by the number of x columns selected; there shouldn't be a remainder
if (rows_total % x_columns_chosen != 0)
Alert("There are variance(s) in your dataset columns sizes, This may Lead to Incorrect calculations");
else
{
int single_rowsize = rows_total/x_columns_chosen;
ArrayCopy(m_allxCopy,m_allxvalues); //copy these values before they are split
ArrayCopy(m_allyCopy,m_yvalues);
//--- Before all of that Let's split data into training and testing
if (m_train_split >= 1) //if there is no room for testing, do not split the data
{
if (!m_debug)
Alert("TrainTest split has to be less than one to leave the room for testing");
else
Print("trainTest split is greater than or equal to one,\n this model will not be tested");
}
else //if there is room for testing then split the data into training and testing dataset
{
TrainTestSplit(m_allxvalues,m_allxvalues,TestXDataSet,single_rowsize,x_columns_chosen);
TrainTestSplit(m_yvalues,m_yvalues,TestYDataSet,single_rowsize);
}
//--- modify the single rowsize to fit the trainsize number of rows
single_rowsize = (int)MathCeil(single_rowsize*train_size_split);
/*
Print("TrainDataSet x");
int train_rows = int(rows_total*train_size_split), test_rows = rows_total-train_rows;
MatrixPrint(m_allxvalues,train_rows,x_columns_chosen);
Print("TrainDataSet y");
MatrixPrint(m_allxvalues,train_rows,1);
Print("TestDataSet x");
MatrixPrint(TestXDataSet,test_rows,x_columns_chosen);
Print("TestDataSet y");
MatrixPrint(TestYDataSet,train_rows,1);
*/
// --- end of data split, now let's begin the matrix calculations
//--- Prepend the intercept values of 1 to the design matrix
double Temp_x[]; //Temporary x array
ArrayResize(Temp_x,single_rowsize);
ArrayFill(Temp_x,0,single_rowsize,1);
ArrayCopy(Temp_x,m_allxvalues,single_rowsize,0,WHOLE_ARRAY); //after the block of ones, append the x values
//Print("Temp x arr size =",ArraySize(Temp_x));
ArrayCopy(m_allxvalues,Temp_x);
ArrayFree(Temp_x); //we no longer need this array
int tr_cols = x_columns_chosen+1,
tr_rows = single_rowsize;
ArrayCopy(xT,m_allxvalues); //store the transposed values to their global array before we untranspose them
MatrixUnTranspose(m_allxvalues,tr_cols,tr_rows); //we add one to leave the space for the values of one
//---
ArrayResize(Betas,tr_cols); //let's also not forget to resize our coefficients matrix
//---
if (m_debug)
{
//Print("Design matrix");
//MatrixPrint(m_allxvalues,tr_cols,tr_rows);
//Print("Transposed Design Matrix");
//MatrixPrint(m_allxvalues,tr_rows,tr_cols);
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::LinearRegMain(void)
{
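//--- Ordinary least squares via the normal equation:
//--- Betas = inverse(xT*x) * (xT*y), where x is the design matrix
//--- (leading column of ones for the intercept) and y the targets.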
double xTx[]; //x transpose matrix times x
int data_rowsize = rows_total/x_columns_chosen; //single rowsize for each dataset
int tr_rows = (int) MathCeil(m_train_split * data_rowsize),
tr_cols = x_columns_chosen+1; //we add one to leave the space for the values of one in our design matrix
MatrixMultiply(xT,m_allxvalues,xTx,tr_cols,tr_rows,tr_rows,tr_cols);
if (m_debug)
{
Print("xTx");
MatrixPrint(xTx,tr_cols,tr_cols,2);
}
//---
double inverse_xTx[];
if (x_columns_chosen > 1)
Gauss_JordanInverse(xTx,inverse_xTx,tr_cols);
else
MatrixInverse(xTx,inverse_xTx);
//---
if (m_debug)
{
Print("xtx Inverse");
MatrixPrint(inverse_xTx,tr_cols,tr_cols,7);
}
//---
double xTy[];
MatrixMultiply(xT,m_yvalues,xTy,tr_cols,tr_rows,tr_rows,1); //remember!! the value of 1 at the end is because we have only one dependent variable y
if (m_debug)
{
Print("xTy");
MatrixPrint(xTy,tr_cols,1,5);
}
//---
MatrixMultiply(inverse_xTx,xTy,Betas,tr_cols,tr_cols,tr_cols,1);
if (m_debug)
{
Print("Coefficients Matrix");
MatrixPrint(Betas,tr_cols,1,5);
}
//---
Print("==== TRAINED LINEAR REGRESSION MODEL COEFFICIENTS ====");
MatrixPrint(Betas,tr_cols,1,5);
//---
   /////////////////////////////////////////////////////////////////////////
   /////////        END OF TRAINING OF OUR MODEL            ////////////////
   /////////        NOW IT'S TIME TO TEST                   ////////////////
   /// Since this model was primarily created on the filtered data, where
   /// only 70% of it was used for creating and training the model, it's
   /// now time to use the rest of the data for testing.
   /////////////////////////////////////////////////////////////////////////
//--- Testing the model
int index = 0 , index_from=0;
int each_row = ArraySize(TestYDataSet);
double TestPredicted[];
ArrayResize(TestPredicted,each_row);
//---
if (m_train_split < 1) // if there is room for testing, test the model
{
Print("========= LINEAR REGRESSION MODEL TESTING STARTED =========");
for ( int i =0; i < each_row; i++ ) // let's predict the values first using the model coefficient
{
//Print("Test Array index ",index);
double sum=0; int start=0;
for ( int j=0, beta_index=1; j < x_columns_chosen; j++ , index++, beta_index++)
{
index_from = start + i;
sum += TestXDataSet[index_from] * Betas[beta_index];
//Print(" from ", index_from,/*" data = ",TestXDataSet[index_from],*/" betas index =",beta_index," arr size =",ArraySize(TestXDataSet));
start += each_row;
}
TestPredicted[i] = Betas[0] + sum; //plus the summation output with the constant
}
//--- Checking the Accuracy of testing dataset
Print("Tested Linear Model R square is = ",r_squared(TestYDataSet,TestPredicted));
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CMatrixRegression::fileopen(void)
{
m_handle = FileOpen(m_filename,FILE_READ|FILE_CSV|FILE_ANSI,m_delimiter);
if (m_handle == INVALID_HANDLE)
{
      Print(__FUNCTION__," Invalid csv handle err=",GetLastError());
      return(false);
}
return (true);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::GetAllDataToArray(double &toArr[])
{
int counter=0;
for (int i=0; i<ArraySize(m_XColsArray); i++)
{
if (fileopen())
{
int column = 0, rows=0;
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==(int)m_XColsArray[i])
{
                    if (rows>=1) //Skip the first row, which contains the column header
{
counter++;
ArrayResize(toArr,counter); //array size for all the columns
toArr[counter-1]=(double)data;
}
else
DataColumnNames[i]=data;
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
         rows_total += rows-1; //since we skip the header row we also subtract it from the count here;
         //using += ensures we accumulate the total number of rows for the entire dataset
}
FileClose(m_handle);
}
if (m_debug)
Print("All data Array Size ",ArraySize(toArr)," consuming ", sizeof(toArr)," bytes of memory");
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::GetColumnDatatoArray(int from_column_number, double &toArr[])
{
int counter=0;
int column = 0, rows=0;
fileopen();
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==from_column_number)
{
            if (rows>=1) //Skip the first row, which contains the column header
{
counter++;
ArrayResize(toArr,counter);
toArr[counter-1]=(double)data;
}
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::GetColumnDatatoArray(int from_column_number, string &toArr[])
{
int counter=0;
int column = 0, rows=0;
fileopen();
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
column++;
//---
if (column==from_column_number)
{
            if (rows>=1) //Skip the first row, which contains the column header
{
counter++;
ArrayResize(toArr,counter);
toArr[counter-1]=data;
}
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::MatrixUnTranspose(double &Matrix[],int torows, int tocolumns)
{
int rows, columns;
double Temp_Mat[]; //temporary array
rows = torows;
columns = tocolumns;
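//--- In effect this transposes a row-major flat array, e.g. (illustrative values):
//--- {1,2,3,4,5,6} read as 3 rows x 2 cols becomes {1,3,5,2,4,6} read as 2 rows x 3 cols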
//--- UnTransposing Array Starting
ArrayResize(Temp_Mat,ArraySize(Matrix));
int index=0; int start_incr = 0;
for (int C=0; C<columns; C++)
{
      start_incr= C; //the columns are the ones responsible for shaping the new array
for (int R=0; R<rows; R++, index++)
{
//if (m_debug)
//Print("Old Array Access key = ",index," New Array Access Key = ",start_incr);
Temp_Mat[index] = Matrix[start_incr];
start_incr += columns;
}
}
ArrayCopy(Matrix,Temp_Mat);
ArrayFree(Temp_Mat);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::MatrixMultiply(double &A[],double &B[],double &output_arr[],int row1,int col1,int row2,int col2)
{
//---
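   //--- note: matrices here are flat row-major arrays, so element (i,j) of an
   //--- r x c matrix lives at flat index j + (i*c); all index math below follows from that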
double MultPl_Mat[]; //where the multiplications will be stored
if (col1 != row2)
Alert("Matrix Multiplication Error, \n The number of columns in the first matrix is not equal to the number of rows in second matrix");
else
{
ArrayResize(MultPl_Mat,row1*col2);
int mat1_index, mat2_index;
      if (col1==1) //A is a column vector and B is a row vector: outer product
        {
         for (int i=0; i<row1; i++)
            for(int k=0; k<col2; k++)
              {
               int index = k + (i*col2); //element (i,k) of the row1 x col2 result
               MultPl_Mat[index] = A[i] * B[k];
              }
//Print("Matrix Multiplication output");
//ArrayPrint(MultPl_Mat);
}
      else
        {
         //general case: full matrix-by-matrix multiplication
for (int i=0; i<row1; i++)
for (int j=0; j<col2; j++)
{
int index = j + (i*col2);
MultPl_Mat[index] = 0;
for (int k=0; k<col1; k++)
{
mat1_index = k + (i*row2); //k + (i*row2)
mat2_index = j + (k*col2); //j + (k*col2)
//Print("index out ",index," index a ",mat1_index," index b ",mat2_index);
MultPl_Mat[index] += A[mat1_index] * B[mat2_index];
DBL_MAX_MIN(MultPl_Mat[index]);
}
//Print(index," ",MultPl_Mat[index]);
}
         //Print("Matrix Multiplication output");
         //ArrayPrint(MultPl_Mat);
        }
      //--- copy the result to the output array for both branches, then free the temporary
      ArrayCopy(output_arr,MultPl_Mat);
      ArrayFree(MultPl_Mat);
     }
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::MatrixPrint(double &Matrix[],int rows,int cols,int digits=0)
{
Print("[ ");
int start = 0;
//if (rows>=cols)
for (int i=0; i<cols; i++)
{
ArrayPrint(Matrix,digits,NULL,start,rows);
start += rows;
}
   printf("] \nrows = %d columns = %d",rows,cols);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::MatrixDetectType(double &Matrix[],int rows,int &__r__,int &__c__)
{
int size = ArraySize(Matrix);
__c__ = size/rows;
__r__ = size/__c__;
//if (m_debug)
// printf("Matrix Type \n %dx%d Before Transpose/Original \n %dx%d After Transposed/Array Format",__r__,__c__,__c__,__r__);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::Gauss_JordanInverse(double &Matrix[],double &output_Mat[],int mat_order)
{
int rowsCols = mat_order;
//---
if (mat_order <= 2)
     Alert("To find the inverse of a matrix using this method, its order has to be greater than 2, i.e. larger than a 2x2 matrix");
else
{
int size = (int)MathPow(mat_order,2); //since the array has to be a square
// Create a multiplicative identity matrix
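      // e.g. for a 3x3 identity the ones land at flat indices 0, 4 and 8 (the step is mat_order+1)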
int start = 0;
double Identity_Mat[];
ArrayResize(Identity_Mat,size);
for (int i=0; i<size; i++)
{
if (i==start)
{
Identity_Mat[i] = 1;
start += rowsCols+1;
}
else
Identity_Mat[i] = 0;
}
      //Print("Multiplicative Identity Matrix");
//ArrayPrint(Identity_Mat);
//---
double MatnIdent[]; //original matrix sided with identity matrix
start = 0;
      for (int i=0; i<rowsCols; i++) //operation to append the identity matrix to the original one, row by row
        {
         ArrayCopy(MatnIdent,Matrix,ArraySize(MatnIdent),start,rowsCols);       //copy one row of the original matrix
         ArrayCopy(MatnIdent,Identity_Mat,ArraySize(MatnIdent),start,rowsCols); //then append the corresponding identity row
start += rowsCols;
}
//---
int diagonal_index = 0, index =0; start = 0;
double ratio = 0;
for (int i=0; i<rowsCols; i++)
{
if (MatnIdent[diagonal_index] == 0)
Print("Mathematical Error, Diagonal has zero value");
for (int j=0; j<rowsCols; j++)
if (i != j) //if we are not on the diagonal
{
               /* i stands for rows while j stands for columns. In finding the ratio we keep the row constant while
                  incrementing the columns that are not on the diagonal (see the if statement above); this lets us
                  access an array value based on both its row and column */
int i__i = i + (i*rowsCols*2);
diagonal_index = i__i;
int mat_ind = (i)+(j*rowsCols*2); //row number + (column number) AKA i__j
ratio = MatnIdent[mat_ind] / MatnIdent[diagonal_index];
DBL_MAX_MIN(MatnIdent[mat_ind]); DBL_MAX_MIN(MatnIdent[diagonal_index]);
//printf("Numerator = %.4f denominator =%.4f ratio =%.4f ",MatnIdent[mat_ind],MatnIdent[diagonal_index],ratio);
for (int k=0; k<rowsCols*2; k++)
{
int j_k, i_k; //first element for column second for row
j_k = k + (j*(rowsCols*2));
i_k = k + (i*(rowsCols*2));
//Print("val =",MatnIdent[j_k]," val = ",MatnIdent[i_k]);
//printf("\n jk val =%.4f, ratio = %.4f , ik val =%.4f ",MatnIdent[j_k], ratio, MatnIdent[i_k]);
MatnIdent[j_k] = MatnIdent[j_k] - ratio*MatnIdent[i_k];
DBL_MAX_MIN(MatnIdent[j_k]); DBL_MAX_MIN(ratio*MatnIdent[i_k]);
}
}
}
      // Row operation to make the principal diagonal equal to 1
      /* back to our matrix-and-identity array; we now perform row
         operations to reduce its principal diagonal to 1 */
ArrayResize(output_Mat,size);
int counter=0;
for (int i=0; i<rowsCols; i++)
for (int j=rowsCols; j<2*rowsCols; j++)
{
int i_j, i_i;
i_j = j + (i*(rowsCols*2));
i_i = i + (i*(rowsCols*2));
//Print("i_j ",i_j," val = ",MatnIdent[i_j]," i_i =",i_i," val =",MatnIdent[i_i]);
MatnIdent[i_j] = MatnIdent[i_j] / MatnIdent[i_i];
//printf("%d Mathematical operation =%.4f",i_j, MatnIdent[i_j]);
output_Mat[counter]= MatnIdent[i_j]; //store the Inverse of Matrix in the output Array
counter++;
}
}
//---
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::MatrixInverse(double &Matrix[],double &output_mat[])
{
// The inverse computed by this method applies to square matrices only, and the
// smallest square matrix is 2x2, so that is our starting point
   int matrix_size = ArraySize(Matrix);
   if (matrix_size > 4)
     Print("The only matrix allowed using this method is a 2x2 matrix");
if (matrix_size==4)
{
MatrixtypeSquare(matrix_size);
//first step is we swap the first and the last value of the matrix
//so far we know that the last value is equal to arraysize minus one
int last_mat = matrix_size-1;
ArrayCopy(output_mat,Matrix);
// first diagonal
output_mat[0] = Matrix[last_mat]; //swap first array with last one
output_mat[last_mat] = Matrix[0]; //swap the last array with the first one
double first_diagonal = output_mat[0]*output_mat[last_mat];
      // second diagonal: adding negative signs >>>
output_mat[1] = - Matrix[1];
output_mat[2] = - Matrix[2];
double second_diagonal = output_mat[1]*output_mat[2];
if (m_debug)
{
Print("Diagonal already Swapped Matrix");
MatrixPrint(output_mat,2,2);
}
      //formula for the inverse: A^-1 = (1/det(A)) * adj(A)
      //the determinant equals the product of the first diagonal minus the product of the second diagonal
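      //for a 2x2 matrix [a b; c d] this reduces to the closed form the code performs:
      //   inverse = 1/(a*d - b*c) * [d -b; -c a]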
double det = first_diagonal-second_diagonal;
if (m_debug)
Print("determinant =",det);
for (int i=0; i<matrix_size; i++)
{ output_mat[i] = output_mat[i]*(1/det); DBL_MAX_MIN(output_mat[i]); }
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CMatrixRegression::MatrixtypeSquare(int sizearr)
{
//function for checking if the matrix is a square matrix or not
int squarematrices[9] = {4,9,16,25,36,49,64,81,100}; //the squares of 2...10
//int divident=0;
int type=0;
   for (int i=0; i<9; i++)
     {
      if (sizearr == squarematrices[i])
        {
         type = (int)sqrt(sizearr);
         printf("This is a %dx%d Matrix",type,type);
         break;
        }
      if (i==8 && sizearr != squarematrices[i]) //if the size wasn't found on the list after the last iteration, it's not a square matrix
         Print("This is not a Square Matrix");
     }
return (type);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::TrainTestSplit(double &Arr[], double& outTrain[], double& outTest[], int each_rowsize, int colsinArray=1)
{
double TestArr[], TrainArr[];
int selected_x_cols = colsinArray;
int start = 0, index=0;
int train_index = 0;
int total_size = ArraySize(Arr); //size of the original array
int train_size = (int)MathCeil(each_rowsize*m_train_split);
int test_size = (int)MathFloor(each_rowsize*(1-m_train_split));
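   //e.g. with each_rowsize=10 and m_train_split=0.7 (illustrative numbers): train_size=7, test_size=3 per column block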
//---
   int start_train =0, start_test=0;
   for (int i=0; i<selected_x_cols; i++)
     {
      start_test = start_train + train_size; //within each column block the test rows follow the train rows
      //printf("start train %d, start test %d",start_train,start_test);
      ArrayCopy(TrainArr, Arr , ArraySize(TrainArr) , start_train, train_size);
      ArrayCopy(TestArr, Arr , ArraySize(TestArr) , start_test, test_size);
      start_train += each_rowsize; //jump to the next column block
     }
//--- Get the output
ArrayFree(outTrain); //free preexisting data
ArrayFree(outTest);
ArrayCopy(outTrain,TrainArr);
ArrayCopy(outTest,TestArr);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMatrixRegression::mean(double &data[])
{
double x_y__bar=0;
for (int i=0; i<ArraySize(data); i++)
{
x_y__bar += data[i]; // all values summation
}
x_y__bar = x_y__bar/ArraySize(data); //total value after summation divided by total number of elements
return(x_y__bar);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMatrixRegression::corrcoef(double &x[],double &y[])
{
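   // Pearson correlation: r = sum((x-mean_x)*(y-mean_y)) / ( sqrt(sum((x-mean_x)^2)) * sqrt(sum((y-mean_y)^2)) )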
double r=0;
double mean_x = mean(x);
double mean_y = mean(y);
double numerator =0, denominator =0;
double x__x =0, y__y=0;
for(int i=0; i<ArraySize(x); i++)
{
      numerator += (x[i]-mean_x)*(y[i]-mean_y);
      x__x += MathPow((x[i]-mean_x),2); //summation of x values minus their mean, squared
      y__y += MathPow((y[i]-mean_y),2); //summation of y values minus their mean, squared
     }
   denominator = MathSqrt(x__x)*MathSqrt(y__y); //square root of the x summation times the square root of the y summation
r = numerator/denominator;
return(r);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMatrixRegression::corrcoeff(void)
{
double TempXArr[]; //Temporary array
double corrArr[];
ArrayResize(corrArr,x_columns_chosen);
int start_x = 0, single_colRows = rows_total/x_columns_chosen;
Print("Correlation Coefficients");
for (int i=0; i<x_columns_chosen; i++)
{
ArrayCopy(TempXArr,m_allxCopy,0,start_x, single_colRows);
corrArr[i] = corrcoef(TempXArr,m_allyCopy);
printf(" Independent Var Vs %s = %.3f",DataColumnNames[i],corrArr[i]);
start_x += single_colRows;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMatrixRegression::r_squared(double &y[],double &y_predicted[])
{
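   // Coefficient of determination: R^2 = 1 - sum((y - y_predicted)^2) / sum((y - mean_y)^2)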
double error=0;
double numerator =0, denominator=0;
double mean_y = mean(y);
//---
if (ArraySize(y_predicted)==0)
Print("The Predicted values Array seems to have no values, Call the main Simple Linear Regression Funtion before any use of this function = ",__FUNCTION__);
else
{
for (int i=0; i<ArraySize(y); i++)
{
numerator += MathPow((y[i]-y_predicted[i]),2);
denominator += MathPow((y[i]-mean_y),2);
}
error = 1 - (numerator/denominator);
}
return(error);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- end of file: LogisticRegression-MQL5-and-python/LinearRegression mql5/LinearRegressionLib.mqh
//+------------------------------------------------------------------+
//| NMF.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include "base.mqh";
class CNMF
{
protected:
uint m_components;
uint m_max_iter;
int m_randseed;
ulong n_features;
matrix W; //Basic matrix
matrix H; //coefficient matrix
double m_tol; //loss tolerance
public:
CNMF(uint max_iter=100, double tol=1e-4, int random_state=-1);
~CNMF(void);
matrix fit_transform(matrix &X, uint k=2);
matrix transform(matrix &X);
vector transform(vector &X);
uint select_best_components(matrix &X);
};
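//--- minimal usage sketch (hypothetical matrix `X`; assumes MatrixExtend from base.mqh is available):
//   CNMF nmf(200, 1e-5);
//   uint k = nmf.select_best_components(X);
//   matrix X_approx = nmf.fit_transform(X, k);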
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNMF::CNMF(uint max_iter=100, double tol=1e-4,int random_state=-1)
 :m_components(0), //0 = not chosen yet; fit_transform/select_best_components will set it
  m_max_iter(max_iter),
  m_randseed(random_state),
  m_tol(tol)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNMF::~CNMF(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CNMF::transform(matrix &X)
{
n_features = X.Cols();
if (m_components>n_features)
{
printf("%s Number of dimensions K[%d] is supposed to be <= number of features %d",__FUNCTION__,m_components,n_features);
this.m_components = (uint)n_features;
}
if (this.W.Rows()==0 || this.H.Rows()==0)
{
Print(__FUNCTION__," Model not fitted. Call fit method first.");
matrix mat={};
return mat;
}
return X.MatMul(this.H.Transpose());
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CNMF::transform(vector &X)
{
matrix INPUT_MAT = MatrixExtend::VectorToMatrix(X, X.Size());
matrix OUTPUT_MAT = transform(INPUT_MAT);
return MatrixExtend::MatrixToVector(OUTPUT_MAT);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix CNMF::fit_transform(matrix &X, uint k=2)
{
ulong m = X.Rows(), n = X.Cols();
double best_frobenius_norm = DBL_MIN;
   m_components = k == 0 ? (uint)n : k; //use all features when no k is given
//--- Initialize Random values
this.W = MatrixExtend::Random(0,1, m, this.m_components, this.m_randseed);
this.H = MatrixExtend::Random(0,1,this.m_components, n, this.m_randseed);
//--- Update factors
vector loss(this.m_max_iter);
for (uint i=0; i<this.m_max_iter; i++)
{
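      /* multiplicative update rules (Lee & Seung), matching the two lines below:
         W <- W * (X*H^T) / (W*H*H^T)
         H <- H * (W^T*X) / (W^T*W*H)
         where the division is element-wise and 1e-10 guards against division by zero */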
// Update W
this.W *= MathAbs((X.MatMul(this.H.Transpose())) / (this.W.MatMul(this.H.MatMul(this.H.Transpose()))+ 1e-10));
// Update H
this.H *= MathAbs((this.W.Transpose().MatMul(X)) / (this.W.Transpose().MatMul(this.W.MatMul(this.H))+ 1e-10));
      matrix sq_residual = MathPow(X - W.MatMul(H), 2);
      loss[i] = sq_residual.Mean(); //mean squared reconstruction error
// Calculate Frobenius norm of the difference
double frobenius_norm = (X - W.MatMul(H)).Norm(MATRIX_NORM_FROBENIUS);
if (MQLInfoInteger(MQL_DEBUG))
printf("%s [%d/%d] Loss = %.5f frobenius norm %.5f",__FUNCTION__,i+1,m_max_iter,loss[i],frobenius_norm);
// Check convergence
if (frobenius_norm < this.m_tol)
break;
}
return this.W.MatMul(this.H);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
uint CNMF::select_best_components(matrix &X)
{
uint best_components = 1;
this.m_components = (uint)X.Cols();
vector explained_ratio(X.Cols());
for (uint k = 1; k <= X.Cols(); k++)
{
// Calculate explained variance or other criterion
matrix X_reduced = fit_transform(X, k);
// Calculate explained variance as the ratio of squared Frobenius norms
double explained_variance = 1.0 - (X-X_reduced).Norm(MATRIX_NORM_FROBENIUS) / (X.Norm(MATRIX_NORM_FROBENIUS));
if (MQLInfoInteger(MQL_DEBUG))
printf("k %d Explained Var %.5f",k,explained_variance);
explained_ratio[k-1] = explained_variance;
}
return uint(explained_ratio.ArgMax()+1);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- end of file: MALE5/Dimensionality Reduction/NMF.mqh
//+------------------------------------------------------------------+
//| Naive Bayes.mqh |
//| Copyright 2022, Fxalgebra.com |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Fxalgebra.com"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
//+------------------------------------------------------------------+
//|                  N A I V E    B A Y E S                          |
//|                                                                  |
//|   suitable for classification of discrete values that have      |
//|   been loaded to a matrix using the method ReadCSVEncode from   |
//|   MatrixExtend.mqh                                              |
//|                                                                  |
//+------------------------------------------------------------------+
class CNaiveBayes
{
protected:
uint n_features;
vector y_target;
vector class_proba; //prior class probability
vector features_proba; //features probability
vector c_prior_proba; //class prior probability
vector c_evidence; //class evidence
vector calcProba(vector &v_features);
public:
vector classes; //classes available
CNaiveBayes(void);
~CNaiveBayes(void);
void fit(matrix &x, vector &y);
int predict(vector &x);
vector predict(matrix &x);
};
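//--- minimal usage sketch (hypothetical x matrix and y vector of discrete-encoded values):
//   CNaiveBayes nb;
//   nb.fit(x, y);
//   vector preds = nb.predict(x); //or nb.predict(row) for a single sample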
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNaiveBayes::CNaiveBayes(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNaiveBayes::fit(matrix &x, vector &y)
{
ulong samples = x.Rows(),
features = x.Cols();
vector unique = MatrixExtend::Unique_count(y);
this.class_proba = unique / samples;
if (MQLInfoInteger(MQL_DEBUG))
Print("class probabilities: ",class_proba);
/*
y_target = y;
n_features = x.Cols();
classes = MatrixExtend::Unique(y);
c_evidence.Resize((ulong)classes.Size());
n = y.Size();
if (n==0) { Print("--> n == 0 | Naive Bayes class failed"); return; }
//---
vector v = {};
for (ulong i=0; i<c_evidence.Size(); i++)
{
v = MatrixExtend::Search(y,classes[i]);
c_evidence[i] = (int)v.Size();
}
//---
c_prior_proba.Resize(classes.Size());
for (ulong i=0; i<classes.Size(); i++)
c_prior_proba[i] = c_evidence[i]/(double)n;
Print("---> GROUPS ",classes);
Print("Prior Class Proba ",c_prior_proba,"\nEvidence ",c_evidence);
*/
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNaiveBayes::~CNaiveBayes(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CNaiveBayes::predict(vector &x)
{
vector v = calcProba(x);
double sum = v.Sum();
for (ulong i=0; i<v.Size(); i++) //converting the values into probabilities
v[i] = NormalizeDouble(v[i]/sum,2);
vector p = v;
#ifdef DEBUG_MODE
Print("Probabilities ",p);
#endif
return((int)classes[p.ArgMax()]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CNaiveBayes::predict(matrix &x)
{
ulong rows = x.Rows();
vector v(rows), pred(rows);
for (ulong i=0; i<rows; i++)
{
v = x.Row(i);
pred[i] = predict(v);
}
return pred;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
/*
vector CNaiveBayes::calcProba(vector &v_features)
{
vector proba_v(classes.Size()); //vector to return
if (v_features.Size() != n_features)
{
printf("FATAL | Can't calculate probability, fetures columns size = %d is not equal to x_matrix columns =%d",v_features.Size(),n_features);
return proba_v;
}
//---
vector v = {};
for (ulong c=0; c<classes.Size(); c++)
{
double proba = 1;
for (ulong i=0; i<n_features; i++)
{
v = x_matrix.Col(i);
int count =0;
for (ulong j=0; j<v.Size(); j++)
{
if (v_features[i] == v[j] && classes[c] == y[j])
count++;
}
         proba *= count==0 ? 1 : count/(double)c_evidence[c]; //do not calculate if there isn't enough evidence
}
proba_v[c] = proba*c_prior_proba[c];
}
return proba_v;
}*/
//+------------------------------------------------------------------+
//| |
//| |
//| NORMAL DISTRIBUTION CLASS |
//| |
//| |
//| |
//+------------------------------------------------------------------+
class CNormDistribution
{
public:
double m_mean; //Assign the value of the mean
double m_std; //Assign the value of Variance
CNormDistribution(void);
~CNormDistribution(void);
double PDF(double x); //Probability density function
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNormDistribution::CNormDistribution(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNormDistribution::~CNormDistribution(void)
{
ZeroMemory(m_mean);
ZeroMemory(m_std);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNormDistribution::PDF(double x)
{
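   // Gaussian PDF: f(x) = 1/sqrt(2*pi*std^2) * exp( -(x - mean)^2 / (2*std^2) ), computed below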
double nurm = MathPow((x - m_mean),2)/(2*MathPow(m_std,2));
nurm = exp(-nurm);
double denorm = 1.0/(MathSqrt(2*M_PI*MathPow(m_std,2)));
return(nurm*denorm);
}
//+------------------------------------------------------------------+
//| |
//| GAUSSIAN NAIVE BAYES CLASS |
//| |
//| Suitable for classification based on features with |
//| continuous variables, |
//| |
//+------------------------------------------------------------------+
#include <MALE5\preprocessing.mqh>
/*
class CGaussianNaiveBayes
{
protected:
CNormDistribution norm_distribution;
vector c_prior_proba; //prior probability
vector c_evidence;
ulong n;
ulong m_cols; //columns in x_matrix
vector calcProba(vector &v_features);
public:
vector classes; //Target classes
CGaussianNaiveBayes(void);
~CGaussianNaiveBayes(void);
void fit(matrix &x, vector &y);
int predict_bin(vector &x);
vector predict_bin(matrix &x);
vector predict_proba(vector &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CGaussianNaiveBayes::CGaussianNaiveBayes(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CGaussianNaiveBayes::fit(matrix &x, vector &y)
{
classes = MatrixExtend::Unique(y);
m_cols = n_features;
//---
c_evidence.Resize((ulong)classes.Size());
n = y.Size();
if (n==0) { Print("---> n == 0 | Gaussian Naive Bayes class failed"); return; }
//---
vector v = {};
for (ulong i=0; i<c_evidence.Size(); i++)
{
v = MatrixExtend::Search(y, classes[i]);
c_evidence[i] = (int)v.Size();
}
c_prior_proba.Resize(classes.Size());
for (ulong i=0; i<classes.Size(); i++)
c_prior_proba[i] = c_evidence[i]/(double)n;
//---
Print("---> GROUPS ",classes);
Print("\n---> Prior_proba ",c_prior_proba," Evidence ",c_evidence);
//---
during_training = false;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CGaussianNaiveBayes::~CGaussianNaiveBayes(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int CGaussianNaiveBayes::predict_bin(vector &x)
{
if (x.Size() != m_cols)
{
Print("CRITICAL | The given x have different size than the trained x");
return (-1);
}
vector p = calcProba(x);
return((int)classes[p.ArgMax()]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CGaussianNaiveBayes::predict_proba(vector &x)
{
   vector ret_v = {};
if (x.Size() != m_cols)
{
Print("CRITICAL | The given x have different size than the trained x");
return (ret_v);
}
ret_v = calcProba(x);
return (ret_v);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CGaussianNaiveBayes::predict_bin(matrix &x)
{
ulong rows = x.Rows();
vector v(rows), pred(rows);
for (ulong i=0; i<rows; i++)
{
v = x.Row(i);
pred[i] = predict_bin(v);
}
return pred;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CGaussianNaiveBayes::calcProba(vector &v_features)
{
vector proba_v(classes.Size()); //vector to return
proba_v.Fill(-1);
if (v_features.Size() != m_cols)
{
printf("FATAL | Can't calculate probability, fetures columns size = %d is not equal to x_matrix columns =%d",v_features.Size(),m_cols);
return proba_v;
}
//---
vector v = {};
for (ulong c=0; c<classes.Size(); c++)
{
double proba = 1;
for (ulong i=0; i<m_cols; i++)
{
v = x_matrix.Col(i);
int count =0;
vector calc_v = {};
for (ulong j=0; j<v.Size(); j++)
{
if (classes[c] == y[j])
{
count++;
calc_v.Resize(count);
calc_v[count-1] = v[j];
}
}
norm_distribution.m_mean = calc_v.Mean(); //Assign these to Gaussian Normal distribution
norm_distribution.m_std = calc_v.Std();
#ifdef DEBUG_MODE
printf("mean %.5f std %.5f ",norm_distribution.m_mean,norm_distribution.m_std);
#endif
         proba *= count==0 ? 1 : norm_distribution.PDF(v_features[i]); //do not calculate if there isn't enough evidence
}
proba_v[c] = proba*c_prior_proba[c]; //Turning the probability density into probability
#ifdef DEBUG_MODE
Print(">> Proba ",proba," prior proba ",c_prior_proba);
#endif
}
//--- Normalize probabilities
proba_v = proba_v / proba_v.Sum();
return proba_v;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
*/
//--- end of file: MALE5/Naive Bayes/Naive Bayes.mqh
//+------------------------------------------------------------------+
//| kernels.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| Library containing machine learning kernels |
//+------------------------------------------------------------------+
#include <MALE5\linalg.mqh>
enum kernels
{
KERNEL_LINEAR,
KERNEL_POLYNOMIAL,
KERNEL_RADIAL_BASIS_FUNCTION_RBF,
KERNEL_SIGMOID,
};
class __kernels__
{
private:
kernels chosen_kernel;
CLinAlg linalg;
double alpha;
double beta;
int degree_polynomial;
double sigma;
//+------------------------------------------------------------------+
//| The linear kernel is the simplest one. It represents the dot |
//| product of the input vectors |
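   //| K(x1, x2) = x1 * x2^T                                            |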
//+------------------------------------------------------------------+
matrix LinearKernel(matrix &x1, matrix &x2)
{
return (x1.MatMul(x2.Transpose()));
}
//+------------------------------------------------------------------+
//| The polynomial kernel allows for the modeling of polynomial |
//| relationships between data points |
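   //| K(x1, x2) = ( x1 * x2^T + lambda )^degree                        |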
//+------------------------------------------------------------------+
matrix PolynomialKernel(matrix &x1, matrix &x2, const double lambda=1)
{
return (MathPow(x1.MatMul(x2.Transpose()) + lambda, degree_polynomial));
}
//+------------------------------------------------------------------+
//| Radial Basis Function (RBF) Kernel: The RBF kernel, also known |
//| as the Gaussian kernel, is one of the most commonly used kernels.|
//| It captures complex, non-linear relationships |
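   //| K(x1, x2) = exp( -||x1 - x2||^2 / (2*sigma^2) )                  |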
//+------------------------------------------------------------------+
matrix RBFKernel(const matrix &x1, const matrix &x2)
{
matrix norm = linalg.norm(x1,x2);
return exp(-1* ((MathPow(norm, 2)) / (2*MathPow(sigma, 2))) );
}
//+------------------------------------------------------------------+
//| The sigmoid kernel is inspired by the sigmoid function |
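   //| K(x1, x2) = tanh( alpha * (x1 * x2^T) + beta )                   |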
//+------------------------------------------------------------------+
matrix SigmoidKernel(matrix &x1, matrix &x2)
{
return (tanh((alpha* x1.MatMul(x2.Transpose())) + beta));
}
public:
__kernels__::__kernels__(
kernels KERNEL,
double alpha_=0.1,
double beta_=0.1,
int degree_polynomial_=2,
double sigma_=0.1
)
:chosen_kernel(KERNEL),
alpha(alpha_),
beta(beta_),
degree_polynomial(degree_polynomial_),
sigma(sigma_)
{
}
__kernels__::~__kernels__(void)
{
}
//--- kernels in matrix form
matrix KernelFunction(matrix &x1, matrix &x2)
{
matrix ret = {};
switch(chosen_kernel)
{
case KERNEL_LINEAR:
ret = this.LinearKernel(x1, x2);
break;
case KERNEL_POLYNOMIAL:
ret = this.PolynomialKernel(x1, x2);
break;
case KERNEL_RADIAL_BASIS_FUNCTION_RBF:
ret = this.RBFKernel(x1, x2);
break;
case KERNEL_SIGMOID:
ret = this.SigmoidKernel(x1, x2);
break;
}
return ret;
}
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- end of file: MALE5/kernels.mqh
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from django.views import generic
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
handler404 = 'catalog.views.page_not_foundview'
handler500 = 'catalog.views.server_error_view'
handler403 = 'catalog.views.permission_denied_view'
urlpatterns = [
path('admin/', admin.site.urls),
path('catalog/',include('catalog.urls')),
path('', RedirectView.as_view(url='catalog/')),
path('accounts/',include('django.contrib.auth.urls')),
] + static(settings.STATIC_URL,document_root = settings.STATIC_ROOT)
# --- end of file: Django-locallibrary/LocalLibrary/LocalLibrary/urls.py
{% extends "base_template.html" %}
{% block content %}
<h2>Error 500</h2>
<h3>Internal Server Error</h3>
{% endblock %}
<!-- end of file: Django-locallibrary/LocalLibrary/catalog/Templates/500.html -->
/*
DJANGO Admin styles
*/
@import url("fonts.168bab448fee.css");
/* VARIABLE DEFINITIONS */
:root {
--primary: #79aec8;
--secondary: #417690;
--accent: #f5dd5d;
--primary-fg: #fff;
--body-fg: #333;
--body-bg: #fff;
--body-quiet-color: #666;
--body-loud-color: #000;
--header-color: #ffc;
--header-branding-color: var(--accent);
--header-bg: var(--secondary);
--header-link-color: var(--primary-fg);
--breadcrumbs-fg: #c4dce8;
--breadcrumbs-link-fg: var(--body-bg);
--breadcrumbs-bg: var(--primary);
--link-fg: #447e9b;
--link-hover-color: #036;
--link-selected-fg: #5b80b2;
--hairline-color: #e8e8e8;
--border-color: #ccc;
--error-fg: #ba2121;
--message-success-bg: #dfd;
--message-warning-bg: #ffc;
--message-error-bg: #ffefef;
--darkened-bg: #f8f8f8; /* A bit darker than --body-bg */
--selected-bg: #e4e4e4; /* E.g. selected table cells */
--selected-row: #ffc;
--button-fg: #fff;
--button-bg: var(--primary);
--button-hover-bg: #609ab6;
--default-button-bg: var(--secondary);
--default-button-hover-bg: #205067;
--close-button-bg: #888; /* Previously #bbb, contrast 1.92 */
--close-button-hover-bg: #747474;
--delete-button-bg: #ba2121;
--delete-button-hover-bg: #a41515;
--object-tools-fg: var(--button-fg);
--object-tools-bg: var(--close-button-bg);
--object-tools-hover-bg: var(--close-button-hover-bg);
}
@media (prefers-color-scheme: dark) {
:root {
--primary: #264b5d;
--primary-fg: #eee;
--body-fg: #eeeeee;
--body-bg: #121212;
--body-quiet-color: #e0e0e0;
--body-loud-color: #ffffff;
--breadcrumbs-link-fg: #e0e0e0;
--breadcrumbs-bg: var(--primary);
--link-fg: #81d4fa;
--link-hover-color: #4ac1f7;
--link-selected-fg: #6f94c6;
--hairline-color: #272727;
--border-color: #353535;
--error-fg: #e35f5f;
--message-success-bg: #006b1b;
--message-warning-bg: #583305;
--message-error-bg: #570808;
--darkened-bg: #212121;
--selected-bg: #1b1b1b;
--selected-row: #00363a;
--close-button-bg: #333333;
--close-button-hover-bg: #666666;
}
}
html, body {
height: 100%;
}
body {
margin: 0;
padding: 0;
font-size: 14px;
font-family: "Roboto","Lucida Grande","DejaVu Sans","Bitstream Vera Sans",Verdana,Arial,sans-serif;
color: var(--body-fg);
background: var(--body-bg);
}
/* LINKS */
a:link, a:visited {
color: var(--link-fg);
text-decoration: none;
transition: color 0.15s, background 0.15s;
}
a:focus, a:hover {
color: var(--link-hover-color);
}
a:focus {
text-decoration: underline;
}
a img {
border: none;
}
a.section:link, a.section:visited {
color: var(--header-link-color);
text-decoration: none;
}
a.section:focus, a.section:hover {
text-decoration: underline;
}
/* GLOBAL DEFAULTS */
p, ol, ul, dl {
margin: .2em 0 .8em 0;
}
p {
padding: 0;
line-height: 140%;
}
h1,h2,h3,h4,h5 {
font-weight: bold;
}
h1 {
margin: 0 0 20px;
font-weight: 300;
font-size: 20px;
color: var(--body-quiet-color);
}
h2 {
font-size: 16px;
margin: 1em 0 .5em 0;
}
h2.subhead {
font-weight: normal;
margin-top: 0;
}
h3 {
font-size: 14px;
margin: .8em 0 .3em 0;
color: var(--body-quiet-color);
font-weight: bold;
}
h4 {
font-size: 12px;
margin: 1em 0 .8em 0;
padding-bottom: 3px;
}
h5 {
font-size: 10px;
margin: 1.5em 0 .5em 0;
color: var(--body-quiet-color);
text-transform: uppercase;
letter-spacing: 1px;
}
ul > li {
list-style-type: square;
padding: 1px 0;
}
li ul {
margin-bottom: 0;
}
li, dt, dd {
font-size: 13px;
line-height: 20px;
}
dt {
font-weight: bold;
margin-top: 4px;
}
dd {
margin-left: 0;
}
form {
margin: 0;
padding: 0;
}
fieldset {
margin: 0;
min-width: 0;
padding: 0;
border: none;
border-top: 1px solid var(--hairline-color);
}
blockquote {
font-size: 11px;
color: #777;
margin-left: 2px;
padding-left: 10px;
border-left: 5px solid #ddd;
}
code, pre {
font-family: "Bitstream Vera Sans Mono", Monaco, "Courier New", Courier, monospace;
color: var(--body-quiet-color);
font-size: 12px;
overflow-x: auto;
}
pre.literal-block {
margin: 10px;
background: var(--darkened-bg);
padding: 6px 8px;
}
code strong {
color: #930;
}
hr {
clear: both;
color: var(--hairline-color);
background-color: var(--hairline-color);
height: 1px;
border: none;
margin: 0;
padding: 0;
font-size: 1px;
line-height: 1px;
}
/* TEXT STYLES & MODIFIERS */
.small {
font-size: 11px;
}
.mini {
font-size: 10px;
}
.help, p.help, form p.help, div.help, form div.help, div.help li {
font-size: 11px;
color: var(--body-quiet-color);
}
div.help ul {
margin-bottom: 0;
}
.help-tooltip {
cursor: help;
}
p img, h1 img, h2 img, h3 img, h4 img, td img {
vertical-align: middle;
}
.quiet, a.quiet:link, a.quiet:visited {
color: var(--body-quiet-color);
font-weight: normal;
}
.clear {
clear: both;
}
.nowrap {
white-space: nowrap;
}
.hidden {
display: none;
}
/* TABLES */
table {
border-collapse: collapse;
border-color: var(--border-color);
}
td, th {
font-size: 13px;
line-height: 16px;
border-bottom: 1px solid var(--hairline-color);
vertical-align: top;
padding: 8px;
}
th {
font-weight: 600;
text-align: left;
}
thead th,
tfoot td {
color: var(--body-quiet-color);
padding: 5px 10px;
font-size: 11px;
background: var(--body-bg);
border: none;
border-top: 1px solid var(--hairline-color);
border-bottom: 1px solid var(--hairline-color);
}
tfoot td {
border-bottom: none;
border-top: 1px solid var(--hairline-color);
}
thead th.required {
color: var(--body-loud-color);
}
tr.alt {
background: var(--darkened-bg);
}
tr:nth-child(odd), .row-form-errors {
background: var(--body-bg);
}
tr:nth-child(even),
tr:nth-child(even) .errorlist,
tr:nth-child(odd) + .row-form-errors,
tr:nth-child(odd) + .row-form-errors .errorlist {
background: var(--darkened-bg);
}
/* SORTABLE TABLES */
thead th {
padding: 5px 10px;
line-height: normal;
text-transform: uppercase;
background: var(--darkened-bg);
}
thead th a:link, thead th a:visited {
color: var(--body-quiet-color);
}
thead th.sorted {
background: var(--selected-bg);
}
thead th.sorted .text {
padding-right: 42px;
}
table thead th .text span {
padding: 8px 10px;
display: block;
}
table thead th .text a {
display: block;
cursor: pointer;
padding: 8px 10px;
}
table thead th .text a:focus, table thead th .text a:hover {
background: var(--selected-bg);
}
thead th.sorted a.sortremove {
visibility: hidden;
}
table thead th.sorted:hover a.sortremove {
visibility: visible;
}
table thead th.sorted .sortoptions {
display: block;
padding: 9px 5px 0 5px;
float: right;
text-align: right;
}
table thead th.sorted .sortpriority {
font-size: .8em;
min-width: 12px;
text-align: center;
vertical-align: 3px;
margin-left: 2px;
margin-right: 2px;
}
table thead th.sorted .sortoptions a {
position: relative;
width: 14px;
height: 14px;
display: inline-block;
background: url("../img/sorting-icons.3a097b59f104.svg") 0 0 no-repeat;
background-size: 14px auto;
}
table thead th.sorted .sortoptions a.sortremove {
background-position: 0 0;
}
table thead th.sorted .sortoptions a.sortremove:after {
content: '\\';
position: absolute;
top: -6px;
left: 3px;
font-weight: 200;
font-size: 18px;
color: var(--body-quiet-color);
}
table thead th.sorted .sortoptions a.sortremove:focus:after,
table thead th.sorted .sortoptions a.sortremove:hover:after {
color: var(--link-fg);
}
table thead th.sorted .sortoptions a.sortremove:focus,
table thead th.sorted .sortoptions a.sortremove:hover {
background-position: 0 -14px;
}
table thead th.sorted .sortoptions a.ascending {
background-position: 0 -28px;
}
table thead th.sorted .sortoptions a.ascending:focus,
table thead th.sorted .sortoptions a.ascending:hover {
background-position: 0 -42px;
}
table thead th.sorted .sortoptions a.descending {
top: 1px;
background-position: 0 -56px;
}
table thead th.sorted .sortoptions a.descending:focus,
table thead th.sorted .sortoptions a.descending:hover {
background-position: 0 -70px;
}
/* FORM DEFAULTS */
input, textarea, select, .form-row p, form .button {
margin: 2px 0;
padding: 2px 3px;
vertical-align: middle;
font-family: "Roboto", "Lucida Grande", Verdana, Arial, sans-serif;
font-weight: normal;
font-size: 13px;
}
.form-row div.help {
padding: 2px 3px;
}
textarea {
vertical-align: top;
}
input[type=text], input[type=password], input[type=email], input[type=url],
input[type=number], input[type=tel], textarea, select, .vTextField {
border: 1px solid var(--border-color);
border-radius: 4px;
padding: 5px 6px;
margin-top: 0;
color: var(--body-fg);
background-color: var(--body-bg);
}
input[type=text]:focus, input[type=password]:focus, input[type=email]:focus,
input[type=url]:focus, input[type=number]:focus, input[type=tel]:focus,
textarea:focus, select:focus, .vTextField:focus {
border-color: var(--body-quiet-color);
}
select {
height: 30px;
}
select[multiple] {
/* Allow HTML size attribute to override the height in the rule above. */
height: auto;
min-height: 150px;
}
/* FORM BUTTONS */
.button, input[type=submit], input[type=button], .submit-row input, a.button {
background: var(--button-bg);
padding: 10px 15px;
border: none;
border-radius: 4px;
color: var(--button-fg);
cursor: pointer;
transition: background 0.15s;
}
a.button {
padding: 4px 5px;
}
.button:active, input[type=submit]:active, input[type=button]:active,
.button:focus, input[type=submit]:focus, input[type=button]:focus,
.button:hover, input[type=submit]:hover, input[type=button]:hover {
background: var(--button-hover-bg);
}
.button[disabled], input[type=submit][disabled], input[type=button][disabled] {
opacity: 0.4;
}
.button.default, input[type=submit].default, .submit-row input.default {
float: right;
border: none;
font-weight: 400;
background: var(--default-button-bg);
}
.button.default:active, input[type=submit].default:active,
.button.default:focus, input[type=submit].default:focus,
.button.default:hover, input[type=submit].default:hover {
background: var(--default-button-hover-bg);
}
.button[disabled].default,
input[type=submit][disabled].default,
input[type=button][disabled].default {
opacity: 0.4;
}
/* MODULES */
.module {
border: none;
margin-bottom: 30px;
background: var(--body-bg);
}
.module p, .module ul, .module h3, .module h4, .module dl, .module pre {
padding-left: 10px;
padding-right: 10px;
}
.module blockquote {
margin-left: 12px;
}
.module ul, .module ol {
margin-left: 1.5em;
}
.module h3 {
margin-top: .6em;
}
.module h2, .module caption, .inline-group h2 {
margin: 0;
padding: 8px;
font-weight: 400;
font-size: 13px;
text-align: left;
background: var(--primary);
color: var(--header-link-color);
}
.module caption,
.inline-group h2 {
font-size: 12px;
letter-spacing: 0.5px;
text-transform: uppercase;
}
.module table {
border-collapse: collapse;
}
/* MESSAGES & ERRORS */
ul.messagelist {
padding: 0;
margin: 0;
}
ul.messagelist li {
display: block;
font-weight: 400;
font-size: 13px;
padding: 10px 10px 10px 65px;
margin: 0 0 10px 0;
background: var(--message-success-bg) url("../img/icon-yes.d2f9f035226a.svg") 40px 12px no-repeat;
background-size: 16px auto;
color: var(--body-fg);
}
ul.messagelist li.warning {
background: var(--message-warning-bg) url("../img/icon-alert.034cc7d8a67f.svg") 40px 14px no-repeat;
background-size: 14px auto;
}
ul.messagelist li.error {
background: var(--message-error-bg) url("../img/icon-no.439e821418cd.svg") 40px 12px no-repeat;
background-size: 16px auto;
}
.errornote {
font-size: 14px;
font-weight: 700;
display: block;
padding: 10px 12px;
margin: 0 0 10px 0;
color: var(--error-fg);
border: 1px solid var(--error-fg);
border-radius: 4px;
background-color: var(--body-bg);
background-position: 5px 12px;
overflow-wrap: break-word;
}
ul.errorlist {
margin: 0 0 4px;
padding: 0;
color: var(--error-fg);
background: var(--body-bg);
}
ul.errorlist li {
font-size: 13px;
display: block;
margin-bottom: 4px;
overflow-wrap: break-word;
}
ul.errorlist li:first-child {
margin-top: 0;
}
ul.errorlist li a {
color: inherit;
text-decoration: underline;
}
td ul.errorlist {
margin: 0;
padding: 0;
}
td ul.errorlist li {
margin: 0;
}
.form-row.errors {
margin: 0;
border: none;
border-bottom: 1px solid var(--hairline-color);
background: none;
}
.form-row.errors ul.errorlist li {
padding-left: 0;
}
.errors input, .errors select, .errors textarea,
td ul.errorlist + input, td ul.errorlist + select, td ul.errorlist + textarea {
border: 1px solid var(--error-fg);
}
.description {
font-size: 12px;
padding: 5px 0 0 12px;
}
/* BREADCRUMBS */
div.breadcrumbs {
background: var(--breadcrumbs-bg);
padding: 10px 40px;
border: none;
color: var(--breadcrumbs-fg);
text-align: left;
}
div.breadcrumbs a {
color: var(--breadcrumbs-link-fg);
}
div.breadcrumbs a:focus, div.breadcrumbs a:hover {
color: var(--breadcrumbs-fg);
}
/* ACTION ICONS */
.viewlink, .inlineviewlink {
padding-left: 16px;
background: url("../img/icon-viewlink.41eb31f7826e.svg") 0 1px no-repeat;
}
.addlink {
padding-left: 16px;
background: url("../img/icon-addlink.d519b3bab011.svg") 0 1px no-repeat;
}
.changelink, .inlinechangelink {
padding-left: 16px;
background: url("../img/icon-changelink.18d2fd706348.svg") 0 1px no-repeat;
}
.deletelink {
padding-left: 16px;
background: url("../img/icon-deletelink.564ef9dc3854.svg") 0 1px no-repeat;
}
a.deletelink:link, a.deletelink:visited {
color: #CC3434; /* XXX Probably unused? */
}
a.deletelink:focus, a.deletelink:hover {
color: #993333; /* XXX Probably unused? */
text-decoration: none;
}
/* OBJECT TOOLS */
.object-tools {
font-size: 10px;
font-weight: bold;
padding-left: 0;
float: right;
position: relative;
margin-top: -48px;
}
.object-tools li {
display: block;
float: left;
margin-left: 5px;
height: 16px;
}
.object-tools a {
border-radius: 15px;
}
.object-tools a:link, .object-tools a:visited {
display: block;
float: left;
padding: 3px 12px;
background: var(--object-tools-bg);
color: var(--object-tools-fg);
font-weight: 400;
font-size: 11px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.object-tools a:focus, .object-tools a:hover {
background-color: var(--object-tools-hover-bg);
}
.object-tools a:focus{
text-decoration: none;
}
.object-tools a.viewsitelink, .object-tools a.addlink {
background-repeat: no-repeat;
background-position: right 7px center;
padding-right: 26px;
}
.object-tools a.viewsitelink {
background-image: url("../img/tooltag-arrowright.bbfb788a849e.svg");
}
.object-tools a.addlink {
background-image: url("../img/tooltag-add.e59d620a9742.svg");
}
/* OBJECT HISTORY */
table#change-history {
width: 100%;
}
table#change-history tbody th {
width: 16em;
}
/* PAGE STRUCTURE */
#container {
position: relative;
width: 100%;
min-width: 980px;
padding: 0;
display: flex;
flex-direction: column;
height: 100%;
}
#container > div {
flex-shrink: 0;
}
#container > .main {
display: flex;
flex: 1 0 auto;
}
.main > .content {
flex: 1 0;
max-width: 100%;
}
#content {
padding: 20px 40px;
}
.dashboard #content {
width: 600px;
}
#content-main {
float: left;
width: 100%;
}
#content-related {
float: right;
width: 260px;
position: relative;
margin-right: -300px;
}
#footer {
clear: both;
padding: 10px;
}
/* COLUMN TYPES */
.colMS {
margin-right: 300px;
}
.colSM {
margin-left: 300px;
}
.colSM #content-related {
float: left;
margin-right: 0;
margin-left: -300px;
}
.colSM #content-main {
float: right;
}
.popup .colM {
width: auto;
}
/* HEADER */
#header {
width: auto;
height: auto;
display: flex;
justify-content: space-between;
align-items: center;
padding: 10px 40px;
background: var(--header-bg);
color: var(--header-color);
overflow: hidden;
}
#header a:link, #header a:visited {
color: var(--header-link-color);
}
#header a:focus , #header a:hover {
text-decoration: underline;
}
#branding {
float: left;
}
#branding h1 {
padding: 0;
margin: 0 20px 0 0;
font-weight: 300;
font-size: 24px;
color: var(--accent);
}
#branding h1, #branding h1 a:link, #branding h1 a:visited {
color: var(--accent);
}
#branding h2 {
padding: 0 10px;
font-size: 14px;
margin: -8px 0 8px 0;
font-weight: normal;
color: var(--header-color);
}
#branding a:hover {
text-decoration: none;
}
#user-tools {
float: right;
padding: 0;
margin: 0 0 0 20px;
font-weight: 300;
font-size: 11px;
letter-spacing: 0.5px;
text-transform: uppercase;
text-align: right;
}
#user-tools a {
border-bottom: 1px solid rgba(255, 255, 255, 0.25);
}
#user-tools a:focus, #user-tools a:hover {
text-decoration: none;
border-bottom-color: var(--primary);
color: var(--primary);
}
/* SIDEBAR */
#content-related {
background: var(--darkened-bg);
}
#content-related .module {
background: none;
}
#content-related h3 {
color: var(--body-quiet-color);
padding: 0 16px;
margin: 0 0 16px;
}
#content-related h4 {
font-size: 13px;
}
#content-related p {
padding-left: 16px;
padding-right: 16px;
}
#content-related .actionlist {
padding: 0;
margin: 16px;
}
#content-related .actionlist li {
line-height: 1.2;
margin-bottom: 10px;
padding-left: 18px;
}
#content-related .module h2 {
background: none;
padding: 16px;
margin-bottom: 16px;
border-bottom: 1px solid var(--hairline-color);
font-size: 18px;
color: var(--body-fg);
}
.delete-confirmation form input[type="submit"] {
background: var(--delete-button-bg);
border-radius: 4px;
padding: 10px 15px;
color: var(--button-fg);
}
.delete-confirmation form input[type="submit"]:active,
.delete-confirmation form input[type="submit"]:focus,
.delete-confirmation form input[type="submit"]:hover {
background: var(--delete-button-hover-bg);
}
.delete-confirmation form .cancel-link {
display: inline-block;
vertical-align: middle;
height: 15px;
line-height: 15px;
border-radius: 4px;
padding: 10px 15px;
color: var(--button-fg);
background: var(--close-button-bg);
margin: 0 0 0 10px;
}
.delete-confirmation form .cancel-link:active,
.delete-confirmation form .cancel-link:focus,
.delete-confirmation form .cancel-link:hover {
background: var(--close-button-hover-bg);
}
/* POPUP */
.popup #content {
padding: 20px;
}
.popup #container {
min-width: 0;
}
.popup #header {
padding: 10px 20px;
}
/* end of file: Django-locallibrary/LocalLibrary/staticfiles/admin/css/base.1f418065fc2c.css */
@import url("widgets.694d845b2cb1.css");
/* FORM ROWS */
.form-row {
overflow: hidden;
padding: 10px;
font-size: 13px;
border-bottom: 1px solid var(--hairline-color);
}
.form-row img, .form-row input {
vertical-align: middle;
}
.form-row label input[type="checkbox"] {
margin-top: 0;
vertical-align: 0;
}
form .form-row p {
padding-left: 0;
}
/* FORM LABELS */
label {
font-weight: normal;
color: var(--body-quiet-color);
font-size: 13px;
}
.required label, label.required {
font-weight: bold;
color: var(--body-fg);
}
/* RADIO BUTTONS */
form ul.radiolist li {
list-style-type: none;
}
form ul.radiolist label {
float: none;
display: inline;
}
form ul.radiolist input[type="radio"] {
margin: -2px 4px 0 0;
padding: 0;
}
form ul.inline {
margin-left: 0;
padding: 0;
}
form ul.inline li {
float: left;
padding-right: 7px;
}
/* ALIGNED FIELDSETS */
.aligned label {
display: block;
padding: 4px 10px 0 0;
float: left;
width: 160px;
word-wrap: break-word;
line-height: 1;
}
.aligned label:not(.vCheckboxLabel):after {
content: '';
display: inline-block;
vertical-align: middle;
height: 26px;
}
.aligned label + p, .aligned label + div.help, .aligned label + div.readonly {
padding: 6px 0;
margin-top: 0;
margin-bottom: 0;
margin-left: 170px;
}
.aligned ul label {
display: inline;
float: none;
width: auto;
}
.aligned .form-row input {
margin-bottom: 0;
}
.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField {
width: 350px;
}
form .aligned ul {
margin-left: 160px;
padding-left: 10px;
}
form .aligned ul.radiolist {
display: inline-block;
margin: 0;
padding: 0;
}
form .aligned p.help,
form .aligned div.help {
clear: left;
margin-top: 0;
margin-left: 160px;
padding-left: 10px;
}
form .aligned label + p.help,
form .aligned label + div.help {
margin-left: 0;
padding-left: 0;
}
form .aligned p.help:last-child,
form .aligned div.help:last-child {
margin-bottom: 0;
padding-bottom: 0;
}
form .aligned input + p.help,
form .aligned textarea + p.help,
form .aligned select + p.help,
form .aligned input + div.help,
form .aligned textarea + div.help,
form .aligned select + div.help {
margin-left: 160px;
padding-left: 10px;
}
form .aligned ul li {
list-style: none;
}
form .aligned table p {
margin-left: 0;
padding-left: 0;
}
.aligned .vCheckboxLabel {
float: none;
width: auto;
display: inline-block;
vertical-align: -3px;
padding: 0 0 5px 5px;
}
.aligned .vCheckboxLabel + p.help,
.aligned .vCheckboxLabel + div.help {
margin-top: -4px;
}
.colM .aligned .vLargeTextField, .colM .aligned .vXMLLargeTextField {
width: 610px;
}
.checkbox-row p.help,
.checkbox-row div.help {
margin-left: 0;
padding-left: 0;
}
fieldset .fieldBox {
float: left;
margin-right: 20px;
}
/* WIDE FIELDSETS */
.wide label {
width: 200px;
}
form .wide p,
form .wide input + p.help,
form .wide input + div.help {
margin-left: 200px;
}
form .wide p.help,
form .wide div.help {
padding-left: 38px;
}
form div.help ul {
padding-left: 0;
margin-left: 0;
}
.colM fieldset.wide .vLargeTextField, .colM fieldset.wide .vXMLLargeTextField {
width: 450px;
}
/* COLLAPSED FIELDSETS */
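/* Collapse trick: hide every descendant of a collapsed fieldset, then re-show
   the fieldset itself and its h2 so only the header (with its toggle) remains. */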
fieldset.collapsed * {
display: none;
}
fieldset.collapsed h2, fieldset.collapsed {
display: block;
}
fieldset.collapsed {
border: 1px solid var(--hairline-color);
border-radius: 4px;
overflow: hidden;
}
fieldset.collapsed h2 {
background: var(--darkened-bg);
color: var(--body-quiet-color);
}
fieldset .collapse-toggle {
color: var(--header-link-color);
}
fieldset.collapsed .collapse-toggle {
background: transparent;
display: inline;
color: var(--link-fg);
}
/* MONOSPACE TEXTAREAS */
fieldset.monospace textarea {
font-family: "Bitstream Vera Sans Mono", Monaco, "Courier New", Courier, monospace;
}
/* SUBMIT ROW */
.submit-row {
padding: 12px 14px;
margin: 0 0 20px;
background: var(--darkened-bg);
border: 1px solid var(--hairline-color);
border-radius: 4px;
text-align: right;
overflow: hidden;
}
body.popup .submit-row {
overflow: auto;
}
.submit-row input {
height: 35px;
line-height: 15px;
margin: 0 0 0 5px;
}
.submit-row input.default {
margin: 0 0 0 8px;
text-transform: uppercase;
}
.submit-row p {
margin: 0.3em;
}
.submit-row p.deletelink-box {
float: left;
margin: 0;
}
.submit-row a.deletelink {
display: block;
background: var(--delete-button-bg);
border-radius: 4px;
padding: 10px 15px;
height: 15px;
line-height: 15px;
color: var(--button-fg);
}
.submit-row a.closelink {
display: inline-block;
background: var(--close-button-bg);
border-radius: 4px;
padding: 10px 15px;
height: 15px;
line-height: 15px;
margin: 0 0 0 5px;
color: var(--button-fg);
}
.submit-row a.deletelink:focus,
.submit-row a.deletelink:hover,
.submit-row a.deletelink:active {
background: var(--delete-button-hover-bg);
}
.submit-row a.closelink:focus,
.submit-row a.closelink:hover,
.submit-row a.closelink:active {
background: var(--close-button-hover-bg);
}
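/* Assumed markup these rules target, approximating Django's submit_line.html
   template (attribute values are illustrative):
   <div class="submit-row">
     <input type="submit" value="Save" class="default" name="_save">
     <p class="deletelink-box"><a href="delete/" class="deletelink">Delete</a></p>
   </div> */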
/* CUSTOM FORM FIELDS */
.vSelectMultipleField {
vertical-align: top;
}
.vCheckboxField {
border: none;
}
.vDateField, .vTimeField {
margin-right: 2px;
margin-bottom: 4px;
}
.vDateField {
min-width: 6.85em;
}
.vTimeField {
min-width: 4.7em;
}
.vURLField {
width: 30em;
}
.vLargeTextField, .vXMLLargeTextField {
width: 48em;
}
.flatpages-flatpage #id_content {
height: 40.2em;
}
.module table .vPositiveSmallIntegerField {
width: 2.2em;
}
.vTextField, .vUUIDField {
width: 20em;
}
.vIntegerField {
width: 5em;
}
.vBigIntegerField {
width: 10em;
}
.vForeignKeyRawIdAdminField {
width: 5em;
}
/* INLINES */
.inline-group {
padding: 0;
margin: 0 0 30px;
}
.inline-group thead th {
padding: 8px 10px;
}
.inline-group .aligned label {
width: 160px;
}
.inline-related {
position: relative;
}
.inline-related h3 {
margin: 0;
color: var(--body-quiet-color);
padding: 5px;
font-size: 13px;
background: var(--darkened-bg);
border-top: 1px solid var(--hairline-color);
border-bottom: 1px solid var(--hairline-color);
}
.inline-related h3 span.delete {
float: right;
}
.inline-related h3 span.delete label {
margin-left: 2px;
font-size: 11px;
}
.inline-related fieldset {
margin: 0;
background: var(--body-bg);
border: none;
width: 100%;
}
.inline-related fieldset.module h3 {
margin: 0;
padding: 2px 5px 3px 5px;
font-size: 11px;
text-align: left;
font-weight: bold;
background: #bcd;
color: var(--body-bg);
}
.inline-group .tabular fieldset.module {
border: none;
}
.inline-related.tabular fieldset.module table {
width: 100%;
overflow-x: scroll;
}
.last-related fieldset {
border: none;
}
.inline-group .tabular tr.has_original td {
padding-top: 2em;
}
.inline-group .tabular tr td.original {
padding: 2px 0 0 0;
width: 0;
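/* The underscore prefix below is a legacy hack: only IE6 parses it. */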
_position: relative;
}
.inline-group .tabular th.original {
width: 0;
padding: 0;
}
.inline-group .tabular td.original p {
position: absolute;
left: 0;
height: 1.1em;
padding: 2px 9px;
overflow: hidden;
font-size: 9px;
font-weight: bold;
color: var(--body-quiet-color);
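/* Legacy IE6-only underscore hack, as above. */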
_width: 700px;
}
.inline-group ul.tools {
padding: 0;
margin: 0;
list-style: none;
}
.inline-group ul.tools li {
display: inline;
padding: 0 5px;
}
.inline-group div.add-row,
.inline-group .tabular tr.add-row td {
color: var(--body-quiet-color);
background: var(--darkened-bg);
padding: 8px 10px;
border-bottom: 1px solid var(--hairline-color);
}
.inline-group ul.tools a.add,
.inline-group div.add-row a,
.inline-group .tabular tr.add-row td a {
background: url("../img/icon-addlink.d519b3bab011.svg") 0 1px no-repeat;
padding-left: 16px;
font-size: 12px;
}
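/* Assumed markup for the add-another control in a tabular inline
   (link text and colspan are illustrative):
   <tr class="add-row"><td colspan="4"><a href="#">Add another item</a></td></tr> */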
.empty-form {
display: none;
}
/* RELATED FIELD ADD ONE / LOOKUP */
.related-lookup {
margin-left: 5px;
display: inline-block;
vertical-align: middle;
width: 16px;
height: 16px;
background-image: url("../img/search.7cf54ff789c6.svg");
background-repeat: no-repeat;
background-size: 14px;
}
form .related-widget-wrapper ul {
display: inline-block;
margin-left: 0;
padding-left: 0;
}
.clearable-file-input input {
margin-top: 0;
}
/* END FILE: Django-locallibrary/LocalLibrary/staticfiles/admin/css/forms.1d89ec6432f5.css */

/* FILE: Django-locallibrary/LocalLibrary/staticfiles/admin/css/responsive_rtl.css */
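/* These rules mirror the default LTR spacing for documents with dir="rtl",
   at tablet (max-width: 1024px) and mobile (max-width: 767px) breakpoints. */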
/* TABLETS */
@media (max-width: 1024px) {
[dir="rtl"] .colMS {
margin-right: 0;
}
[dir="rtl"] #user-tools {
text-align: right;
}
[dir="rtl"] #changelist .actions label {
padding-left: 10px;
padding-right: 0;
}
[dir="rtl"] #changelist .actions select {
margin-left: 0;
margin-right: 15px;
}
[dir="rtl"] .change-list .filtered .results,
[dir="rtl"] .change-list .filtered .paginator,
[dir="rtl"] .filtered #toolbar,
[dir="rtl"] .filtered div.xfull,
[dir="rtl"] .filtered .actions,
[dir="rtl"] #changelist-filter {
margin-left: 0;
}
[dir="rtl"] .inline-group ul.tools a.add,
[dir="rtl"] .inline-group div.add-row a,
[dir="rtl"] .inline-group .tabular tr.add-row td a {
padding: 8px 26px 8px 10px;
background-position: calc(100% - 8px) 9px;
}
[dir="rtl"] .related-widget-wrapper-link + .selector {
margin-right: 0;
margin-left: 15px;
}
[dir="rtl"] .selector .selector-filter label {
margin-right: 0;
margin-left: 8px;
}
[dir="rtl"] .object-tools li {
float: right;
}
[dir="rtl"] .object-tools li + li {
margin-left: 0;
margin-right: 15px;
}
[dir="rtl"] .dashboard .module table td a {
padding-left: 0;
padding-right: 16px;
}
}
/* MOBILE */
@media (max-width: 767px) {
[dir="rtl"] .aligned .related-lookup,
[dir="rtl"] .aligned .datetimeshortcuts {
margin-left: 0;
margin-right: 15px;
}
[dir="rtl"] .aligned ul {
margin-right: 0;
}
[dir="rtl"] #changelist-filter {
margin-left: 0;
margin-right: 0;
}
}
/* END FILE: Django-locallibrary/LocalLibrary/staticfiles/admin/css/responsive_rtl.css */