From ef1b4bccd73f497cd866c75293ae6c24fdd6faa9 Mon Sep 17 00:00:00 2001
From: Dan Ryan
Date: Wed, 11 Apr 2018 20:29:17 -0400
Subject: [PATCH 01/17] Implement requirements refactor

Signed-off-by: Dan Ryan
---
 pipenv/core.py         |   5 +-
 pipenv/requirements.py | 759 +++++++++++++++++++++++++++++++++++++++++
 pipenv/utils.py        | 179 +---------
 3 files changed, 774 insertions(+), 169 deletions(-)
 create mode 100644 pipenv/requirements.py

diff --git a/pipenv/core.py b/pipenv/core.py
index 60d9cdce5d..5c5783f8ab 100644
--- a/pipenv/core.py
+++ b/pipenv/core.py
@@ -25,6 +25,7 @@
 from .cmdparse import ScriptEmptyError
 from .project import Project, SourceNotFound
+from .requirements import PipenvRequirement
 from .utils import (
     convert_deps_from_pip,
     convert_deps_to_pip,
@@ -1414,7 +1415,7 @@ def pip_install(
             f.write(package_name)
     # Install dependencies when a package is a VCS dependency.
     try:
-        req = get_requirement(
+        req = PipenvRequirement.from_line(
             package_name.split('--hash')[0].split('--trusted-host')[0]
         ).vcs
     except (ParseException, ValueError) as e:
@@ -2566,7 +2567,7 @@ def do_clean(
     )
     installed_package_names = []
     for installed in installed_packages:
-        r = get_requirement(installed)
+        r = PipenvRequirement.from_line(installed).requirement
         # Ignore editable installations.
         if not r.editable:
             installed_package_names.append(r.name.lower())
diff --git a/pipenv/requirements.py b/pipenv/requirements.py
new file mode 100644
index 0000000000..3120b1ddc7
--- /dev/null
+++ b/pipenv/requirements.py
@@ -0,0 +1,759 @@
+# -*- coding=utf-8 -*-
+from __future__ import absolute_import
+import abc
+import sys
+from pipenv import PIPENV_VENDOR, PIPENV_PATCHED
+
+sys.path.insert(0, PIPENV_VENDOR)
+sys.path.insert(0, PIPENV_PATCHED)
+import hashlib
+import os
+import attr
+import requirements
+import six
+from attr import attrs, attrib, Factory, validators
+from pip9.index import Link
+from pip9.download import path_to_url
+from pip9.req.req_install import _strip_extras
+from pip9._vendor.distlib.markers import Evaluator
+from pip9._vendor.packaging.markers import Marker, InvalidMarker
+from pip9._vendor.packaging.specifiers import SpecifierSet, InvalidSpecifier
+from pipenv.utils import SCHEME_LIST, VCS_LIST, is_installable_file, is_vcs, multi_split, get_converted_relative_path, is_star, is_valid_url
+from first import first
+
+try:
+    from pathlib import Path
+except ImportError:
+    from pathlib2 import Path
+HASH_STRING = ' --hash={0}'
+
+
+def _strip_ssh_from_git_uri(uri):
+    """Convert a git+ssh:// formatted URI to the git+git@ format"""
+    if isinstance(uri, six.string_types):
+        uri = uri.replace('git+ssh://', 'git+')
+    return uri
+
+
+def _clean_git_uri(uri):
+    """Add a scheme to scheme-less git+ URIs, matching pip's behavior"""
+    if isinstance(uri, six.string_types):
+        # Add scheme for parsing purposes, this is also what pip does
+        if uri.startswith('git+') and '://' not in uri:
+            uri = uri.replace('git+', 'git+ssh://')
+    return uri
+
+
+def _split_markers(line):
+    """Split markers from a dependency"""
+    if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
+        marker_sep = ';'
+    else:
+        marker_sep = '; '
+    markers = None
+    if marker_sep in line:
+        line, markers = line.split(marker_sep, 1)
+        markers = markers.strip() if markers else None
+    return line, markers
+
+
+def _split_vcs_method(uri):
+    """Split a vcs+uri formatted uri into (vcs, uri)"""
+    vcs_start = '{0}+'
+    vcs = first(
+        [vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))]
+    )
+    if vcs:
+        vcs, uri = uri.split('+', 1)
+    return vcs, uri
+
+
+def _validate_vcs(instance, attr_, value):
+    if value not in VCS_LIST:
+        raise ValueError('Invalid vcs {0!r}'.format(value))
+
+
+def _validate_path(instance, attr_, value):
+    if not os.path.exists(value):
+        raise ValueError('Invalid path {0!r}'.format(value))
+
+
+def _validate_markers(instance, attr_, value):
+    try:
+        Marker('{0}{1}'.format(attr_.name, value))
+    except InvalidMarker:
+        raise ValueError('Invalid Marker {0}{1}'.format(attr_.name, value))
+
+
+def _validate_specifiers(instance, attr_, value):
+    if value == '':
+        return True
+    try:
+        SpecifierSet(value)
+    except InvalidSpecifier:
+        raise ValueError('Invalid Specifiers {0}'.format(value))
+
+
+def _filter_none(k, v):
+    if v:
+        return True
+    return False
+
+
+def _optional_instance_of(cls):
+    return validators.optional(validators.instance_of(cls))
+
+
+@attrs
+class Source(object):
+    #: URL to PyPI instance
+    url = attrib(default='')
+    #: If False, skip SSL checks
+    verify_ssl = attrib(
+        default=True,
+        validator=validators.optional(validators.instance_of(bool)),
+    )
+    #: human name to refer to this source (can be referenced in packages or dev-packages)
+    name = attrib(default='')
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseRequirement(object):
+
+    @classmethod
+    def from_line(cls, line):
+        """Returns a requirement from a requirements.txt or pip-compatible line"""
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def line_part(self):
+        """Returns the current requirement as a pip-compatible line"""
+
+    @classmethod
+    def from_pipfile(cls, name, pipfile):
+        """Returns a requirement from a pipfile entry"""
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def pipfile_part(self):
+        """Returns the current requirement as a pipfile entry"""
+
+    @classmethod
+    def attr_fields(cls):
+        return [field.name for field in attr.fields(cls)]
+
+
+@attrs
+class PipenvMarkers(BaseRequirement):
+    """System-level requirements - see PEP508 for more detail"""
+    os_name = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    sys_platform = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    platform_machine = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    platform_python_implementation = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    platform_release = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    platform_system = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    platform_version = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    python_version = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    python_full_version = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    implementation_name = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+    implementation_version = attrib(
+        default=None, validator=validators.optional(_validate_markers)
+    )
+
+    @property
+    def line_part(self):
+        return ' and '.join(
+            ['{0} {1}'.format(k, v) for k, v in attr.asdict(self, filter=_filter_none).items()]
+        )
+
+    @property
+    def pipfile_part(self):
+        return {'markers': self.line_part}
+
+    @classmethod
+    def make_marker(cls, marker_string):
+        marker = Marker(marker_string)
+        marker_dict = {}
+        for m in marker._markers:
+            if isinstance(m, six.string_types):
+                continue
+            var, op, val = m
+            if var.value in cls.attr_fields():
+                marker_dict[var.value] = '{0} "{1}"'.format(op, val)
+        return marker_dict
+
+    @classmethod
+    def 
from_line(cls, line): + if ';' in line: + line = line.rsplit(';', 1)[1].strip() + marker_dict = cls.make_marker(line) + return cls(**marker_dict) + + @classmethod + def from_pipfile(cls, name, pipfile): + found_keys = [k for k in pipfile.keys() if k in cls.attr_fields()] + marker_strings = ['{0} {1}'.format(k, pipfile[k]) for k in found_keys] + if pipfile.get('markers'): + marker_strings.append(pipfile.get('markers')) + markers = {} + for marker in marker_strings: + marker_dict = cls.make_marker(marker) + if marker_dict: + markers.update(marker_dict) + return cls(**markers) + + +@attrs +class NamedRequirement(BaseRequirement): + name = attrib() + version = attrib(validator=validators.optional(_validate_specifiers)) + req = attrib() + + @req.default + def get_requirement(self): + return first(requirements.parse('{0}{1}'.format(self.name, self.version))) + + @classmethod + def from_line(cls, line): + req = first(requirements.parse(line)) + specifiers = None + if req.specifier: + specifiers = _specs_to_string(req.specs) + return cls(name=req.name, version=specifiers, req=req) + + @classmethod + def from_pipfile(cls, name, pipfile): + creation_args = {} + if hasattr(pipfile, 'keys'): + creation_args = {k: v for k, v in pipfile.items() if k in cls.attr_fields()} + creation_args['name'] = name + version = _get_version(pipfile) + creation_args['version'] = version + creation_args['req'] = first(requirements.parse('{0}{1}'.format(name, version))) + return cls(**creation_args) + + @property + def line_part(self): + return '{self.name}'.format(self=self) + + @property + def pipfile_part(self): + pipfile_dict = attr.asdict(self, filter=_filter_none) + if 'version' not in pipfile_dict: + pipfile_dict['version'] = '*' + name = pipfile_dict.pop('name') + return {name: pipfile_dict} + + +@attrs +class FileRequirement(BaseRequirement): + """File requirements for tar.gz installable files or wheels or setup.py + containing directories.""" + path = attrib(default=None, validator=validators.optional(_validate_path)) + # : path to hit - without any of the VCS prefixes (like git+ / http+ / etc) + uri = attrib() + name = attrib() + link = attrib() + editable = attrib(default=None) + req = attrib() + _has_hashed_name = False + _uri_scheme = None + + @uri.default + def get_uri(self): + if self.path and not self.uri: + self._uri_scheme = 'path' + self.uri = path_to_url(os.path.abspath(self.path)) + + @name.default + def get_name(self): + loc = self.path or self.uri + if loc: + self._uri_scheme = 'path' if self.path else 'uri' + hashed_loc = hashlib.sha256(loc.encode('utf-8')).hexdigest() + hash_fragment = hashed_loc[-7:] + self._has_hashed_name = True + return hash_fragment + + @link.default + def get_link(self): + target = '{0}#egg={1}'.format(self.uri, self.name) + return Link(target) + + @req.default + def get_requirement(self): + base = '{0}'.format(self.link) + req = first(requirements.parse(base)) + if self.editable: + req.editable = True + if self.link and self.link.scheme.startswith('file'): + if self.path: + req.path = self.path + req.local_file = True + self._uri_scheme = 'file' + req.uri = None + req.link = self.link + return req + + @property + def is_remote_artifact(self): + return any(self.link.scheme.startswith(scheme) for scheme in ('http', 'https', 'ftp', 'ftps', 'uri')) and not self.req.local_file and (self.link.is_artifact or self.link.is_wheel) and not self.req.editable + + @classmethod + def from_line(cls, line): + link = None + path = None + editable = line.startswith('-e ') + line = 
line.split(' ', 1)[1] if editable else line
+        if not any([is_installable_file(line), is_valid_url(line)]):
+            raise ValueError(
+                'Supplied requirement is not installable: {0!r}'.format(line)
+            )
+
+        if is_valid_url(line):
+            link = Link(line)
+        else:
+            _path = Path(line)
+            link = Link(_path.absolute().as_uri())
+            if _path.is_absolute() or _path.as_posix() == '.':
+                path = _path.as_posix()
+            else:
+                path = get_converted_relative_path(line)
+        arg_dict = {
+            'path': path,
+            'uri': link.url_without_fragment,
+            'link': link,
+            'editable': editable
+        }
+        if link.egg_fragment:
+            arg_dict['name'] = link.egg_fragment
+        created = cls(**arg_dict)
+        return created
+
+    @classmethod
+    def from_pipfile(cls, name, pipfile):
+        uri_key = first((k for k in ['uri', 'file'] if k in pipfile))
+        uri = pipfile.get(uri_key, pipfile.get('path'))
+        if not uri_key:
+            abs_path = os.path.abspath(uri)
+            uri = path_to_url(abs_path) if os.path.exists(abs_path) else None
+        link = Link(uri) if uri else None
+        arg_dict = {
+            'name': name,
+            'path': pipfile.get('path'),
+            'uri': link.url_without_fragment if link else uri,
+            'editable': pipfile.get('editable'),
+            'link': link
+        }
+        return cls(**arg_dict)
+
+    @property
+    def line_part(self):
+        seed = self.path or self.link.url or self.uri
+        editable = '-e ' if self.editable else ''
+        return '{0}{1}'.format(editable, seed)
+
+    @property
+    def pipfile_part(self):
+        pipfile_dict = {k: v for k, v in attr.asdict(self, filter=_filter_none).items()}
+        name = pipfile_dict.pop('name')
+        req = self.req
+        if self._has_hashed_name and self.is_remote_artifact:
+            dict_key = 'file'
+            # Look for uri first because file is a uri format and this is designed
+            # to make sure we add file keys to the pipfile as a replacement of uri
+            target_keys = [k for k in pipfile_dict.keys() if k in ['uri', 'path']]
+            pipfile_dict[dict_key] = pipfile_dict.pop(first(target_keys))
+            if len(target_keys) > 1:
+                _ = pipfile_dict.pop(target_keys[1])
+        else:
+            collisions = [key for key in ['path', 'uri', 'file'] if key in pipfile_dict]
+            if len(collisions) > 1:
+                for k in collisions[1:]:
+                    _ = pipfile_dict.pop(k)
+        return {name: pipfile_dict}
+
+
+@attrs
+class VCSRequirement(FileRequirement):
+    editable = attrib(default=None)
+    path = attrib(default=None, validator=validators.optional(_validate_path))
+    vcs = attrib(validator=validators.optional(_validate_vcs), default=None)
+    #: vcs reference name (branch / commit / tag)
+    ref = attrib(default=None)
+    #: path to hit - without any of the VCS prefixes (like git+ / http+ / etc)
+    uri = attrib(default=None)
+    subdirectory = attrib(default=None)
+    name = attrib()
+    link = attrib()
+    req = attrib()
+    _INCLUDE_FIELDS = ('editable', 'uri', 'path', 'vcs', 'ref', 'subdirectory', 'name', 'link', 'req')
+
+    @link.default
+    def get_link(self):
+        return build_vcs_link(self.vcs, _clean_git_uri(self.uri), name=self.name, ref=self.ref, subdirectory=self.subdirectory)
+
+    @name.default
+    def get_name(self):
+        return self.link.egg_fragment or (self.req.name if self.req else '')
+
+    @property
+    def vcs_uri(self):
+        uri = self.uri
+        if not any(uri.startswith('{0}+'.format(vcs)) for vcs in VCS_LIST):
+            uri = '{0}+{1}'.format(self.vcs, uri)
+        return uri
+
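+    # Build the underlying parsed requirement from the computed VCS link,
+    # carrying over the editable flag, vcs type, and ref, and converting
+    # git+ssh:// URLs back to the git+git@ shorthand when the original
+    # URI used that form.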
+    @req.default
+    def get_requirement(self):
+        prefix = '-e ' if self.editable else ''
+        line = '{0}{1}'.format(prefix, self.link.url)
+        req = first(requirements.parse(line))
+        if self.path and self.link and self.link.scheme.startswith('file'):
+            req.local_file = True
+            req.path = self.path
+        if self.editable:
+            req.editable = True
+        req.link = self.link
+        if self.uri != self.link.url and 'git+ssh://' in self.link.url and 'git+git@' in self.uri:
+            req.line = _strip_ssh_from_git_uri(req.line)
+            req.uri = _strip_ssh_from_git_uri(req.uri)
+        if not req.name:
+            raise ValueError(
+                'pipenv requires an #egg fragment for version controlled '
+                'dependencies. Please install remote dependency '
+                'in the form {0}#egg=<package-name>.'.format(req.uri)
+            )
+        if self.vcs and not req.vcs:
+            req.vcs = self.vcs
+        if self.ref and not req.revision:
+            req.revision = self.ref
+        return req
+
+    @classmethod
+    def from_pipfile(cls, name, pipfile):
+        creation_args = {}
+        pipfile_keys = [
+            k
+            for k in (
+                'ref', 'vcs', 'subdirectory', 'path', 'editable', 'file', 'uri'
+            )
+            + VCS_LIST
+            if k in pipfile
+        ]
+        for key in pipfile_keys:
+            if key in VCS_LIST:
+                creation_args['vcs'] = key
+                composed_uri = _clean_git_uri('{0}+{1}'.format(key, pipfile.get(key)))[len(key) + 1:]
+                is_url = is_valid_url(pipfile.get(key)) or is_valid_url(composed_uri)
+                target_key = 'uri' if is_url else 'path'
+                creation_args[target_key] = pipfile.get(key)
+            else:
+                creation_args[key] = pipfile.get(key)
+        creation_args['name'] = name
+        return cls(**creation_args)
+
+    @classmethod
+    def from_line(cls, line, editable=None):
+        path = None
+        if line.startswith('-e '):
+            editable = True
+            line = line.split(' ', 1)[1]
+        vcs_line = _clean_git_uri(line)
+        vcs_method, vcs_location = _split_vcs_method(vcs_line)
+        if not is_valid_url(vcs_location) and os.path.exists(vcs_location):
+            path = get_converted_relative_path(vcs_location)
+            vcs_location = path_to_url(os.path.abspath(vcs_location))
+        link = Link(vcs_line)
+        name = link.egg_fragment
+        uri = link.url_without_fragment
+        if 'git+git@' in line:
+            uri = _strip_ssh_from_git_uri(uri)
+        subdirectory = link.subdirectory_fragment
+        ref = None
+        if '@' in link.show_url:
+            uri, ref = uri.rsplit('@', 1)
+        return cls(
+            name=name,
+            ref=ref,
+            vcs=vcs_method,
+            subdirectory=subdirectory,
+            link=link,
+            path=path,
+            editable=editable,
+            uri=uri,
+        )
+
+    @property
+    def line_part(self):
+        """requirements.txt compatible line part sans-extras"""
+        if self.req:
+            return self.req.line
+        base = '{0}'.format(self.link)
+        if self.editable:
+            base = '-e {0}'.format(base)
+        return base
+
+    @staticmethod
+    def _choose_vcs_source(pipfile):
+        src_keys = [k for k in pipfile.keys() if k in ['path', 'uri', 'file']]
+        if src_keys:
+            chosen_key = first(src_keys)
+            vcs_type = pipfile.pop('vcs')
+            _, pipfile_url = _split_vcs_method(pipfile.get(chosen_key))
+            pipfile[vcs_type] = pipfile_url
+            for removed in src_keys:
+                _ = pipfile.pop(removed)
+        return pipfile
+
+    @property
+    def pipfile_part(self):
+        pipfile_dict = attr.asdict(self, filter=_filter_none).copy()
+        if 'vcs' in pipfile_dict:
+            pipfile_dict = self._choose_vcs_source(pipfile_dict)
+        name = pipfile_dict.pop('name')
+        return {name: pipfile_dict}
+
+
+@attrs
+class PipenvRequirement(object):
+    name = attrib()
+    vcs = attrib(default=None, validator=validators.optional(_validate_vcs))
+    req = attrib(
+        default=None, validator=_optional_instance_of(BaseRequirement)
+    )
+    markers = attrib(default=None)
+    specifiers = attrib(
+        validator=validators.optional(_validate_specifiers)
+    )
+    index = 
attrib(default=None) + editable = attrib(default=None) + hashes = attrib(default=Factory(list), converter=list) + extras = attrib(default=Factory(list)) + _INCLUDE_FIELDS = ('name', 'markers', 'index', 'editable', 'hashes', 'extras') + + @name.default + def get_name(self): + return self.req.name + + @property + def requirement(self): + return self.req.req + + @property + def hashes_as_pip(self): + if self.hashes: + return ''.join([HASH_STRING.format(h) for h in self.hashes]) + + return '' + + @property + def markers_as_pip(self): + if self.markers: + return '; {0}'.format(self.markers) + + return '' + + @property + def extras_as_pip(self): + if self.extras: + return '[{0}]'.format(','.join(self.extras)) + + return '' + + @specifiers.default + def get_specifiers(self): + if self.req and self.req.req.specifier: + return _specs_to_string(self.req.req.specs) + return + + @classmethod + def from_line(cls, line): + hashes = None + if '--hash=' in line: + hashes = line.split(' --hash=') + line, hashes = hashes[0], hashes[1:] + editable = line.startswith('-e ') + stripped_line = line.split(' ', 1)[1] if editable else line + line, markers = _split_markers(line) + line, extras = _strip_extras(line) + vcs = None + # Installable local files and installable non-vcs urls are handled + # as files, generally speaking + if is_installable_file(stripped_line) or (is_valid_url(stripped_line) and not is_vcs(stripped_line)): + r = FileRequirement.from_line(line) + elif is_vcs(stripped_line): + r = VCSRequirement.from_line(line) + else: + name = multi_split(stripped_line, '!=<>~')[0] + if not extras: + name, extras = _strip_extras(name) + r = NamedRequirement.from_line(stripped_line) + if extras: + extras = first( + requirements.parse('fakepkg{0}'.format(_extras_to_string(extras))) + ).extras + r.req.extras = extras + if markers: + r.req.markers = markers + args = { + 'name': r.name, + 'vcs': vcs, + 'req': r, + 'markers': markers, + 'editable': editable, + } + if extras: + args['extras'] = extras + if hashes: + args['hashes'] = hashes + return cls(**args) + + @classmethod + def from_pipfile(cls, name, indexes, pipfile): + _pipfile = {} + if hasattr(pipfile, 'keys'): + _pipfile = dict(pipfile).copy() + _pipfile['version'] = _get_version(pipfile) + vcs = first([vcs for vcs in VCS_LIST if vcs in _pipfile]) + if vcs: + _pipfile['vcs'] = vcs + r = VCSRequirement.from_pipfile(name, pipfile) + elif any(key in _pipfile for key in ['path', 'file', 'uri']): + r = FileRequirement.from_pipfile(name, pipfile) + else: + r = NamedRequirement.from_pipfile(name, pipfile) + args = { + 'name': r.name, + 'vcs': vcs, + 'req': r, + 'markers': PipenvMarkers.from_pipfile(name, _pipfile).line_part, + 'extras': _pipfile.get('extras'), + 'editable': _pipfile.get('editable', False), + 'index': _pipfile.get('index') + } + if any(key in _pipfile for key in ['hash', 'hashes']): + args['hashes'] = _pipfile.get('hashes', [pipfile.get('hash')]) + return cls(**args) + + def as_line(self, include_index=False, project=None): + line = '{0}{1}{2}{3}{4}'.format( + self.req.line_part, + self.extras_as_pip, + self.specifiers if self.specifiers else '', + self.markers_as_pip, + self.hashes_as_pip, + ) + if include_index and not (self.requirement.local_file or self.vcs): + from .utils import prepare_pip_source_args + if self.index: + pip_src_args = [project.get_source(self.index)] + else: + pip_src_args = project.sources + index_string = ' '.join(prepare_pip_source_args(pip_src_args)) + line = '{0} {1}'.format(line, index_string) + return line + + def 
as_pipfile(self, include_index=False):
+        good_keys = ('hashes', 'extras', 'markers', 'editable', 'version', 'index') + VCS_LIST
+        req_dict = {k: v for k, v in attr.asdict(self, recurse=False, filter=_filter_none).items() if k in good_keys}
+        name = self.name
+        base_dict = {k: v for k, v in self.req.pipfile_part[name].items() if k not in ['req', 'link']}
+        base_dict.update(req_dict)
+        conflicting_keys = ('file', 'path', 'uri')
+        if 'file' in base_dict and any(k in base_dict for k in conflicting_keys[1:]):
+            conflicts = [k for k in conflicting_keys[1:] if k in base_dict]
+            for k in conflicts:
+                _ = base_dict.pop(k)
+        if 'hashes' in base_dict and len(base_dict['hashes']) == 1:
+            base_dict['hash'] = base_dict.pop('hashes')[0]
+        if len(base_dict.keys()) == 1 and 'version' in base_dict:
+            base_dict = base_dict.get('version')
+        return {name: base_dict}
+
+
+def _extras_to_string(extras):
+    """Turn a list of extras into a string"""
+    if isinstance(extras, six.string_types):
+        if extras.startswith('['):
+            return extras
+        else:
+            extras = [extras]
+    return '[{0}]'.format(','.join(extras))
+
+
+def _specs_to_string(specs):
+    """Turn a list of specifier tuples into a string"""
+    if specs:
+        if isinstance(specs, six.string_types):
+            return specs
+        return ','.join([''.join(spec) for spec in specs])
+    return ''
+
+
+def build_vcs_link(
+    vcs, uri, name=None, ref=None, subdirectory=None, extras=None
+):
+    if extras is None:
+        extras = []
+    vcs_start = '{0}+'.format(vcs)
+    if not uri.startswith(vcs_start):
+        uri = '{0}{1}'.format(vcs_start, uri)
+    uri = _clean_git_uri(uri)
+    if ref:
+        uri = '{0}@{1}'.format(uri, ref)
+    if name:
+        uri = '{0}#egg={1}'.format(uri, name)
+        if extras:
+            extras = _extras_to_string(extras)
+            uri = '{0}{1}'.format(uri, extras)
+    if subdirectory:
+        uri = '{0}&subdirectory={1}'.format(uri, subdirectory)
+    return Link(uri)
+
+
+def _get_version(pipfile_entry):
+    if str(pipfile_entry) == '{}' or is_star(pipfile_entry):
+        return ''
+
+    elif hasattr(pipfile_entry, 'keys') and 'version' in pipfile_entry:
+        if is_star(pipfile_entry.get('version')):
+            return ''
+        return pipfile_entry.get('version', '')
+
+    if isinstance(pipfile_entry, six.string_types):
+        return pipfile_entry
+    return ''
diff --git a/pipenv/utils.py b/pipenv/utils.py
index e7619fa421..614b333c84 100644
--- a/pipenv/utils.py
+++ b/pipenv/utils.py
@@ -55,6 +55,7 @@ def detach(self):
     from collections.abc import Mapping
 except ImportError:
     from collections import Mapping
+from .requirements import PipenvRequirement
 
 
 if six.PY2:
@@ -583,84 +584,8 @@ def multi_split(s, split):
 
 def convert_deps_from_pip(dep):
     """"Converts a pip-formatted dependency to a Pipfile-formatted one."""
-    try:
-        from collections.abc import Mapping
-    except ImportError:
-        from collections import Mapping
-    dependency = {}
-    req = get_requirement(dep)
-    extras = {'extras': req.extras}
-    # File installs.
-    if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs:
-        # Assign a package name to the file, last 7 of it's sha256 hex digest.
-
-        if not req.uri and not req.path:
-            req.path = os.path.abspath(req.name)
-
-        hashable_path = req.uri if req.uri else req.path
-        if not req.name:
-            req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
-            req.name = req.name[len(req.name) - 7:]
-        # {path: uri} TOML (spec 4 I guess...)
- if req.uri: - dependency[req.name] = {'file': hashable_path} - else: - dependency[req.name] = {'path': hashable_path} - if req.extras: - dependency[req.name].update(extras) - # Add --editable if applicable - if req.editable: - dependency[req.name].update({'editable': True}) - # VCS Installs. - elif req.vcs: - if req.name is None: - raise ValueError( - 'pipenv requires an #egg fragment for version controlled ' - 'dependencies. Please install remote dependency ' - 'in the form {0}#egg=.'.format(req.uri) - ) - - # Crop off the git+, etc part. - if req.uri.startswith('{0}+'.format(req.vcs)): - req.uri = req.uri[len(req.vcs) + 1:] - dependency.setdefault(req.name, {}).update({req.vcs: req.uri}) - # Add --editable, if it's there. - if req.editable: - dependency[req.name].update({'editable': True}) - # Add subdirectory, if it's there - if req.subdirectory: - dependency[req.name].update({'subdirectory': req.subdirectory}) - # Add the specifier, if it was provided. - if req.revision: - dependency[req.name].update({'ref': req.revision}) - # Extras: e.g. #egg=requests[security] - if req.extras: - dependency[req.name].update({'extras': req.extras}) - elif req.extras or req.specs or hasattr(req, 'markers'): - specs = None - # Comparison operators: e.g. Django>1.10 - if req.specs: - r = multi_split(dep, '!=<>~') - specs = dep[len(r[0]):] - dependency[req.name] = specs - # Extras: e.g. requests[socks] - if req.extras: - dependency[req.name] = extras - if specs: - dependency[req.name].update({'version': specs}) - if hasattr(req, 'markers'): - if isinstance(dependency[req.name], six.string_types): - dependency[req.name] = {'version': specs} - dependency[req.name].update({'markers': req.markers}) - # Bare dependencies: e.g. requests - else: - dependency[dep] = '*' - # Cleanup when there's multiple values, e.g. -e. - if len(dependency) > 1: - for key in dependency.copy(): - if not hasattr(dependency[key], 'keys'): - del dependency[key] - return dependency + req = PipenvRequirement.from_line(dep) + return req.as_pipfile() def is_star(val): @@ -677,96 +602,16 @@ def convert_deps_to_pip(deps, project=None, r=True, include_index=False): """"Converts a Pipfile-formatted dependency to a pip-formatted one.""" from ._compat import NamedTemporaryFile dependencies = [] - for dep in deps.keys(): - # Default (e.g. '>1.10'). - extra = deps[dep] if isinstance(deps[dep], six.string_types) else '' - editable = False - extras = '' - version = '' - index = '' - # Get rid of '*'. - if is_star(deps[dep]) or str(extra) == '{}': - extra = '' - hash = '' - # Support for single hash (spec 1). - if 'hash' in deps[dep]: - hash = ' --hash={0}'.format(deps[dep]['hash']) - # Support for multiple hashes (spec 2). - if 'hashes' in deps[dep]: - hash = '{0} '.format( - ''.join( - [' --hash={0} '.format(h) for h in deps[dep]['hashes']] - ) - ) - # Support for extras (e.g. requests[socks]) - if 'extras' in deps[dep]: - extras = '[{0}]'.format(','.join(deps[dep]['extras'])) - if 'version' in deps[dep]: - if not is_star(deps[dep]['version']): - version = deps[dep]['version'] - # For lockfile format. - if 'markers' in deps[dep]: - specs = '; {0}'.format(deps[dep]['markers']) - else: - # For pipfile format. 
- specs = [] - for specifier in specifiers: - if specifier in deps[dep]: - if not is_star(deps[dep][specifier]): - specs.append( - '{0} {1}'.format(specifier, deps[dep][specifier]) - ) - if specs: - specs = '; {0}'.format(' and '.join(specs)) - else: - specs = '' - if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]): - pip_src_args = [] - if 'index' in deps[dep]: - pip_src_args = [project.get_source(deps[dep]['index'])] - for idx in project.sources: - if idx['url'] != pip_src_args[0]['url']: - pip_src_args.append(idx) - else: - pip_src_args = project.sources - pip_args = prepare_pip_source_args(pip_src_args) - index = ' '.join(pip_args) - # Support for version control - maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]] - vcs = maybe_vcs[0] if maybe_vcs else None - if not any(key in deps[dep] for key in ['path', 'vcs', 'file']): - extra += extras - if isinstance(deps[dep], Mapping): - editable = bool(deps[dep].get('editable', False)) - # Support for files. - if 'file' in deps[dep]: - dep_file = deps[dep]['file'] - if is_valid_url(dep_file) and dep_file.startswith('http'): - dep_file += '#egg={0}'.format(dep) - extra = '{0}{1}'.format(dep_file, extras).strip() - # Flag the file as editable if it is a local relative path - dep = '-e ' if editable else '' - # Support for paths. - elif 'path' in deps[dep]: - extra = '{1}{0}'.format(extras, deps[dep]['path']).strip() - # Flag the file as editable if it is a local relative path - dep = '-e ' if editable else '' - if vcs: - extra = '{0}+{1}'.format(vcs, deps[dep][vcs]) - # Support for @refs. - if 'ref' in deps[dep]: - extra += '@{0}'.format(deps[dep]['ref']) - extra += '#egg={0}{1}'.format(dep, extras) - # Support for subdirectory - if 'subdirectory' in deps[dep]: - extra += '&subdirectory={0}'.format(deps[dep]['subdirectory']) - # Support for editable. 
- dep = '-e ' if editable else '' - - s = '{0}{1}{2}{3}{4} {5}'.format( - dep, extra, version, specs, hash, index + for dep_name, dep in deps.items(): + indexes = project.sources if hasattr(project, 'sources') else None + if hasattr(dep, 'keys') and dep.get('index'): + indexes = project.get_source(dep['index']) + new_dep = PipenvRequirement.from_pipfile(dep_name, indexes, dep) + req = new_dep.as_line( + project=project, + include_index=include_index ).strip() - dependencies.append(s) + dependencies.append(req) if not r: return dependencies From cf1220ddeb78227c1b856102b4fab55071b35d3c Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Wed, 11 Apr 2018 21:15:00 -0400 Subject: [PATCH 02/17] vendor attrs Signed-off-by: Dan Ryan --- pipenv/vendor/attr/__init__.py | 55 ++ pipenv/vendor/attr/_compat.py | 139 +++ pipenv/vendor/attr/_config.py | 23 + pipenv/vendor/attr/_funcs.py | 212 +++++ pipenv/vendor/attr/_make.py | 1511 ++++++++++++++++++++++++++++++ pipenv/vendor/attr/converters.py | 24 + pipenv/vendor/attr/exceptions.py | 48 + pipenv/vendor/attr/filters.py | 52 + pipenv/vendor/attr/validators.py | 166 ++++ 9 files changed, 2230 insertions(+) create mode 100644 pipenv/vendor/attr/__init__.py create mode 100644 pipenv/vendor/attr/_compat.py create mode 100644 pipenv/vendor/attr/_config.py create mode 100644 pipenv/vendor/attr/_funcs.py create mode 100644 pipenv/vendor/attr/_make.py create mode 100644 pipenv/vendor/attr/converters.py create mode 100644 pipenv/vendor/attr/exceptions.py create mode 100644 pipenv/vendor/attr/filters.py create mode 100644 pipenv/vendor/attr/validators.py diff --git a/pipenv/vendor/attr/__init__.py b/pipenv/vendor/attr/__init__.py new file mode 100644 index 0000000000..45a5548a0b --- /dev/null +++ b/pipenv/vendor/attr/__init__.py @@ -0,0 +1,55 @@ +from __future__ import absolute_import, division, print_function + +from functools import partial + +from . 
import converters, exceptions, filters, validators +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has +from ._make import ( + NOTHING, Attribute, Factory, attrib, attrs, fields, make_class, validate +) + + +__version__ = "17.4.0" + +__title__ = "attrs" +__description__ = "Classes Without Boilerplate" +__uri__ = "http://www.attrs.org/" +__doc__ = __description__ + " <" + __uri__ + ">" + +__author__ = "Hynek Schlawack" +__email__ = "hs@ox.cx" + +__license__ = "MIT" +__copyright__ = "Copyright (c) 2015 Hynek Schlawack" + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + +__all__ = [ + "Attribute", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "converters", + "evolve", + "exceptions", + "fields", + "filters", + "get_run_validators", + "has", + "ib", + "make_class", + "s", + "set_run_validators", + "validate", + "validators", +] diff --git a/pipenv/vendor/attr/_compat.py b/pipenv/vendor/attr/_compat.py new file mode 100644 index 0000000000..8a49341b25 --- /dev/null +++ b/pipenv/vendor/attr/_compat.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import, division, print_function + +import platform +import sys +import types +import warnings + + +PY2 = sys.version_info[0] == 2 +PYPY = platform.python_implementation() == "PyPy" + + +if PY2: + from UserDict import IterableUserDict + + # We 'bundle' isclass instead of using inspect as importing inspect is + # fairly expensive (order of 10-15 ms for a modern machine in 2016) + def isclass(klass): + return isinstance(klass, (type, types.ClassType)) + + # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. + TYPE = "type" + + def iteritems(d): + return d.iteritems() + + # Python 2 is bereft of a read-only dict proxy, so we make one! + class ReadOnlyDict(IterableUserDict): + """ + Best-effort read-only dict wrapper. + """ + + def __setitem__(self, key, val): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError("'mappingproxy' object does not support item " + "assignment") + + def update(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError("'mappingproxy' object has no attribute " + "'update'") + + def __delitem__(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError("'mappingproxy' object does not support item " + "deletion") + + def clear(self): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError("'mappingproxy' object has no attribute " + "'clear'") + + def pop(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError("'mappingproxy' object has no attribute " + "'pop'") + + def popitem(self): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError("'mappingproxy' object has no attribute " + "'popitem'") + + def setdefault(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError("'mappingproxy' object has no attribute " + "'setdefault'") + + def __repr__(self): + # Override to be identical to the Python 3 version. + return "mappingproxy(" + repr(self.data) + ")" + + def metadata_proxy(d): + res = ReadOnlyDict() + res.data.update(d) # We blocked update, so we have to do it like this. 
+ return res + +else: + def isclass(klass): + return isinstance(klass, type) + + TYPE = "class" + + def iteritems(d): + return d.items() + + def metadata_proxy(d): + return types.MappingProxyType(dict(d)) + + +def import_ctypes(): # pragma: nocover + """ + Moved into a function for testability. + """ + try: + import ctypes + return ctypes + except ImportError: + return None + + +if not PY2: + def just_warn(*args, **kw): + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + warnings.warn( + "Missing ctypes. Some features like bare super() or accessing " + "__class__ will not work with slots classes.", + RuntimeWarning, + stacklevel=2, + ) +else: + def just_warn(*args, **kw): # pragma: nocover + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + + +def make_set_closure_cell(): + """ + Moved into a function for testability. + """ + if PYPY: # pragma: no cover + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + else: + ctypes = import_ctypes() + if ctypes is not None: + set_closure_cell = ctypes.pythonapi.PyCell_Set + set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) + set_closure_cell.restype = ctypes.c_int + else: + set_closure_cell = just_warn + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() diff --git a/pipenv/vendor/attr/_config.py b/pipenv/vendor/attr/_config.py new file mode 100644 index 0000000000..8ec920962d --- /dev/null +++ b/pipenv/vendor/attr/_config.py @@ -0,0 +1,23 @@ +from __future__ import absolute_import, division, print_function + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + """ + return _run_validators diff --git a/pipenv/vendor/attr/_funcs.py b/pipenv/vendor/attr/_funcs.py new file mode 100644 index 0000000000..798043af3f --- /dev/null +++ b/pipenv/vendor/attr/_funcs.py @@ -0,0 +1,212 @@ +from __future__ import absolute_import, division, print_function + +import copy + +from ._compat import iteritems +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict(inst, recurse=True, filter=None, dict_factory=dict, + retain_collection_types=False): + """ + Return the ``attrs`` attribute values of *inst* as a dict. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the :class:`attr.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. 
+ + :rtype: return type of *dict_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict(v, recurse=True, filter=filter, + dict_factory=dict_factory) + elif isinstance(v, (tuple, list, set)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf([ + asdict(i, recurse=True, filter=filter, + dict_factory=dict_factory) + if has(i.__class__) else i + for i in v + ]) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df(( + asdict(kk, dict_factory=df) if has(kk.__class__) else kk, + asdict(vv, dict_factory=df) if has(vv.__class__) else vv) + for kk, vv in iteritems(v)) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def astuple(inst, recurse=True, filter=None, tuple_factory=tuple, + retain_collection_types=False): + """ + Return the ``attrs`` attribute values of *inst* as a tuple. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the :class:`attr.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. + :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append(astuple(v, recurse=True, filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain)) + elif isinstance(v, (tuple, list, set)): + cf = v.__class__ if retain is True else list + rv.append(cf([ + astuple(j, recurse=True, filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain) + if has(j.__class__) else j + for j in v + ])) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append(df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain + ) if has(kk.__class__) else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain + ) if has(vv.__class__) else vv + ) + for kk, vv in iteritems(v))) + else: + rv.append(v) + else: + rv.append(v) + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with ``attrs`` attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. 
+ + :rtype: :class:`bool` + """ + return getattr(cls, "__attrs_attrs__", None) is not None + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't + be found on *cls*. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. deprecated:: 17.1.0 + Use :func:`evolve` instead. + """ + import warnings + warnings.warn("assoc is deprecated and will be removed after 2018/01.", + DeprecationWarning, stacklevel=2) + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in iteritems(changes): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + raise AttrsAttributeNotFoundError( + "{k} is not an attrs attribute on {cl}." + .format(k=k, cl=new.__class__) + ) + _obj_setattr(new, k, v) + return new + + +def evolve(inst, **changes): + """ + Create a new instance, based on *inst* with *changes* applied. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise TypeError: If *attr_name* couldn't be found in the class + ``__init__``. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 17.1.0 + """ + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + return cls(**changes) diff --git a/pipenv/vendor/attr/_make.py b/pipenv/vendor/attr/_make.py new file mode 100644 index 0000000000..74042e793f --- /dev/null +++ b/pipenv/vendor/attr/_make.py @@ -0,0 +1,1511 @@ +from __future__ import absolute_import, division, print_function + +import hashlib +import linecache +import sys +import warnings + +from operator import itemgetter + +from . import _config +from ._compat import PY2, isclass, iteritems, metadata_proxy, set_closure_cell +from .exceptions import ( + DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError, + UnannotatedAttributeError +) + + +# This is used at least twice, so cache it here. +_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_{}" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = " {attr_name} = property(itemgetter({index}))" +_empty_metadata_singleton = metadata_proxy({}) + + +class _Nothing(object): + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + All instances of `_Nothing` are equal. + """ + def __copy__(self): + return self + + def __deepcopy__(self, _): + return self + + def __eq__(self, other): + return other.__class__ == _Nothing + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return "NOTHING" + + def __hash__(self): + return 0xdeadbeef + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +def attrib(default=NOTHING, validator=None, + repr=True, cmp=True, hash=None, init=True, + convert=None, metadata=None, type=None, converter=None): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + :func:`attr.s`! 
+ + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of :class:`Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to ``attr.NOTHING``), a value + *must* be supplied when instantiating; otherwise a :exc:`TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value. + + :param validator: :func:`callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :class:`Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a ``list`` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + :func:`get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: ``callable`` or a ``list`` of ``callable``\ s. + + :param bool repr: Include this attribute in the generated ``__repr__`` + method. + :param bool cmp: Include this attribute in the generated comparison methods + (``__eq__`` et al). + :param hash: Include this attribute in the generated ``__hash__`` + method. If ``None`` (default), mirror *cmp*'s value. This is the + correct behavior according the Python spec. Setting this value to + anything else than ``None`` is *discouraged*. + :type hash: ``bool`` or ``None`` + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + :param callable converter: :func:`callable` that is called by + ``attrs``-generated ``__init__`` methods to converter attribute's value + to the desired format. It is given the passed-in value, and the + returned value will be used as the new value of the attribute. The + value is converted before being passed to the validator, if any. + :param metadata: An arbitrary mapping, to be used by third-party + components. See :ref:`extending_metadata`. + :param type: The type of the attribute. In Python 3.6 or greater, the + preferred method to specify the type is using a variable annotation + (see `PEP 526 `_). + This argument is provided for backward compatibility. + Regardless of the approach used, the type will be stored on + ``Attribute.type``. + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *cmp* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + """ + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if convert is not None: + if converter is not None: + raise RuntimeError( + "Can't pass both `convert` and `converter`. " + "Please use `converter` only." 
+ ) + warnings.warn( + "The `convert` argument is deprecated in favor of `converter`. " + "It will be removed after 2019/01.", + DeprecationWarning, stacklevel=2 + ) + converter = convert + + if metadata is None: + metadata = {} + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=cmp, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + ) + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append(_tuple_property_pat.format( + index=i, + attr_name=attr_name, + )) + else: + attr_class_template.append(" pass") + globs = {"itemgetter": itemgetter} + eval(compile("\n".join(attr_class_template), "", "exec"), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `super_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class("_Attributes", [ + "attrs", # all attributes to build dunder methods for + "super_attrs", # attributes that have been inherited from super classes +]) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The implementation is gross but importing `typing` is slow and there are + discussions to remove it from the stdlib alltogether. + """ + return str(annot).startswith("typing.ClassVar") + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + anns = getattr(cls, "__annotations__", None) + if anns is None: + return {} + + # Verify that the annotations aren't merely inherited. + for super_cls in cls.__mro__[1:]: + if anns is getattr(super_cls, "__annotations__", None): + return {} + + return anns + + +def _transform_attrs(cls, these, auto_attribs): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = sorted(( + (name, ca) + for name, ca + in iteritems(these) + ), key=lambda e: e[1].counter) + elif auto_attribs is True: + ca_names = { + name + for name, attr + in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join(sorted( + unannotated, + key=lambda n: cd.get(n).counter + )) + "." 
+ ) + else: + ca_list = sorted(( + (name, attr) + for name, attr + in cd.items() + if isinstance(attr, _CountingAttr) + ), key=lambda e: e[1].counter) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, + ca=ca, + type=anns.get(attr_name), + ) + for attr_name, ca + in ca_list + ] + + super_attrs = [] + taken_attr_names = {a.name: a for a in own_attrs} + + # Traverse the MRO and collect attributes. + for super_cls in cls.__mro__[1:-1]: + sub_attrs = getattr(super_cls, "__attrs_attrs__", None) + if sub_attrs is not None: + for a in sub_attrs: + prev_a = taken_attr_names.get(a.name) + # Only add an attribute if it hasn't been defined before. This + # allows for overwriting attribute definitions by subclassing. + if prev_a is None: + super_attrs.append(a) + taken_attr_names[a.name] = a + + attr_names = [a.name for a in super_attrs + own_attrs] + + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + attrs = AttrsClass( + super_attrs + [ + Attribute.from_counting_attr( + name=attr_name, + ca=ca, + type=anns.get(attr_name) + ) + for attr_name, ca + in ca_list + ] + ) + + had_default = False + for a in attrs: + if had_default is True and a.default is NOTHING and a.init is True: + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. Attribute in question: {a!r}" + .format(a=a) + ) + elif had_default is False and \ + a.default is not NOTHING and \ + a.init is not False: + had_default = True + + return _Attributes((attrs, super_attrs)) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder(object): + """ + Iteratively build *one* class. + """ + __slots__ = ( + "_cls", "_cls_dict", "_attrs", "_super_names", "_attr_names", "_slots", + "_frozen", "_has_post_init", + ) + + def __init__(self, cls, these, slots, frozen, auto_attribs): + attrs, super_attrs = _transform_attrs(cls, these, auto_attribs) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._super_names = set(a.name for a in super_attrs) + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen or _has_frozen_superclass(cls) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used anymore after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + super_names = self._super_names + + # Clean class of attribute definitions (`attr.ib()`s). + for name in self._attr_names: + if name not in super_names and \ + getattr(cls, name, None) is not None: + delattr(cls, name) + + # Attach our dunder methods. 
+ for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + super_names = self._super_names + cd = { + k: v + for k, v in iteritems(self._cls_dict) + if k not in tuple(self._attr_names) + ("__dict__",) + } + + # We only add the names of attributes that aren't inherited. + # Settings __slots__ to inherited attributes wastes memory. + cd["__slots__"] = tuple( + name + for name in self._attr_names + if name not in super_names + ) + + qualname = getattr(self._cls, "__qualname__", None) + if qualname is not None: + cd["__qualname__"] = qualname + + attr_names = tuple(self._attr_names) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return tuple(getattr(self, name) for name in attr_names) + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(attr_names, state): + __bound_setattr(name, value) + + # slots and frozen require __getstate__/__setstate__ to work + cd["__getstate__"] = slots_getstate + cd["__setstate__"] = slots_setstate + + # Create new class based on old class and our methods. + cls = type(self._cls)( + self._cls.__name__, + self._cls.__bases__, + cd, + ) + + # The following is a fix for + # https://github.com/python-attrs/attrs/issues/102. On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + if cell.cell_contents is self._cls: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns=ns) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash(self._attrs) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._attrs, + self._has_post_init, + self._frozen, + ) + ) + + return self + + def add_cmp(self): + cd = self._cls_dict + + cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd["__gt__"], \ + cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_cmp(self._attrs) + ) + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. 
+    """
+    try:
+        method.__module__ = self._cls.__module__
+    except AttributeError:
+        pass
+
+    try:
+        method.__qualname__ = ".".join(
+            (self._cls.__qualname__, method.__name__,)
+        )
+    except AttributeError:
+        pass
+
+    return method
+
+
+def attrs(maybe_cls=None, these=None, repr_ns=None,
+          repr=True, cmp=True, hash=None, init=True,
+          slots=False, frozen=False, str=False, auto_attribs=False):
+    r"""
+    A class decorator that adds `dunder
+    <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
+    specified attributes using :func:`attr.ib` or the *these* argument.
+
+    :param these: A dictionary of name to :func:`attr.ib` mappings. This is
+        useful to avoid the definition of your attributes within the class body
+        because you can't (e.g. if you want to add ``__repr__`` methods to
+        Django models) or don't want to.
+
+        If *these* is not ``None``, ``attrs`` will *not* search the class body
+        for attributes.
+
+    :type these: :class:`dict` of :class:`str` to :func:`attr.ib`
+
+    :param str repr_ns: When using nested classes, there's no way in Python 2
+        to automatically detect that. Therefore it's possible to set the
+        namespace explicitly for a more meaningful ``repr`` output.
+    :param bool repr: Create a ``__repr__`` method with a human readable
+        representation of ``attrs`` attributes.
+    :param bool str: Create a ``__str__`` method that is identical to
+        ``__repr__``. This is usually not necessary except for
+        :class:`Exception`\ s.
+    :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
+        ``__gt__``, and ``__ge__`` methods that compare the class as if it were
+        a tuple of its ``attrs`` attributes. But the attributes are *only*
+        compared if the types of both classes are *identical*!
+    :param hash: If ``None`` (default), the ``__hash__`` method is generated
+        according to how *cmp* and *frozen* are set.
+
+        1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
+        2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
+           None, marking it unhashable (which it is).
+        3. If *cmp* is False, ``__hash__`` will be left untouched, meaning the
+           ``__hash__`` method of the superclass will be used (if the
+           superclass is ``object``, this means it will fall back to id-based
+           hashing).
+
+        Although not recommended, you can decide for yourself and force
+        ``attrs`` to create one (e.g. if the class is immutable even though you
+        didn't freeze it programmatically) by passing ``True`` or not. Both of
+        these cases are rather special and should be used carefully.
+
+        See the `Python documentation \
+        <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
+        and the `GitHub issue that led to the default behavior \
+        <https://github.com/python-attrs/attrs/issues/136>`_ for more details.
+    :type hash: ``bool`` or ``None``
+    :param bool init: Create a ``__init__`` method that initializes the
+        ``attrs`` attributes. Leading underscores are stripped for the
+        argument name. If a ``__attrs_post_init__`` method exists on the
+        class, it will be called after the class is fully initialized.
+    :param bool slots: Create a slots_-style class that's more
+        memory-efficient. See :ref:`slots` for further ramifications.
+    :param bool frozen: Make instances immutable after initialization. If
+        someone attempts to modify a frozen instance,
+        :exc:`attr.exceptions.FrozenInstanceError` is raised.
+
+        Please note:
+
+            1. This is achieved by installing a custom ``__setattr__`` method
+               on your class, so you can't implement your own.
+
+            2. True immutability is impossible in Python.
+
+            3. This *does* have a minor runtime performance :ref:`impact
+               <how-frozen>` when initializing new instances.
In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + .. _slots: https://docs.python.org/3/reference/datamodel.html#slots + :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes + (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an :func:`attr.ib` but lacks a type + annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also + works as expected. + + Attributes annotated as :data:`typing.ClassVar` are **ignored**. + + .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``. + .. versionchanged:: + 17.1.0 *hash* supports ``None`` as value which is also the default + now. + .. versionadded:: 17.3.0 *auto_attribs* + """ + def wrap(cls): + if getattr(cls, "__class__", None) is None: + raise TypeError("attrs only works with new-style classes.") + + builder = _ClassBuilder(cls, these, slots, frozen, auto_attribs) + + if repr is True: + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + if cmp is True: + builder.add_cmp() + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and cmp is False): + pass + elif hash is True or (hash is None and cmp is True and frozen is True): + builder.add_hash() + else: + builder.make_unhashable() + + if init is True: + builder.add_init() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +if PY2: + def _has_frozen_superclass(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return ( + getattr( + cls.__setattr__, "__module__", None + ) == _frozen_setattrs.__module__ and + cls.__setattr__.__name__ == _frozen_setattrs.__name__ + ) +else: + def _has_frozen_superclass(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ == _frozen_setattrs + + +def _attrs_to_tuple(obj, attrs): + """ + Create a tuple of all values of *obj*'s *attrs*. + """ + return tuple(getattr(obj, a.name) for a in attrs) + + +def _make_hash(attrs): + attrs = tuple( + a + for a in attrs + if a.hash is True or (a.hash is None and a.cmp is True) + ) + + # We cache the generated hash methods for the same kinds of attributes. 
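+    # (the SHA1 digest below doubles as the synthetic "filename" passed to
+    # compile() and used as the linecache key, so identical attribute
+    # layouts share one cached method.)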
+    sha1 = hashlib.sha1()
+    sha1.update(repr(attrs).encode("utf-8"))
+    unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
+    type_hash = hash(unique_filename)
+    lines = [
+        "def __hash__(self):",
+        "    return hash((",
+        "        %d," % (type_hash,),
+    ]
+    for a in attrs:
+        lines.append("        self.%s," % (a.name))
+
+    lines.append("    ))")
+
+    script = "\n".join(lines)
+    globs = {}
+    locs = {}
+    bytecode = compile(script, unique_filename, "exec")
+    eval(bytecode, globs, locs)
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    linecache.cache[unique_filename] = (
+        len(script),
+        None,
+        script.splitlines(True),
+        unique_filename,
+    )
+
+    return locs["__hash__"]
+
+
+def _add_hash(cls, attrs):
+    """
+    Add a hash method to *cls*.
+    """
+    cls.__hash__ = _make_hash(attrs)
+    return cls
+
+
+def __ne__(self, other):
+    """
+    Check equality and either forward a NotImplemented or return the result
+    negated.
+    """
+    result = self.__eq__(other)
+    if result is NotImplemented:
+        return NotImplemented
+
+    return not result
+
+
+def _make_cmp(attrs):
+    attrs = [a for a in attrs if a.cmp]
+
+    # We cache the generated eq methods for the same kinds of attributes.
+    sha1 = hashlib.sha1()
+    sha1.update(repr(attrs).encode("utf-8"))
+    unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
+    lines = [
+        "def __eq__(self, other):",
+        "    if other.__class__ is not self.__class__:",
+        "        return NotImplemented",
+    ]
+    # We can't just do a big self.x = other.x and... clause due to
+    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
+    if attrs:
+        lines.append("    return (")
+        others = [
+            "    ) == (",
+        ]
+        for a in attrs:
+            lines.append("        self.%s," % (a.name,))
+            others.append("        other.%s," % (a.name,))
+
+        lines += others + ["    )"]
+    else:
+        lines.append("    return True")
+
+    script = "\n".join(lines)
+    globs = {}
+    locs = {}
+    bytecode = compile(script, unique_filename, "exec")
+    eval(bytecode, globs, locs)
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    linecache.cache[unique_filename] = (
+        len(script),
+        None,
+        script.splitlines(True),
+        unique_filename,
+    )
+    eq = locs["__eq__"]
+    ne = __ne__
+
+    def attrs_to_tuple(obj):
+        """
+        Save us some typing.
+        """
+        return _attrs_to_tuple(obj, attrs)
+
+    def __lt__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if isinstance(other, self.__class__):
+            return attrs_to_tuple(self) < attrs_to_tuple(other)
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if isinstance(other, self.__class__):
+            return attrs_to_tuple(self) <= attrs_to_tuple(other)
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if isinstance(other, self.__class__):
+            return attrs_to_tuple(self) > attrs_to_tuple(other)
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if isinstance(other, self.__class__):
+            return attrs_to_tuple(self) >= attrs_to_tuple(other)
+        else:
+            return NotImplemented
+
+    return eq, ne, __lt__, __le__, __gt__, __ge__
+
+
+def _add_cmp(cls, attrs=None):
+    """
+    Add comparison methods to *cls*.
+    """
+    if attrs is None:
+        attrs = cls.__attrs_attrs__
+
+    cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = \
+        _make_cmp(attrs)
+
+    return cls
+
+
+def _make_repr(attrs, ns):
+    """
+    Make a repr method for *attrs* adding *ns* to the full name.
+    """
+    attr_names = tuple(
+        a.name
+        for a in attrs
+        if a.repr
+    )
+
+    def __repr__(self):
+        """
+        Automatically created by attrs.
+        """
+        real_cls = self.__class__
+        if ns is None:
+            qualname = getattr(real_cls, "__qualname__", None)
+            if qualname is not None:
+                class_name = qualname.rsplit(">.", 1)[-1]
+            else:
+                class_name = real_cls.__name__
+        else:
+            class_name = ns + "." + real_cls.__name__
+
+        return "{0}({1})".format(
+            class_name,
+            ", ".join(
+                name + "=" + repr(getattr(self, name, NOTHING))
+                for name in attr_names
+            )
+        )
+    return __repr__
+
+
+def _add_repr(cls, ns=None, attrs=None):
+    """
+    Add a repr method to *cls*.
+    """
+    if attrs is None:
+        attrs = cls.__attrs_attrs__
+
+    cls.__repr__ = _make_repr(attrs, ns)
+    return cls
+
+
+def _make_init(attrs, post_init, frozen):
+    attrs = [
+        a
+        for a in attrs
+        if a.init or a.default is not NOTHING
+    ]
+
+    # We cache the generated init methods for the same kinds of attributes.
+    sha1 = hashlib.sha1()
+    sha1.update(repr(attrs).encode("utf-8"))
+    unique_filename = "<attrs generated init {0}>".format(
+        sha1.hexdigest()
+    )
+
+    script, globs = _attrs_to_init_script(
+        attrs,
+        frozen,
+        post_init,
+    )
+    locs = {}
+    bytecode = compile(script, unique_filename, "exec")
+    attr_dict = dict((a.name, a) for a in attrs)
+    globs.update({
+        "NOTHING": NOTHING,
+        "attr_dict": attr_dict,
+    })
+    if frozen is True:
+        # Save the lookup overhead in __init__ if we need to circumvent
+        # immutability.
+        globs["_cached_setattr"] = _obj_setattr
+    eval(bytecode, globs, locs)
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    linecache.cache[unique_filename] = (
+        len(script),
+        None,
+        script.splitlines(True),
+        unique_filename,
+    )
+
+    return locs["__init__"]
+
+
+def _add_init(cls, frozen):
+    """
+    Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
+    """
+    cls.__init__ = _make_init(
+        cls.__attrs_attrs__,
+        getattr(cls, "__attrs_post_init__", False),
+        frozen,
+    )
+    return cls
+
+
+def fields(cls):
+    """
+    Returns the tuple of ``attrs`` attributes for a class.
+
+    The tuple also allows accessing the fields by their names (see below for
+    examples).
+
+    :param type cls: Class to introspect.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class.
+
+    :rtype: tuple (with name accessors) of :class:`attr.Attribute`
+
+    .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
+       by name.
+    """
+    if not isclass(cls):
+        raise TypeError("Passed object must be a class.")
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is None:
+        raise NotAnAttrsClassError(
+            "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+        )
+    return attrs
+
+
+def validate(inst):
+    """
+    Validate all attributes on *inst* that have a validator.
+
+    Lets all exceptions propagate.
+
+    :param inst: Instance of a class with ``attrs`` attributes.
+    """
+    if _config._run_validators is False:
+        return
+
+    for a in fields(inst.__class__):
+        v = a.validator
+        if v is not None:
+            v(inst, a, getattr(inst, a.name))
+
+
+def _attrs_to_init_script(attrs, frozen, post_init):
+    """
+    Return a script of an initializer for *attrs* and a dict of globals.
+
+    The globals are expected by the generated script.
+
+    If *frozen* is True, we cannot set the attributes directly so we use
+    a cached ``object.__setattr__``.
+    """
+    lines = []
+    if frozen is True:
+        lines.append(
+            # Circumvent the __setattr__ descriptor to save one lookup per
+            # assignment.
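+            # (this string literal becomes the first statement of the
+            # generated __init__'s source.)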
+ "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + def fmt_setter(attr_name, value_var): + return "_setattr('%(attr_name)s', %(value_var)s)" % { + "attr_name": attr_name, + "value_var": value_var, + } + + def fmt_setter_with_converter(attr_name, value_var): + conv_name = _init_converter_pat.format(attr_name) + return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % { + "attr_name": attr_name, + "value_var": value_var, + "conv": conv_name, + } + else: + def fmt_setter(attr_name, value): + return "self.%(attr_name)s = %(value)s" % { + "attr_name": attr_name, + "value": value, + } + + def fmt_setter_with_converter(attr_name, value_var): + conv_name = _init_converter_pat.format(attr_name) + return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % { + "attr_name": attr_name, + "value_var": value_var, + "conv": conv_name, + } + + args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + attr_name = a.name + arg_name = a.name.lstrip("_") + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append(fmt_setter_with_converter( + attr_name, + init_factory_name + "({0})".format(maybe_self))) + conv_name = _init_converter_pat.format(a.name) + names_for_globals[conv_name] = a.converter + else: + lines.append(fmt_setter( + attr_name, + init_factory_name + "({0})".format(maybe_self) + )) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append(fmt_setter_with_converter( + attr_name, + "attr_dict['{attr_name}'].default" + .format(attr_name=attr_name) + )) + conv_name = _init_converter_pat.format(a.name) + names_for_globals[conv_name] = a.converter + else: + lines.append(fmt_setter( + attr_name, + "attr_dict['{attr_name}'].default" + .format(attr_name=attr_name) + )) + elif a.default is not NOTHING and not has_factory: + args.append( + "{arg_name}=attr_dict['{attr_name}'].default".format( + arg_name=arg_name, + attr_name=attr_name, + ) + ) + if a.converter is not None: + lines.append(fmt_setter_with_converter(attr_name, arg_name)) + names_for_globals[_init_converter_pat.format(a.name)] = ( + a.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name)) + elif has_factory: + args.append("{arg_name}=NOTHING".format(arg_name=arg_name)) + lines.append("if {arg_name} is not NOTHING:" + .format(arg_name=arg_name)) + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append(" " + fmt_setter_with_converter( + attr_name, arg_name + )) + lines.append("else:") + lines.append(" " + fmt_setter_with_converter( + attr_name, + init_factory_name + "({0})".format(maybe_self) + )) + names_for_globals[_init_converter_pat.format(a.name)] = ( + a.converter + ) + else: + lines.append(" " + fmt_setter(attr_name, arg_name)) + lines.append("else:") + lines.append(" " + fmt_setter( + attr_name, + init_factory_name + "({0})".format(maybe_self) + )) + names_for_globals[init_factory_name] = a.default.factory + else: + args.append(arg_name) + if a.converter is not None: + lines.append(fmt_setter_with_converter(attr_name, arg_name)) + names_for_globals[_init_converter_pat.format(a.name)] = ( + 
a.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name)) + + if attrs_to_validate: # we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_{}".format(a.name) + attr_name = "__attr_{}".format(a.name) + lines.append(" {}(self, {}, self.{})".format( + val_name, attr_name, a.name)) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + if post_init: + lines.append("self.__attrs_post_init__()") + + return """\ +def __init__(self, {args}): + {lines} +""".format( + args=", ".join(args), + lines="\n ".join(lines) if lines else "pass", + ), names_for_globals + + +class Attribute(object): + """ + *Read-only* representation of an attribute. + + :attribute name: The name of the attribute. + + Plus *all* arguments of :func:`attr.ib`. + + For the version history of the fields, see :func:`attr.ib`. + """ + __slots__ = ( + "name", "default", "validator", "repr", "cmp", "hash", "init", + "metadata", "type", "converter", + ) + + def __init__(self, name, default, validator, repr, cmp, hash, init, + convert=None, metadata=None, type=None, converter=None): + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + if convert is not None: + if converter is not None: + raise RuntimeError( + "Can't pass both `convert` and `converter`. " + "Please use `converter` only." + ) + warnings.warn( + "The `convert` argument is deprecated in favor of `converter`." + " It will be removed after 2019/01.", + DeprecationWarning, stacklevel=2 + ) + converter = convert + + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("cmp", cmp) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr("metadata", ( + metadata_proxy(metadata) if metadata + else _empty_metadata_singleton + )) + bound_setattr("type", type) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @property + def convert(self): + warnings.warn( + "The `convert` attribute is deprecated in favor of `converter`. " + "It will be removed after 2019/01.", + DeprecationWarning, stacklevel=2, + ) + return self.converter + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k + in Attribute.__slots__ + if k not in ( + "name", "validator", "default", "type", "convert", + ) # exclude methods and deprecated alias + } + return cls( + name=name, validator=ca._validator, default=ca._default, type=type, + **inst_dict + ) + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) if name != "metadata" + else dict(self.metadata) + for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. 
+        """
+        bound_setattr = _obj_setattr.__get__(self, Attribute)
+        for name, value in zip(self.__slots__, state):
+            if name != "metadata":
+                bound_setattr(name, value)
+            else:
+                bound_setattr(name, metadata_proxy(value) if value else
+                              _empty_metadata_singleton)
+
+
+_a = [
+    Attribute(name=name, default=NOTHING, validator=None,
+              repr=True, cmp=True, hash=(name != "metadata"), init=True)
+    for name in Attribute.__slots__
+    if name != "convert"  # XXX: remove once `convert` is gone
+]
+
+Attribute = _add_hash(
+    _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
+    attrs=[a for a in _a if a.hash]
+)
+
+
+class _CountingAttr(object):
+    """
+    Intermediate representation of attributes that uses a counter to preserve
+    the order in which the attributes have been defined.
+
+    *Internal* data structure of the attrs library. Running into instances of
+    this type is most likely the result of a bug like a forgotten `@attr.s`
+    decorator.
+    """
+    __slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
+                 "metadata", "_validator", "converter", "type")
+    __attrs_attrs__ = tuple(
+        Attribute(name=name, default=NOTHING, validator=None,
+                  repr=True, cmp=True, hash=True, init=True)
+        for name
+        in ("counter", "_default", "repr", "cmp", "hash", "init",)
+    ) + (
+        Attribute(name="metadata", default=None, validator=None,
+                  repr=True, cmp=True, hash=False, init=True),
+    )
+    cls_counter = 0
+
+    def __init__(self, default, validator, repr, cmp, hash, init, converter,
+                 metadata, type):
+        _CountingAttr.cls_counter += 1
+        self.counter = _CountingAttr.cls_counter
+        self._default = default
+        # If validator is a list/tuple, wrap it using helper validator.
+        if validator and isinstance(validator, (list, tuple)):
+            self._validator = and_(*validator)
+        else:
+            self._validator = validator
+        self.repr = repr
+        self.cmp = cmp
+        self.hash = hash
+        self.init = init
+        self.converter = converter
+        self.metadata = metadata
+        self.type = type
+
+    def validator(self, meth):
+        """
+        Decorator that adds *meth* to the list of validators.
+
+        Returns *meth* unchanged.
+
+        .. versionadded:: 17.1.0
+        """
+        if self._validator is None:
+            self._validator = meth
+        else:
+            self._validator = and_(self._validator, meth)
+        return meth
+
+    def default(self, meth):
+        """
+        Decorator that allows setting the default for an attribute.
+
+        Returns *meth* unchanged.
+
+        :raises DefaultAlreadySetError: If default has been set before.
+
+        .. versionadded:: 17.1.0
+        """
+        if self._default is not NOTHING:
+            raise DefaultAlreadySetError()
+
+        self._default = Factory(meth, takes_self=True)
+
+        return meth
+
+
+_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
+
+
+@attrs(slots=True, init=False, hash=True)
+class Factory(object):
+    """
+    Stores a factory callable.
+
+    If passed as the default value to :func:`attr.ib`, the factory is used to
+    generate a new value.
+
+    :param callable factory: A callable that takes either none or exactly one
+        mandatory positional argument depending on *takes_self*.
+    :param bool takes_self: Pass the partially initialized instance that is
+        being initialized as a positional argument.
+
+    .. versionadded:: 17.1.0 *takes_self*
+    """
+    factory = attrib()
+    takes_self = attrib()
+
+    def __init__(self, factory, takes_self=False):
+        """
+        `Factory` is part of the default machinery so if we want a default
+        value here, we have to implement it ourselves.
+ """ + self.factory = factory + self.takes_self = takes_self + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param name: The name for the new class. + :type name: str + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + :type attrs: :class:`list` or :class:`dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to :func:`attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = dict((a, attrib()) for a in attrs) + else: + raise TypeError("attrs argument must be a dict or a list.") + + post_init = cls_dict.pop("__attrs_post_init__", None) + type_ = type( + name, + bases, + {} if post_init is None else {"__attrs_post_init__": post_init} + ) + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__", + ) + except (AttributeError, ValueError): + pass + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators. + + +@attrs(slots=True, hash=True) +class _AndValidator(object): + """ + Compose many validators to a single one. + """ + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param validators: Arbitrary number of validators. + :type validators: callables + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) diff --git a/pipenv/vendor/attr/converters.py b/pipenv/vendor/attr/converters.py new file mode 100644 index 0000000000..3b3bac92be --- /dev/null +++ b/pipenv/vendor/attr/converters.py @@ -0,0 +1,24 @@ +""" +Commonly useful converters. +""" + +from __future__ import absolute_import, division, print_function + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + return optional_converter diff --git a/pipenv/vendor/attr/exceptions.py b/pipenv/vendor/attr/exceptions.py new file mode 100644 index 0000000000..f949f3c9c0 --- /dev/null +++ b/pipenv/vendor/attr/exceptions.py @@ -0,0 +1,48 @@ +from __future__ import absolute_import, division, print_function + + +class FrozenInstanceError(AttributeError): + """ + A frozen/immutable instance has been attempted to be modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing :exc:`AttributeError`. + + .. 
versionadded:: 16.1.0
+    """
+    msg = "can't set attribute"
+    args = [msg]
+
+
+class AttrsAttributeNotFoundError(ValueError):
+    """
+    An ``attrs`` function couldn't find an attribute that the user asked for.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class NotAnAttrsClassError(ValueError):
+    """
+    A non-``attrs`` class has been passed into an ``attrs`` function.
+
+    .. versionadded:: 16.2.0
+    """
+
+
+class DefaultAlreadySetError(RuntimeError):
+    """
+    A default has been set using ``attr.ib()`` and is attempted to be reset
+    using the decorator.
+
+    .. versionadded:: 17.1.0
+    """
+
+
+class UnannotatedAttributeError(RuntimeError):
+    """
+    A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
+    annotation.
+
+    .. versionadded:: 17.3.0
+    """
diff --git a/pipenv/vendor/attr/filters.py b/pipenv/vendor/attr/filters.py
new file mode 100644
index 0000000000..d1bad35e36
--- /dev/null
+++ b/pipenv/vendor/attr/filters.py
@@ -0,0 +1,52 @@
+"""
+Commonly useful filters for :func:`attr.asdict`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from ._compat import isclass
+from ._make import Attribute
+
+
+def _split_what(what):
+    """
+    Returns a tuple of `frozenset`s of classes and attributes.
+    """
+    return (
+        frozenset(cls for cls in what if isclass(cls)),
+        frozenset(cls for cls in what if isinstance(cls, Attribute)),
+    )
+
+
+def include(*what):
+    """
+    Whitelist *what*.
+
+    :param what: What to whitelist.
+    :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\ s
+
+    :rtype: :class:`callable`
+    """
+    cls, attrs = _split_what(what)
+
+    def include_(attribute, value):
+        return value.__class__ in cls or attribute in attrs
+
+    return include_
+
+
+def exclude(*what):
+    """
+    Blacklist *what*.
+
+    :param what: What to blacklist.
+    :type what: :class:`list` of classes or :class:`attr.Attribute`\ s.
+
+    :rtype: :class:`callable`
+    """
+    cls, attrs = _split_what(what)
+
+    def exclude_(attribute, value):
+        return value.__class__ not in cls and attribute not in attrs
+
+    return exclude_
diff --git a/pipenv/vendor/attr/validators.py b/pipenv/vendor/attr/validators.py
new file mode 100644
index 0000000000..f8892fcdfe
--- /dev/null
+++ b/pipenv/vendor/attr/validators.py
@@ -0,0 +1,166 @@
+"""
+Commonly useful validators.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from ._make import _AndValidator, and_, attrib, attrs
+
+
+__all__ = [
+    "and_",
+    "in_",
+    "instance_of",
+    "optional",
+    "provides",
+]
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator(object):
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not isinstance(value, self.type):
+            raise TypeError(
+                "'{name}' must be {type!r} (got {value!r} that is a "
+                "{actual!r})."
+                .format(name=attr.name, type=self.type,
+                        actual=value.__class__, value=value),
+                attr, self.type, value,
+            )
+
+    def __repr__(self):
+        return (
+            "<instance_of validator for type {type!r}>"
+            .format(type=self.type)
+        )
+
+
+def instance_of(type):
+    """
+    A validator that raises a :exc:`TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed
+    using :func:`isinstance`, therefore it's also valid to pass a tuple of
+    types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of types
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type :class:`attr.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator(object):
+    interface = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.interface.providedBy(value):
+            raise TypeError(
+                "'{name}' must provide {interface!r} which {value!r} "
+                "doesn't."
+                .format(name=attr.name, interface=self.interface, value=value),
+                attr, self.interface, value,
+            )
+
+    def __repr__(self):
+        return (
+            "<provides validator for interface {interface!r}>"
+            .format(interface=self.interface)
+        )
+
+
+def provides(interface):
+    """
+    A validator that raises a :exc:`TypeError` if the initializer is called
+    with an object that does not provide the requested *interface* (checks are
+    performed using ``interface.providedBy(value)``; see `zope.interface
+    <https://zopeinterface.readthedocs.io/en/latest/>`_).
+
+    :param zope.interface.Interface interface: The interface to check for.
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type :class:`attr.Attribute`), the expected interface, and the
+        value it got.
+    """
+    return _ProvidesValidator(interface)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator(object):
+    validator = attrib()
+
+    def __call__(self, inst, attr, value):
+        if value is None:
+            return
+
+        self.validator(inst, attr, value)
+
+    def __repr__(self):
+        return (
+            "<optional validator for {what} or None>"
+            .format(what=repr(self.validator))
+        )
+
+
+def optional(validator):
+    """
+    A validator that makes an attribute optional. An optional attribute is one
+    which can be set to ``None`` in addition to satisfying the requirements of
+    the sub-validator.
+
+    :param validator: A validator (or a list of validators) that is used for
+        non-``None`` values.
+    :type validator: callable or :class:`list` of callables.
+
+    .. versionadded:: 15.1.0
+    .. versionchanged:: 17.1.0 *validator* can be a list of validators.
+    """
+    if isinstance(validator, list):
+        return _OptionalValidator(_AndValidator(validator))
+    return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator(object):
+    options = attrib()
+
+    def __call__(self, inst, attr, value):
+        if value not in self.options:
+            raise ValueError(
+                "'{name}' must be in {options!r} (got {value!r})"
+                .format(name=attr.name, options=self.options, value=value)
+            )
+
+    def __repr__(self):
+        return (
+            "<in_ validator with options {options!r}>"
+            .format(options=self.options)
+        )
+
+
+def in_(options):
+    """
+    A validator that raises a :exc:`ValueError` if the initializer is called
+    with a value that does not belong in the options provided. The check is
+    performed using ``value in options``.
+
+    :param options: Allowed options.
+    :type options: list, tuple, :class:`enum.Enum`, ...
+
+    :raises ValueError: With a human readable error message, the attribute (of
+        type :class:`attr.Attribute`), the expected options, and the value it
+        got.
+
+    ..
versionadded:: 17.1.0 + """ + return _InValidator(options) From 5bd3e2dcfa6d26480ed2cd0fa8ad1bbb959d92af Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 12 Apr 2018 00:16:22 -0400 Subject: [PATCH 03/17] Implement new requirement parser - Leverage functionality where possible to avoid rework Signed-off-by: Dan Ryan --- pipenv/core.py | 32 ++++-------- pipenv/project.py | 20 +++----- pipenv/requirements.py | 50 ++++++++++-------- pipenv/utils.py | 113 +++-------------------------------------- 4 files changed, 54 insertions(+), 161 deletions(-) diff --git a/pipenv/core.py b/pipenv/core.py index 5c5783f8ab..d295708cec 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -27,7 +27,6 @@ from .project import Project, SourceNotFound from .requirements import PipenvRequirement from .utils import ( - convert_deps_from_pip, convert_deps_to_pip, is_required_version, proper_case, @@ -36,14 +35,12 @@ merge_deps, venv_resolve_deps, escape_grouped_arguments, - is_vcs, python_version, find_windows_executable, prepare_pip_source_args, temp_environ, is_valid_url, download_file, - get_requirement, is_pinned, is_star, rmtree, @@ -976,7 +973,7 @@ def get_downloads_info(names_map, section): p = project.parsed_pipfile for fname in os.listdir(project.download_location): # Get name from filename mapping. - name = list(convert_deps_from_pip(names_map[fname]))[0] + name = PipenvRequirement.from_line(names_map[fname]).name # Get the version info from the filenames. version = parse_download_fname(fname, name) # Get the hash of each file. @@ -1243,14 +1240,12 @@ def do_purge(bare=False, downloads=False, allow_global=False, verbose=False): actually_installed = [] for package in installed: try: - dep = convert_deps_from_pip(package) + dep = PipenvRequirement.from_line(package) except AssertionError: dep = None - if dep and not is_vcs(dep): - dep = [k for k in dep.keys()][0] - # TODO: make this smarter later. - if not dep.startswith('-e ') and not dep.startswith('git+'): - actually_installed.append(dep) + if dep and not dep.is_vcs and not dep.editable: + dep = dep.name + actually_installed.append(dep) if not bare: click.echo( u'Found {0} installed package(s), purging…'.format( @@ -1712,7 +1707,8 @@ def do_outdated(): ) results = filter(bool, results) for result in results: - packages.update(convert_deps_from_pip(result)) + dep = PipenvRequirement.from_line(result) + packages.update(dep.as_pipfile()) updated_packages = {} lockfile = do_lock(write=False) for section in ('develop', 'default'): @@ -1936,9 +1932,8 @@ def do_install( if selective_upgrade: for i, package_name in enumerate(package_names[:]): section = project.packages if not dev else project.dev_packages - package = convert_deps_from_pip(package_name) - package__name = list(package.keys())[0] - package__val = list(package.values())[0] + package = PipenvRequirement.from_line(package_name) + package__name, package__val = package.pipfile_entry try: if not is_star(section[package__name]) and is_star( package__val @@ -1978,17 +1973,12 @@ def do_install( ) # Warn if --editable wasn't passed. 
try: - converted = convert_deps_from_pip(package_name) + converted = PipenvRequirement.from_line(package_name) except ValueError as e: click.echo('{0}: {1}'.format(crayons.red('WARNING'), e)) requirements_directory.cleanup() sys.exit(1) - key = [k for k in converted.keys()][0] - if is_vcs(key) or is_vcs(converted[key]) and not converted[ - key - ].get( - 'editable' - ): + if converted.is_vcs and not converted.editable: click.echo( '{0}: You installed a VCS dependency in non–editable mode. ' 'This will work fine, but sub-dependencies will not be resolved by {1}.' diff --git a/pipenv/project.py b/pipenv/project.py index 43ac92d00e..f8fcfe1791 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -20,6 +20,7 @@ from pathlib2 import Path from .cmdparse import Script +from .requirements import PipenvRequirement from .utils import ( atomic_open_for_write, mkdir_p, @@ -27,7 +28,6 @@ proper_case, find_requirements, is_editable, - is_file, is_vcs, cleanup_toml, is_installable_file, @@ -35,6 +35,7 @@ normalize_drive, python_version, safe_expandvars, + is_star, ) from .environments import ( PIPENV_MAX_DEPTH, @@ -45,6 +46,7 @@ PIPENV_PYTHON, PIPENV_DEFAULT_PYTHON_VERSION, ) +from .vendor.first import first def _normalized(p): @@ -727,24 +729,18 @@ def add_package_to_pipfile(self, package_name, dev=False): # Read and append Pipfile. p = self.parsed_pipfile # Don't re-capitalize file URLs or VCSs. - converted = convert_deps_from_pip(package_name) - converted = converted[first(k for k in converted.keys())] - if not ( - is_file(package_name) or is_vcs(converted) or 'path' in converted - ): - package_name = pep423_name(package_name) + package = PipenvRequirement.from_line(package_name) + converted = first(package.as_pipfile().values()) key = 'dev-packages' if dev else 'packages' # Set empty group if it doesn't exist yet. if key not in p: p[key] = {} - package = convert_deps_from_pip(package_name) - package_name = first(k for k in package.keys()) - name = self.get_package_name_in_pipfile(package_name, dev) - if name and converted == '*': + name = self.get_package_name_in_pipfile(package.name, dev) + if name and is_star(converted): # Skip for wildcard version return # Add the package to the group. - p[key][name or package_name] = package[package_name] + p[key][name or package.normalized_name] = converted # Write Pipfile. 
self.write_toml(p) diff --git a/pipenv/requirements.py b/pipenv/requirements.py index 3120b1ddc7..616b7af4af 100644 --- a/pipenv/requirements.py +++ b/pipenv/requirements.py @@ -2,23 +2,20 @@ from __future__ import absolute_import import abc import sys -from pipenv import PIPENV_VENDOR, PIPENV_PATCHED - -sys.path.insert(0, PIPENV_VENDOR) -sys.path.insert(0, PIPENV_PATCHED) import hashlib import os import requirements import six -from attr import attrs, attrib, Factory, validators +from .vendor.attr import attrs, attrib, Factory, validators +from .vendor import attr from pip9.index import Link from pip9.download import path_to_url from pip9.req.req_install import _strip_extras from pip9._vendor.distlib.markers import Evaluator from pip9._vendor.packaging.markers import Marker, InvalidMarker from pip9._vendor.packaging.specifiers import SpecifierSet, InvalidSpecifier -from pipenv.utils import SCHEME_LIST, VCS_LIST, is_installable_file, is_vcs, multi_split, get_converted_relative_path, is_star, is_valid_url -from first import first +from .utils import SCHEME_LIST, VCS_LIST, is_installable_file, is_vcs, multi_split, get_converted_relative_path, is_star, is_valid_url, pep423_name +from .vendor.first import first try: from pathlib import Path @@ -366,19 +363,6 @@ def from_pipfile(cls, name, pipfile): } return cls(**arg_dict) - @req.default - def get_requirement(self): - base = '{0}'.format(self.link) - req = first(requirements.parse(base)) - if self.editable: - req.editable = True - if self.link and self.link.scheme.startswith('file') and self.path: - req.path = self.path - req.local_file = True - req.uri = None - req.link = self.link - return req - @property def line_part(self): seed = self.path or self.link.url or self.uri @@ -414,8 +398,6 @@ class VCSRequirement(FileRequirement): vcs = attrib(validator=validators.optional(_validate_vcs), default=None) # : vcs reference name (branch / commit / tag) ref = attrib(default=None) - #: path to hit - without any of the VCS prefixes (like git+ / http+ / etc) - uri = attrib(default=None) subdirectory = attrib(default=None) name = attrib() link = attrib() @@ -488,6 +470,7 @@ def from_pipfile(cls, name, pipfile): @classmethod def from_line(cls, line, editable=None): + path = None if line.startswith('-e '): editable = True line = line.split(' ', 1)[1] @@ -599,6 +582,24 @@ def get_specifiers(self): return _specs_to_string(self.req.req.specs) return + @property + def is_vcs(self): + return isinstance(self, VCSRequirement) + + @property + def is_file_or_url(self): + return isinstance(self, FileRequirement) + + @property + def is_named(self): + return isinstance(self, NamedRequirement) + + @property + def normalized_name(self): + if not self.is_vcs and not self.is_file_or_url: + return pep423_name(self.name) + return self.name + @classmethod def from_line(cls, line): hashes = None @@ -616,6 +617,7 @@ def from_line(cls, line): r = FileRequirement.from_line(line) elif is_vcs(stripped_line): r = VCSRequirement.from_line(line) + vcs = r.vcs else: name = multi_split(stripped_line, '!=<>~')[0] if not extras: @@ -703,6 +705,10 @@ def as_pipfile(self, include_index=False): base_dict = base_dict.get('version') return {name: base_dict} + @property + def pipfile_entry(self): + return self.as_pipfile().copy().popitem() + def _extras_to_string(extras): """Turn a list of extras into a string""" diff --git a/pipenv/utils.py b/pipenv/utils.py index 614b333c84..f8cf9192a5 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -82,97 +82,6 @@ def _get_requests_session(): return 
requests_session -def get_requirement(dep): - from .patched.notpip._internal.req.req_install import _strip_extras, Wheel - from .patched.notpip._internal.index import Link - from .vendor import requirements - """Pre-clean requirement strings passed to the requirements parser. - - Ensures that we can accept both local and relative paths, file and VCS URIs, - remote URIs, and package names, and that we pass only valid requirement strings - to the requirements parser. Performs necessary modifications to requirements - object if the user input was a local relative path. - - :param str dep: A requirement line - :returns: :class:`requirements.Requirement` object - """ - path = None - uri = None - cleaned_uri = None - editable = False - dep_link = None - # check for editable dep / vcs dep - if dep.startswith('-e '): - editable = True - # Use the user supplied path as the written dependency - dep = dep.split(' ', 1)[1] - # Split out markers if they are present - similar to how pip does it - # See notpip.req.req_install.InstallRequirement.from_line - if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST): - marker_sep = ';' - else: - marker_sep = '; ' - if marker_sep in dep: - dep, markers = dep.split(marker_sep, 1) - markers = markers.strip() - if not markers: - markers = None - else: - markers = None - # Strip extras from the requirement so we can make a properly parseable req - dep, extras = _strip_extras(dep) - # Only operate on local, existing, non-URI formatted paths which are installable - if is_installable_file(dep): - dep_path = Path(dep) - dep_link = Link(dep_path.absolute().as_uri()) - if dep_path.is_absolute() or dep_path.as_posix() == '.': - path = dep_path.as_posix() - else: - path = get_converted_relative_path(dep) - dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment - elif is_vcs(dep): - # Generate a Link object for parsing egg fragments - dep_link = Link(dep) - # Save the original path to store in the pipfile - uri = dep_link.url - # Construct the requirement using proper git+ssh:// replaced uris or names if available - cleaned_uri = clean_git_uri(dep) - dep = cleaned_uri - if editable: - dep = '-e {0}'.format(dep) - req = [r for r in requirements.parse(dep)][0] - # if all we built was the requirement name and still need everything else - if req.name and not any([req.uri, req.path]): - if dep_link: - if dep_link.scheme.startswith('file') and path and not req.path: - req.path = path - req.local_file = True - req.uri = None - else: - req.uri = dep_link.url_without_fragment - # If the result is a local file with a URI and we have a local path, unset the URI - # and set the path instead -- note that local files may have 'path' set by accident - elif req.local_file and path and not req.vcs: - req.path = path - req.uri = None - if dep_link and dep_link.is_wheel and not req.name: - req.name = os.path.basename(Wheel(dep_link.path).name) - elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri: - req.uri = strip_ssh_from_git_uri(req.uri) - req.line = strip_ssh_from_git_uri(req.line) - req.editable = editable - if markers: - req.markers = markers - if extras: - # Bizarrely this is also what pip does... 
- req.extras = [ - r for r in requirements.parse('fakepkg{0}'.format(extras)) - ][ - 0 - ].extras - return req - - def cleanup_toml(tml): toml = tml.split('\n') new_toml = [] @@ -582,12 +491,6 @@ def multi_split(s, split): return [i for i in s.split('|') if len(i) > 0] -def convert_deps_from_pip(dep): - """"Converts a pip-formatted dependency to a Pipfile-formatted one.""" - req = PipenvRequirement.from_line(dep) - return req.as_pipfile() - - def is_star(val): return isinstance(val, six.string_types) and val == '*' @@ -601,6 +504,7 @@ def is_pinned(val): def convert_deps_to_pip(deps, project=None, r=True, include_index=False): """"Converts a Pipfile-formatted dependency to a pip-formatted one.""" from ._compat import NamedTemporaryFile + from .requirements import PipenvRequirement dependencies = [] for dep_name, dep in deps.items(): indexes = project.sources if hasattr(project, 'sources') else None @@ -1265,19 +1169,16 @@ def get_vcs_deps(project, pip_freeze=None, which=None, verbose=False, clear=Fals backend = vcs_registry._registry[first(b for b in vcs_registry if b == backend_name)] __vcs = backend(url=_pip_uri) - installed = convert_deps_from_pip(line) - if not hasattr(installed, 'keys'): - pass - lock_name = first(installed.keys()) - names.add(lock_name) + installed = PipenvRequirement.from_line(line) + names.add(installed.normalized_name) locked_rev = None for _name in names: locked_rev = install_or_update_vcs(__vcs, src_dir, _name, rev=pipfile_rev) - if is_vcs(installed[lock_name]): - installed[lock_name]['ref'] = locked_rev - lockfiles.append({pipfile_name: installed[lock_name]}) + if installed.is_vcs: + installed.req.ref = locked_rev + lockfiles.append({pipfile_name: installed.pipfile_entry[1]}) pipfile_srcdir = os.path.join(src_dir, pipfile_name) - lockfile_srcdir = os.path.join(src_dir, lock_name) + lockfile_srcdir = os.path.join(src_dir, installed.normalized_name) lines.append(line) if os.path.exists(pipfile_srcdir): lockfiles.extend(venv_resolve_deps(['-e {0}'.format(pipfile_srcdir)], which=which, verbose=verbose, project=project, clear=clear, pre=pre, allow_global=allow_global)) From 31672852868e92308e243a295e964e0b177fb210 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 12 Apr 2018 08:48:52 -0400 Subject: [PATCH 04/17] Update tests for new requirements format Signed-off-by: Dan Ryan --- tests/unit/test_utils.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 82bdc2f12f..9d1ec9d42a 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -4,6 +4,7 @@ from mock import patch, Mock from first import first import pipenv.utils +import pipenv.requirements # Pipfile format <-> requirements.txt format. 
@@ -90,6 +91,13 @@ def test_convert_deps_to_pip(deps, expected): }}, 'FooProject[stuff]==1.2 --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824' ), + ( + {'requests': { + 'git': 'https://github.com/requests/requests.git', + 'ref': 'master', 'extras': ['security'], + }}, + 'git+https://github.com/requests/requests.git@master#egg=requests[security]', + ), ]) def test_convert_deps_to_pip_one_way(deps, expected): assert pipenv.utils.convert_deps_to_pip(deps, r=False) == [expected] @@ -113,7 +121,7 @@ def test_convert_from_pip(expected, requirement): package = first(expected.keys()) if hasattr(expected[package], 'keys') and expected[package].get('editable') is False: del expected[package]['editable'] - assert pipenv.utils.convert_deps_from_pip(requirement) == expected + assert pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() == expected @pytest.mark.utils @@ -339,39 +347,39 @@ def test_nix_normalize_drive(self, input_path, expected): @pytest.mark.requirements def test_get_requirements(self): # Test eggs in URLs - url_with_egg = pipenv.utils.get_requirement( + url_with_egg = pipenv.requirements.PipenvRequirement.from_line( 'https://github.com/IndustriaTech/django-user-clipboard/archive/0.6.1.zip#egg=django-user-clipboard' - ) + ).requirement assert url_with_egg.uri == 'https://github.com/IndustriaTech/django-user-clipboard/archive/0.6.1.zip' assert url_with_egg.name == 'django-user-clipboard' # Test URLs without eggs pointing at installable zipfiles - url = pipenv.utils.get_requirement( + url = pipenv.requirements.PipenvRequirement.from_line( 'https://github.com/kennethreitz/tablib/archive/0.12.1.zip' - ) + ).requirement assert url.uri == 'https://github.com/kennethreitz/tablib/archive/0.12.1.zip' # Test VCS urls with refs and eggnames - vcs_url = pipenv.utils.get_requirement( + vcs_url = pipenv.requirements.PipenvRequirement.from_line( 'git+https://github.com/kennethreitz/tablib.git@master#egg=tablib' - ) + ).requirement assert vcs_url.vcs == 'git' and vcs_url.name == 'tablib' and vcs_url.revision == 'master' assert vcs_url.uri == 'git+https://github.com/kennethreitz/tablib.git' # Test normal package requirement - normal = pipenv.utils.get_requirement('tablib') + normal = pipenv.requirements.PipenvRequirement.from_line('tablib').requirement assert normal.name == 'tablib' # Pinned package requirement - spec = pipenv.utils.get_requirement('tablib==0.12.1') + spec = pipenv.requirements.PipenvRequirement.from_line('tablib==0.12.1').requirement assert spec.name == 'tablib' and spec.specs == [('==', '0.12.1')] # Test complex package with both extras and markers - extras_markers = pipenv.utils.get_requirement( + extras_markers = pipenv.requirements.PipenvRequirement.from_line( "requests[security]; os_name=='posix'" - ) + ).requirement assert extras_markers.extras == ['security'] assert extras_markers.name == 'requests' assert extras_markers.markers == "os_name=='posix'" # Test VCS uris get generated correctly, retain git+git@ if supplied that way, and are named according to egg fragment - git_reformat = pipenv.utils.get_requirement( + git_reformat = pipenv.requirements.PipenvRequirement.from_line( '-e git+git@github.com:pypa/pipenv.git#egg=pipenv' - ) + ).requirement assert git_reformat.uri == 'git+git@github.com:pypa/pipenv.git' assert git_reformat.name == 'pipenv' assert git_reformat.editable From eb4423b208bb468ba0e7f0583ad028868ca4c98e Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 19 Apr 2018 23:33:42 -0400 Subject: [PATCH 05/17] Fix test 
references to old requirements parser Signed-off-by: Dan Ryan --- pipenv/project.py | 1 - tests/unit/test_utils.py | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pipenv/project.py b/pipenv/project.py index f8fcfe1791..149aa802ee 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -725,7 +725,6 @@ def remove_package_from_pipfile(self, package_name, dev=False): self.write_toml(p) def add_package_to_pipfile(self, package_name, dev=False): - from .utils import convert_deps_from_pip # Read and append Pipfile. p = self.parsed_pipfile # Don't re-capitalize file URLs or VCSs. diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 9d1ec9d42a..ee71b14736 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -10,7 +10,7 @@ # Pipfile format <-> requirements.txt format. DEP_PIP_PAIRS = [ ({'requests': '*'}, 'requests'), - ({'requests': {'extras': ['socks']}}, 'requests[socks]'), + ({'requests': {'extras': ['socks'], 'version': '*'}}, 'requests[socks]'), ({'django': '>1.10'}, 'django>1.10'), ({'Django': '>1.10'}, 'Django>1.10'), ( @@ -121,7 +121,7 @@ def test_convert_from_pip(expected, requirement): package = first(expected.keys()) if hasattr(expected[package], 'keys') and expected[package].get('editable') is False: del expected[package]['editable'] - assert pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() == expected + assert pipenv.requirements.PipenvRequirement.from_line(requirement).as_pipfile() == expected @pytest.mark.utils @@ -130,7 +130,7 @@ def test_convert_from_pip_fail_if_no_egg(): """ dep = 'git+https://github.com/kennethreitz/requests.git' with pytest.raises(ValueError) as e: - dep = pipenv.utils.convert_deps_from_pip(dep) + dep = pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() assert 'pipenv requires an #egg fragment for vcs' in str(e) @@ -139,7 +139,7 @@ def test_convert_from_pip_git_uri_normalize(): """Pip does not parse this correctly, but we can (by converting to ssh://). 
""" dep = 'git+git@host:user/repo.git#egg=myname' - dep = pipenv.utils.convert_deps_from_pip(dep) + dep = pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() assert dep == { 'myname': { 'git': 'git@host:user/repo.git', From b2085cb05877e9656f65d2d3692f11c04583b49f Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 19 Apr 2018 23:55:32 -0400 Subject: [PATCH 06/17] Mark pipenv check as flaky Signed-off-by: Dan Ryan --- tests/integration/test_cli.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_cli.py b/tests/integration/test_cli.py index caccc57bc5..aed10aded0 100644 --- a/tests/integration/test_cli.py +++ b/tests/integration/test_cli.py @@ -5,7 +5,7 @@ import re import pytest - +from flaky import flaky from pipenv.utils import normalize_drive @@ -78,6 +78,7 @@ def test_pipenv_graph_reverse(PipenvInstance, pypi): @pytest.mark.cli @pytest.mark.needs_internet(reason='required by check') +@flaky def test_pipenv_check(PipenvInstance, pypi): with PipenvInstance(pypi=pypi) as p: p.pipenv('install requests==1.0.0') From 4ce55b4c1f0bec787520098da922c5644d49b028 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Mon, 23 Apr 2018 17:59:19 -0400 Subject: [PATCH 07/17] Add attrs to vendored deps and rebuild Signed-off-by: Dan Ryan --- pipenv/vendor/attr/LICENSE | 21 +++++++++++++++++++++ pipenv/vendor/vendor.txt | 1 + tasks/vendoring/__init__.py | 1 + 3 files changed, 23 insertions(+) create mode 100644 pipenv/vendor/attr/LICENSE diff --git a/pipenv/vendor/attr/LICENSE b/pipenv/vendor/attr/LICENSE new file mode 100644 index 0000000000..7ae3df9309 --- /dev/null +++ b/pipenv/vendor/attr/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 4e053706f3..2336fa514c 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -1,4 +1,5 @@ appdirs==1.4.3 +attrs==17.4.0 backports.shutil_get_terminal_size==1.0.0 backports.weakref==1.0.post1 blindspin==2.0.1 diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py index b0ffa46cbf..1d58badd99 100644 --- a/tasks/vendoring/__init__.py +++ b/tasks/vendoring/__init__.py @@ -26,6 +26,7 @@ 'pip-tools': 'piptools', 'setuptools': 'pkg_resources', 'msgpack-python': 'msgpack', + 'attrs': 'attr', } # from time to time, remove the no longer needed ones From c50259f494c1408b83a279b876290eae4ba30d49 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 10 May 2018 12:13:55 -0400 Subject: [PATCH 08/17] Update vendoring task Signed-off-by: Dan Ryan --- pipenv/vendor/vendor.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index 2336fa514c..a8b1fd6358 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -8,18 +8,21 @@ click-completion==0.2.1 click-didyoumean==0.0.3 colorama==0.3.9 delegator.py==0.1.0 +distlib docopt==0.6.2 python-dotenv==0.8.2 first==2.0.1 iso8601==0.1.12 jinja2==2.9.5 markupsafe==1.0 +packaging parse==1.8.0 pathlib2==2.1.0 pexpect==4.5.0 git+https://github.com/naiquevin/pipdeptree.git@2e9e5119160184f359131ea99993f0158a20cd31#egg=pipdeptree pipreqs==0.4.9 ptyprocess==0.5.2 +pythonfinder pytoml==0.1.14 requests==2.18.4 chardet==3.0.4 @@ -27,6 +30,7 @@ requests==2.18.4 urllib3==1.22 certifi==2018.1.18 requirements-parser==0.2.0 +requirementslib six==1.10.0 semver==2.7.8 shutilwhich==1.1.0 From c4060637554d9c5e7be4878e3993376ec1817432 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 10 May 2018 12:31:30 -0400 Subject: [PATCH 09/17] Vendor requirementslib and cut old requirements Signed-off-by: Dan Ryan --- pipenv/core.py | 16 +- pipenv/project.py | 4 +- pipenv/utils.py | 8 +- pipenv/vendor/distlib/LICENSE.txt | 284 ++ pipenv/vendor/distlib/__init__.py | 23 + pipenv/vendor/distlib/_backport/__init__.py | 6 + pipenv/vendor/distlib/_backport/misc.py | 41 + pipenv/vendor/distlib/_backport/shutil.py | 761 +++++ pipenv/vendor/distlib/_backport/sysconfig.cfg | 84 + pipenv/vendor/distlib/_backport/sysconfig.py | 788 +++++ pipenv/vendor/distlib/_backport/tarfile.py | 2607 +++++++++++++++++ pipenv/vendor/distlib/compat.py | 1120 +++++++ pipenv/vendor/distlib/database.py | 1336 +++++++++ pipenv/vendor/distlib/index.py | 516 ++++ pipenv/vendor/distlib/locators.py | 1292 ++++++++ pipenv/vendor/distlib/manifest.py | 393 +++ pipenv/vendor/distlib/markers.py | 131 + pipenv/vendor/distlib/metadata.py | 1091 +++++++ pipenv/vendor/distlib/resources.py | 355 +++ pipenv/vendor/distlib/scripts.py | 415 +++ pipenv/vendor/distlib/t32.exe | Bin 0 -> 92672 bytes pipenv/vendor/distlib/t64.exe | Bin 0 -> 102400 bytes pipenv/vendor/distlib/util.py | 1755 +++++++++++ pipenv/vendor/distlib/version.py | 736 +++++ pipenv/vendor/distlib/w32.exe | Bin 0 -> 89088 bytes pipenv/vendor/distlib/w64.exe | Bin 0 -> 99328 bytes pipenv/vendor/distlib/wheel.py | 984 +++++++ pipenv/vendor/packaging/LICENSE | 3 + pipenv/vendor/packaging/LICENSE.APACHE | 177 ++ pipenv/vendor/packaging/LICENSE.BSD | 23 + pipenv/vendor/packaging/__about__.py | 21 + pipenv/vendor/packaging/__init__.py | 14 + pipenv/vendor/packaging/_compat.py | 30 + pipenv/vendor/packaging/_structures.py | 70 + pipenv/vendor/packaging/markers.py | 301 ++ pipenv/vendor/packaging/requirements.py | 
130 + pipenv/vendor/packaging/specifiers.py | 774 +++++ pipenv/vendor/packaging/utils.py | 63 + pipenv/vendor/packaging/version.py | 441 +++ pipenv/vendor/pythonfinder/LICENSE.txt | 20 + pipenv/vendor/pythonfinder/__init__.py | 1 + pipenv/vendor/pythonfinder/__main__.py | 12 + .../vendor/pythonfinder/_vendor/__init__.py | 0 .../_vendor/pep514tools/__init__.py | 11 + .../_vendor/pep514tools/__main__.py | 7 + .../_vendor/pep514tools/_registry.py | 198 ++ .../_vendor/pep514tools/environment.py | 123 + pipenv/vendor/pythonfinder/cli.py | 39 + pipenv/vendor/pythonfinder/pythonfinder.py | 234 ++ pipenv/vendor/requirementslib/LICENSE | 21 + pipenv/vendor/requirementslib/__init__.py | 4 + pipenv/vendor/requirementslib/_compat.py | 40 + .../requirementslib}/requirements.py | 415 +-- pipenv/vendor/requirementslib/utils.py | 103 + tasks/vendoring/__init__.py | 3 + 55 files changed, 17820 insertions(+), 204 deletions(-) create mode 100644 pipenv/vendor/distlib/LICENSE.txt create mode 100644 pipenv/vendor/distlib/__init__.py create mode 100644 pipenv/vendor/distlib/_backport/__init__.py create mode 100644 pipenv/vendor/distlib/_backport/misc.py create mode 100644 pipenv/vendor/distlib/_backport/shutil.py create mode 100644 pipenv/vendor/distlib/_backport/sysconfig.cfg create mode 100644 pipenv/vendor/distlib/_backport/sysconfig.py create mode 100644 pipenv/vendor/distlib/_backport/tarfile.py create mode 100644 pipenv/vendor/distlib/compat.py create mode 100644 pipenv/vendor/distlib/database.py create mode 100644 pipenv/vendor/distlib/index.py create mode 100644 pipenv/vendor/distlib/locators.py create mode 100644 pipenv/vendor/distlib/manifest.py create mode 100644 pipenv/vendor/distlib/markers.py create mode 100644 pipenv/vendor/distlib/metadata.py create mode 100644 pipenv/vendor/distlib/resources.py create mode 100644 pipenv/vendor/distlib/scripts.py create mode 100644 pipenv/vendor/distlib/t32.exe create mode 100644 pipenv/vendor/distlib/t64.exe create mode 100644 pipenv/vendor/distlib/util.py create mode 100644 pipenv/vendor/distlib/version.py create mode 100644 pipenv/vendor/distlib/w32.exe create mode 100644 pipenv/vendor/distlib/w64.exe create mode 100644 pipenv/vendor/distlib/wheel.py create mode 100644 pipenv/vendor/packaging/LICENSE create mode 100644 pipenv/vendor/packaging/LICENSE.APACHE create mode 100644 pipenv/vendor/packaging/LICENSE.BSD create mode 100644 pipenv/vendor/packaging/__about__.py create mode 100644 pipenv/vendor/packaging/__init__.py create mode 100644 pipenv/vendor/packaging/_compat.py create mode 100644 pipenv/vendor/packaging/_structures.py create mode 100644 pipenv/vendor/packaging/markers.py create mode 100644 pipenv/vendor/packaging/requirements.py create mode 100644 pipenv/vendor/packaging/specifiers.py create mode 100644 pipenv/vendor/packaging/utils.py create mode 100644 pipenv/vendor/packaging/version.py create mode 100644 pipenv/vendor/pythonfinder/LICENSE.txt create mode 100644 pipenv/vendor/pythonfinder/__init__.py create mode 100644 pipenv/vendor/pythonfinder/__main__.py create mode 100644 pipenv/vendor/pythonfinder/_vendor/__init__.py create mode 100644 pipenv/vendor/pythonfinder/_vendor/pep514tools/__init__.py create mode 100644 pipenv/vendor/pythonfinder/_vendor/pep514tools/__main__.py create mode 100644 pipenv/vendor/pythonfinder/_vendor/pep514tools/_registry.py create mode 100644 pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py create mode 100644 pipenv/vendor/pythonfinder/cli.py create mode 100644 pipenv/vendor/pythonfinder/pythonfinder.py 
create mode 100644 pipenv/vendor/requirementslib/LICENSE create mode 100644 pipenv/vendor/requirementslib/__init__.py create mode 100644 pipenv/vendor/requirementslib/_compat.py rename pipenv/{ => vendor/requirementslib}/requirements.py (63%) create mode 100644 pipenv/vendor/requirementslib/utils.py diff --git a/pipenv/core.py b/pipenv/core.py index d295708cec..75bc627686 100644 --- a/pipenv/core.py +++ b/pipenv/core.py @@ -25,7 +25,7 @@ from .cmdparse import ScriptEmptyError from .project import Project, SourceNotFound -from .requirements import PipenvRequirement +from .vendor.requirementslib import Requirement from .utils import ( convert_deps_to_pip, is_required_version, @@ -973,7 +973,7 @@ def get_downloads_info(names_map, section): p = project.parsed_pipfile for fname in os.listdir(project.download_location): # Get name from filename mapping. - name = PipenvRequirement.from_line(names_map[fname]).name + name = Requirement.from_line(names_map[fname]).name # Get the version info from the filenames. version = parse_download_fname(fname, name) # Get the hash of each file. @@ -1240,7 +1240,7 @@ def do_purge(bare=False, downloads=False, allow_global=False, verbose=False): actually_installed = [] for package in installed: try: - dep = PipenvRequirement.from_line(package) + dep = Requirement.from_line(package) except AssertionError: dep = None if dep and not dep.is_vcs and not dep.editable: @@ -1410,7 +1410,7 @@ def pip_install( f.write(package_name) # Install dependencies when a package is a VCS dependency. try: - req = PipenvRequirement.from_line( + req = Requirement.from_line( package_name.split('--hash')[0].split('--trusted-host')[0] ).vcs except (ParseException, ValueError) as e: @@ -1707,7 +1707,7 @@ def do_outdated(): ) results = filter(bool, results) for result in results: - dep = PipenvRequirement.from_line(result) + dep = Requirement.from_line(result) packages.update(dep.as_pipfile()) updated_packages = {} lockfile = do_lock(write=False) @@ -1932,7 +1932,7 @@ def do_install( if selective_upgrade: for i, package_name in enumerate(package_names[:]): section = project.packages if not dev else project.dev_packages - package = PipenvRequirement.from_line(package_name) + package = Requirement.from_line(package_name) package__name, package__val = package.pipfile_entry try: if not is_star(section[package__name]) and is_star( @@ -1973,7 +1973,7 @@ def do_install( ) # Warn if --editable wasn't passed. try: - converted = PipenvRequirement.from_line(package_name) + converted = Requirement.from_line(package_name) except ValueError as e: click.echo('{0}: {1}'.format(crayons.red('WARNING'), e)) requirements_directory.cleanup() @@ -2557,7 +2557,7 @@ def do_clean( ) installed_package_names = [] for installed in installed_packages: - r = PipenvRequirement.from_line(installed).requirement + r = Requirement.from_line(installed).requirement # Ignore editable installations. if not r.editable: installed_package_names.append(r.name.lower()) diff --git a/pipenv/project.py b/pipenv/project.py index 149aa802ee..98dd4b226a 100644 --- a/pipenv/project.py +++ b/pipenv/project.py @@ -20,7 +20,7 @@ from pathlib2 import Path from .cmdparse import Script -from .requirements import PipenvRequirement +from .vendor.requirementslib import Requirement from .utils import ( atomic_open_for_write, mkdir_p, @@ -728,7 +728,7 @@ def add_package_to_pipfile(self, package_name, dev=False): # Read and append Pipfile. p = self.parsed_pipfile # Don't re-capitalize file URLs or VCSs. 
- package = PipenvRequirement.from_line(package_name) + package = Requirement.from_line(package_name) converted = first(package.as_pipfile().values()) key = 'dev-packages' if dev else 'packages' # Set empty group if it doesn't exist yet. diff --git a/pipenv/utils.py b/pipenv/utils.py index f8cf9192a5..42ae82bc73 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -55,7 +55,7 @@ def detach(self): from collections.abc import Mapping except ImportError: from collections import Mapping -from .requirements import PipenvRequirement +from .vendor.requirementslib import Requirement if six.PY2: @@ -504,13 +504,13 @@ def is_pinned(val): def convert_deps_to_pip(deps, project=None, r=True, include_index=False): """"Converts a Pipfile-formatted dependency to a pip-formatted one.""" from ._compat import NamedTemporaryFile - from .requirements import PipenvRequirement + from .vendor.requirementslib import Requirement dependencies = [] for dep_name, dep in deps.items(): indexes = project.sources if hasattr(project, 'sources') else None if hasattr(dep, 'keys') and dep.get('index'): indexes = project.get_source(dep['index']) - new_dep = PipenvRequirement.from_pipfile(dep_name, indexes, dep) + new_dep = Requirement.from_pipfile(dep_name, indexes, dep) req = new_dep.as_line( project=project, include_index=include_index @@ -1169,7 +1169,7 @@ def get_vcs_deps(project, pip_freeze=None, which=None, verbose=False, clear=Fals backend = vcs_registry._registry[first(b for b in vcs_registry if b == backend_name)] __vcs = backend(url=_pip_uri) - installed = PipenvRequirement.from_line(line) + installed = Requirement.from_line(line) names.add(installed.normalized_name) locked_rev = None for _name in names: diff --git a/pipenv/vendor/distlib/LICENSE.txt b/pipenv/vendor/distlib/LICENSE.txt new file mode 100644 index 0000000000..c31ac56d77 --- /dev/null +++ b/pipenv/vendor/distlib/LICENSE.txt @@ -0,0 +1,284 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations (now Zope +Corporation, see http://www.zope.com). In 2001, the Python Software +Foundation (PSF, see http://www.python.org/psf/) was formed, a +non-profit organization created specifically to own Python-related +Intellectual Property. Zope Corporation is a sponsoring member of +the PSF. + +All Python releases are Open Source (see http://www.opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? 
(1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.2 2.1.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2.1 2.2 2002 PSF yes + 2.2.2 2.2.1 2002 PSF yes + 2.2.3 2.2.2 2003 PSF yes + 2.3 2.2.2 2002-2003 PSF yes + 2.3.1 2.3 2002-2003 PSF yes + 2.3.2 2.3.1 2002-2003 PSF yes + 2.3.3 2.3.2 2002-2003 PSF yes + 2.3.4 2.3.3 2004 PSF yes + 2.3.5 2.3.4 2005 PSF yes + 2.4 2.3 2004 PSF yes + 2.4.1 2.4 2005 PSF yes + 2.4.2 2.4.1 2005 PSF yes + 2.4.3 2.4.2 2006 PSF yes + 2.4.4 2.4.3 2006 PSF yes + 2.5 2.4 2006 PSF yes + 2.5.1 2.5 2007 PSF yes + 2.5.2 2.5.1 2008 PSF yes + 2.5.3 2.5.2 2008 PSF yes + 2.6 2.5 2008 PSF yes + 2.6.1 2.6 2008 PSF yes + 2.6.2 2.6.1 2009 PSF yes + 2.6.3 2.6.2 2009 PSF yes + 2.6.4 2.6.3 2009 PSF yes + 2.6.5 2.6.4 2010 PSF yes + 3.0 2.6 2008 PSF yes + 3.0.1 3.0 2009 PSF yes + 3.1 3.0.1 2009 PSF yes + 3.1.1 3.1 2009 PSF yes + 3.1.2 3.1 2010 PSF yes + 3.2 3.1 2010 PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 +Python Software Foundation; All Rights Reserved" are retained in Python alone or +in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. 
By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the Internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. This +Agreement may also be obtained from a proxy server on the Internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. 
Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/pipenv/vendor/distlib/__init__.py b/pipenv/vendor/distlib/__init__.py new file mode 100644 index 0000000000..d4aab453ae --- /dev/null +++ b/pipenv/vendor/distlib/__init__.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import logging + +__version__ = '0.2.7' + +class DistlibException(Exception): + pass + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): + def handle(self, record): pass + def emit(self, record): pass + def createLock(self): self.lock = None + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/pipenv/vendor/distlib/_backport/__init__.py b/pipenv/vendor/distlib/_backport/__init__.py new file mode 100644 index 0000000000..f7dbf4c9aa --- /dev/null +++ b/pipenv/vendor/distlib/_backport/__init__.py @@ -0,0 +1,6 @@ +"""Modules copied from Python 3 standard libraries, for internal use only. + +Individual classes and functions are found in d2._backport.misc. Intended +usage is to always import things missing from 3.1 from that module: the +built-in/stdlib objects will be used if found. +""" diff --git a/pipenv/vendor/distlib/_backport/misc.py b/pipenv/vendor/distlib/_backport/misc.py new file mode 100644 index 0000000000..cfb318d34f --- /dev/null +++ b/pipenv/vendor/distlib/_backport/misc.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Backports for individual classes and functions.""" + +import os +import sys + +__all__ = ['cache_from_source', 'callable', 'fsencode'] + + +try: + from imp import cache_from_source +except ImportError: + def cache_from_source(py_file, debug=__debug__): + ext = debug and 'c' or 'o' + return py_file + ext + + +try: + callable = callable +except NameError: + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode +except AttributeError: + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(sys.getfilesystemencoding()) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) diff --git a/pipenv/vendor/distlib/_backport/shutil.py b/pipenv/vendor/distlib/_backport/shutil.py new file mode 100644 index 0000000000..159e49ee8c --- /dev/null +++ b/pipenv/vendor/distlib/_backport/shutil.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +from . import tarfile + +try: + import bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", "ignore_patterns"] + +class Error(EnvironmentError): + pass + +class SpecialFileError(EnvironmentError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(EnvironmentError): + """Raised when a command could not be executed""" + +class ReadError(EnvironmentError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registry operation with the archiving + and unpacking registries fails""" + + +try: + WindowsError +except NameError: + WindowsError = None + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst): + """Copy data from src to dst""" + if _samefile(src, dst): + raise Error("`%s` and `%s` are the same file" % (src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) 
+ if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + +def copymode(src, dst): + """Copy mode bits from src to dst""" + if hasattr(os, 'chmod'): + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + os.chmod(dst, mode) + +def copystat(src, dst): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + if hasattr(os, 'utime'): + os.utime(dst, (st.st_atime, st.st_mtime)) + if hasattr(os, 'chmod'): + os.chmod(dst, mode) + if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): + try: + os.chflags(dst, st.st_flags) + except OSError as why: + if (not hasattr(errno, 'EOPNOTSUPP') or + why.errno != errno.EOPNOTSUPP): + raise + +def copy(src, dst): + """Copy data and mode bits ("cp src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copymode(src, dst) + +def copy2(src, dst): + """Copy data and all stat info ("cp -p src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copystat(src, dst) + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. 
+ + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + os.symlink(linkto, dstname) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occurs. copy2 will raise an error + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except EnvironmentError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + if WindowsError is not None and isinstance(why, WindowsError): + # Copying file access times may fail on Windows + pass + else: + errors.extend((src, dst, str(why))) + if errors: + raise Error(errors) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + names = [] + try: + names = os.listdir(path) + except os.error: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + return os.path.basename(path.rstrip(os.path.sep)) + +def move(src, dst): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. + A lot more could be done here... 
A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
+ """ + tar_compression = {'gzip': 'gz', None: ''} + compress_ext = {'gzip': '.gz'} + + if _BZ2_SUPPORTED: + tar_compression['bzip2'] = 'bz2' + compress_ext['bzip2'] = '.bz2' + + # flags for compression program, each element of list will be an argument + if compress is not None and compress not in compress_ext: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + archive_name = base_name + '.tar' + compress_ext.get(compress, '') + archive_dir = os.path.dirname(archive_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): + # XXX see if we want to keep an external call here + if verbose: + zipoptions = "-r" + else: + zipoptions = "-rq" + from distutils.errors import DistutilsExecError + from distutils.spawn import spawn + try: + spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) + except DistutilsExecError: + # XXX really should distinguish between "couldn't find + # external 'zip' command" and "zip failed". + raise ExecError("unable to create zip file '%s': " + "could neither import the 'zipfile' module nor " + "find a standalone zip utility") % zip_filename + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Uses either the + "zipfile" Python module (if available) or the InfoZIP "zip" utility + (if installed and found on the default search path). If neither tool is + available, raises ExecError. Returns the name of the output zip + file. + """ + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # If zipfile module is not available, try spawning an external 'zip' + # command. 
+ try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + zip = zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) + + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zip.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + zip.close() + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file"), + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not isinstance(function, collections.Callable): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. 
+ """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not isinstance(function, collections.Callable): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registry.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. + """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) diff --git a/pipenv/vendor/distlib/_backport/sysconfig.cfg b/pipenv/vendor/distlib/_backport/sysconfig.cfg new file mode 100644 index 0000000000..1746bd01c1 --- /dev/null +++ b/pipenv/vendor/distlib/_backport/sysconfig.cfg @@ -0,0 +1,84 @@ +[posix_prefix] +# Configuration directories. Some of these come straight out of the +# configure script. They are for implementing the other variables, not to +# be used directly in [resource_locations]. 
+confdir = /etc +datadir = /usr/share +libdir = /usr/lib +statedir = /var +# User resource directory +local = ~/.local/{distribution.name} + +stdlib = {base}/lib/python{py_version_short} +platstdlib = {platbase}/lib/python{py_version_short} +purelib = {base}/lib/python{py_version_short}/site-packages +platlib = {platbase}/lib/python{py_version_short}/site-packages +include = {base}/include/python{py_version_short}{abiflags} +platinclude = {platbase}/include/python{py_version_short}{abiflags} +data = {base} + +[posix_home] +stdlib = {base}/lib/python +platstdlib = {base}/lib/python +purelib = {base}/lib/python +platlib = {base}/lib/python +include = {base}/include/python +platinclude = {base}/include/python +scripts = {base}/bin +data = {base} + +[nt] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2_home] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[nt_user] +stdlib = {userbase}/Python{py_version_nodot} +platstdlib = {userbase}/Python{py_version_nodot} +purelib = {userbase}/Python{py_version_nodot}/site-packages +platlib = {userbase}/Python{py_version_nodot}/site-packages +include = {userbase}/Python{py_version_nodot}/Include +scripts = {userbase}/Scripts +data = {userbase} + +[posix_user] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[osx_framework_user] +stdlib = {userbase}/lib/python +platstdlib = {userbase}/lib/python +purelib = {userbase}/lib/python/site-packages +platlib = {userbase}/lib/python/site-packages +include = {userbase}/include +scripts = {userbase}/bin +data = {userbase} diff --git a/pipenv/vendor/distlib/_backport/sysconfig.py b/pipenv/vendor/distlib/_backport/sysconfig.py new file mode 100644 index 0000000000..1df3aba144 --- /dev/null +++ b/pipenv/vendor/distlib/_backport/sysconfig.py @@ -0,0 +1,788 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Access to Python's configuration information.""" + +import codecs +import os +import re +import sys +from os.path import pardir, realpath +try: + import configparser +except ImportError: + import ConfigParser as configparser + + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) + + +def is_python_build(): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +_cfg_read = False + +def _ensure_cfg_read(): + global _cfg_read + if not _cfg_read: + from ..resources import finder + backport_package = __name__.rsplit('.', 1)[0] + _finder = finder(backport_package) + _cfgfile = _finder.find('sysconfig.cfg') + assert _cfgfile, 'sysconfig.cfg exists' + with _cfgfile.as_stream() as s: + _SCHEMES.readfp(s) + if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + _SCHEMES.set(scheme, 'include', '{srcdir}/Include') + _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') + + _cfg_read = True + + +_SCHEMES = configparser.RawConfigParser() +_VAR_REPL = re.compile(r'\{([^{]*?)\}') + +def _expand_globals(config): + _ensure_cfg_read() + if config.has_section('globals'): + globals = config.items('globals') + else: + globals = tuple() + + sections = config.sections() + for section in sections: + if section == 'globals': + continue + for option, value in globals: + if config.has_option(section, option): + continue + config.set(section, option, value) + config.remove_section('globals') + + # now expanding local variables defined in the cfg file + # + for section in config.sections(): + variables = dict(config.items(section)) + + def _replacer(matchobj): + name = matchobj.group(1) + if name in variables: + return variables[name] + return matchobj.group(0) + + for option, value in config.items(section): + config.set(section, option, _VAR_REPL.sub(_replacer, value)) + +#_expand_globals(_SCHEMES) + + # FIXME don't rely on sys.version here, its format is an implementation detail + # of CPython, use sys.version_info or sys.hexversion +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = sys.version[:3] +_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PREFIX = os.path.normpath(sys.prefix) +_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +_CONFIG_VARS = None +_USER_BASE = None + + +def _subst_vars(path, local_vars): + """In the string `path`, replace tokens like {some.thing} with the + corresponding value from the map `local_vars`. 
+ + If there is no corresponding value, leave the token unchanged. + """ + def _replacer(matchobj): + name = matchobj.group(1) + if name in local_vars: + return local_vars[name] + elif name in os.environ: + return os.environ[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, path) + + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + + for key, value in _SCHEMES.items(scheme): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def format_value(value, vars): + def _replacer(matchobj): + name = matchobj.group(1) + if name in vars: + return vars[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, value) + + +def _get_default_scheme(): + if os.name == 'posix': + # the default scheme for posix is posix_prefix + return 'posix_prefix' + return os.name + + +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # what about 'os2emx', 'riscos' ? + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + return env_base + else: + return joinuser(base, "Python") + + if sys.platform == "darwin": + framework = get_config_var("PYTHONFRAMEWORK") + if framework: + if env_base: + return env_base + else: + return joinuser("~", "Library", framework, "%d.%d" % + sys.version_info[:2]) + + if env_base: + return env_base + else: + return joinuser("~", ".local") + + +def _parse_makefile(filename, vars=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + # Regexes needed for parsing Makefile (and similar syntaxes, + # like old-style Setup files). + _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") + _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + + if vars is None: + vars = {} + done = {} + notdone = {} + + with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. 
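+    # For example, a Makefile may define PY_CFLAGS in terms of $(CFLAGS);
+    # the loop below resolves such references and, at the end, also
+    # publishes the PY_-prefixed value under the plain name.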
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if (name.startswith('PY_') and + name[3:] in renamed_variables): + + name = name[3:] + if name not in done: + done[name] = value + + else: + # bogus variable reference (e.g. "prefix=$/opt/python"); + # just drop it since we can't deal + done[name] = value + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + if hasattr(sys, 'abiflags'): + config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) + else: + config_dir_name = 'config' + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % makefile + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h) as f: + parse_config_h(f, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % config_h + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['LDSHARED'] = vars['BLDSHARED'] + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + vars['SO'] = '.pyd' + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
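+
+    For example, a line such as "#define HAVE_UNISTD_H 1" produces the
+    entry vars['HAVE_UNISTD_H'] == 1 (values that parse as integers are
+    converted), and "/* #undef NAME */" lines are recorded as 0.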
+ """ + if vars is None: + vars = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, "PC") + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_SCHEMES.sections())) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + # xxx see if we want a static list + return _SCHEMES.options('posix_prefix') + + +def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + _ensure_cfg_read() + if expand: + return _expand_vars(scheme, vars) + else: + return dict(_SCHEMES.items(scheme)) + + +def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows and Mac OS it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS + if _CONFIG_VARS is None: + _CONFIG_VARS = {} + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # distutils2 module. + _CONFIG_VARS['prefix'] = _PREFIX + _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] + _CONFIG_VARS['base'] = _PREFIX + _CONFIG_VARS['platbase'] = _EXEC_PREFIX + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + try: + _CONFIG_VARS['abiflags'] = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. + _CONFIG_VARS['abiflags'] = '' + + if os.name in ('nt', 'os2'): + _init_non_posix(_CONFIG_VARS) + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. + if sys.version >= '2.6': + _CONFIG_VARS['userbase'] = _getuserbase() + + if 'srcdir' not in _CONFIG_VARS: + _CONFIG_VARS['srcdir'] = _PROJECT_BASE + else: + _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. 
+ if _PYTHON_BUILD and os.name == "posix": + base = _PROJECT_BASE + try: + cwd = os.getcwd() + except OSError: + cwd = None + if (not os.path.isabs(_CONFIG_VARS['srcdir']) and + base != cwd): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) + _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) + + if sys.platform == 'darwin': + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _CONFIG_VARS[key] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _CONFIG_VARS[key] = flags + else: + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _CONFIG_VARS[key] = flags + + # If we're on OSX 10.5 or later and the user tries to + # compiles an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. + # + # The major usecase for this is users using a Python.org + # binary installer on OSX 10.6: that installer uses + # the 10.4u SDK, but that SDK is not installed by default + # when you install Xcode. + # + CFLAGS = _CONFIG_VARS.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', CFLAGS) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) + _CONFIG_VARS[key] = flags + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name + and version and the architecture (as supplied by 'os.uname()'), + although the exact information included depends on the OS; eg. for IRIX + the architecture isn't particularly important (IRIX only runs on SGI + hardware), but for Linux the kernel version isn't particularly + important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
+    solaris-2.6-sun4u
+    irix-5.3
+    irix64-6.2
+
+    Windows will return one of:
+       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
+       win-ia64 (64bit Windows on Itanium)
+       win32 (all others - specifically, sys.platform is returned)
+
+    For other non-POSIX platforms, currently just returns 'sys.platform'.
+    """
+    if os.name == 'nt':
+        # sniff sys.version for architecture.
+        prefix = " bit ("
+        i = sys.version.find(prefix)
+        if i == -1:
+            return sys.platform
+        j = sys.version.find(")", i)
+        look = sys.version[i+len(prefix):j].lower()
+        if look == 'amd64':
+            return 'win-amd64'
+        if look == 'itanium':
+            return 'win-ia64'
+        return sys.platform
+
+    if os.name != "posix" or not hasattr(os, 'uname'):
+        # XXX what about the architecture? NT is Intel or Alpha,
+        # Mac OS is M68k or PPC, etc.
+        return sys.platform
+
+    # Try to distinguish various flavours of Unix
+    osname, host, release, version, machine = os.uname()
+
+    # Convert the OS name to lowercase, remove '/' characters
+    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
+    osname = osname.lower().replace('/', '')
+    machine = machine.replace(' ', '_')
+    machine = machine.replace('/', '-')
+
+    if osname[:5] == "linux":
+        # At least on Linux/Intel, 'machine' is the processor --
+        # i386, etc.
+        # XXX what about Alpha, SPARC, etc?
+        return "%s-%s" % (osname, machine)
+    elif osname[:5] == "sunos":
+        if release[0] >= "5":           # SunOS 5 == Solaris 2
+            osname = "solaris"
+            release = "%d.%s" % (int(release[0]) - 3, release[2:])
+        # fall through to standard osname-release-machine representation
+    elif osname[:4] == "irix":          # could be "irix64"!
+        return "%s-%s" % (osname, release)
+    elif osname[:3] == "aix":
+        return "%s-%s.%s" % (osname, version, release)
+    elif osname[:6] == "cygwin":
+        osname = "cygwin"
+        rel_re = re.compile(r'[\d.]+')
+        m = rel_re.match(release)
+        if m:
+            release = m.group()
+    elif osname[:6] == "darwin":
+        #
+        # For our purposes, we'll assume that the system version from
+        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
+        # to. This makes the compatibility story a bit more sane because the
+        # machine is going to compile and link as if it were
+        # MACOSX_DEPLOYMENT_TARGET.
+        cfgvars = get_config_vars()
+        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
+
+        if True:
+            # Always calculate the release of the running machine,
+            # needed to determine if we can build fat binaries or not.
+
+            macrelease = macver
+            # Get the system version. Reading this plist is a documented
+            # way to get the system version (see the documentation for
+            # the Gestalt Manager)
+            try:
+                f = open('/System/Library/CoreServices/SystemVersion.plist')
+            except IOError:
+                # We're on a plain darwin box, fall back to the default
+                # behaviour.
+                pass
+            else:
+                try:
+                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+                                  r'<string>(.*?)</string>', f.read())
+                finally:
+                    f.close()
+                if m is not None:
+                    macrelease = '.'.join(m.group(1).split('.')[:2])
+                # else: fall back to the default behaviour
+
+        if not macver:
+            macver = macrelease
+
+        if macver:
+            release = macver
+            osname = "macosx"
+
+            if ((macrelease + '.') >= '10.4.' and
+                    '-arch' in get_config_vars().get('CFLAGS', '').strip()):
+                # The universal build will build fat binaries, but not on
+                # systems before 10.4
+                #
+                # Try to detect 4-way universal builds, those have machine-type
+                # 'universal' instead of 'fat'.
+ + machine = 'fat' + cflags = get_config_vars().get('CFLAGS') + + archs = re.findall(r'-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return "%s-%s-%s" % (osname, release, machine) + + +def get_python_version(): + return _PY_VERSION_SHORT + + +def _print_dict(title, data): + for index, (key, value) in enumerate(sorted(data.items())): + if index == 0: + print('%s: ' % (title)) + print('\t%s = "%s"' % (key, value)) + + +def _main(): + """Display all information sysconfig detains.""" + print('Platform: "%s"' % get_platform()) + print('Python version: "%s"' % get_python_version()) + print('Current installation scheme: "%s"' % _get_default_scheme()) + print() + _print_dict('Paths', get_paths()) + print() + _print_dict('Variables', get_config_vars()) + + +if __name__ == '__main__': + _main() diff --git a/pipenv/vendor/distlib/_backport/tarfile.py b/pipenv/vendor/distlib/_backport/tarfile.py new file mode 100644 index 0000000000..d66d856637 --- /dev/null +++ b/pipenv/vendor/distlib/_backport/tarfile.py @@ -0,0 +1,2607 @@ +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from __future__ import print_function + +"""Read from and write to tar format archives. 
+""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. 
+PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. +#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. 
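+       The chksum field itself occupies bytes 148-156 of the 512 byte
+       header block; the constant 256 below accounts for those eight
+       bytes counted as spaces (8 * 0x20).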
+ """ + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + if length == 0: + return + if length is None: + while True: + buf = src.read(16*1024) + if not buf: + break + dst.write(buf) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise IOError("end of file reached") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise IOError("end of file reached") + dst.write(buf) + return + +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form + -rwxrwxrwx. + Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile(object): + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream(object): + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. 
+
+    _Stream is intended to be used only internally.
+    """
+
+    def __init__(self, name, mode, comptype, fileobj, bufsize):
+        """Construct a _Stream object.
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name = name or ""
+        self.mode = mode
+        self.comptype = comptype
+        self.fileobj = fileobj
+        self.bufsize = bufsize
+        self.buf = b""
+        self.pos = 0
+        self.closed = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available")
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self._init_read_gz()
+                else:
+                    self._init_write_gz()
+
+            if comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available")
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                else:
+                    self.cmp = bz2.BZ2Compressor()
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+                                         -self.zlib.MAX_WBITS,
+                                         self.zlib.DEF_MEM_LEVEL,
+                                         0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        if self.mode == "w" and self.comptype != "tar":
+            self.buf += self.cmp.flush()
+
+        if self.mode == "w" and self.buf:
+            self.fileobj.write(self.buf)
+            self.buf = b""
+            if self.comptype == "gz":
+                # The native zlib crc is an unsigned 32-bit integer, but
+                # the Python wrapper implicitly casts that to a signed C
+                # long. So, on a 32-bit box self.crc may "look negative",
+                # while the same crc on a 64-bit box may "look positive".
+                # To avoid irksome warnings from the `struct` module, force
+                # it to look positive on all boxes.
+                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
+                self.fileobj.write(struct.pack("<L", self.pos & 0x7fffFFFF))
+
+        if not self._extfileobj:
+            self.fileobj.close()
+
+        self.closed = True
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size=None):
+        """Return the next size number of bytes from the stream.
+           If size is not defined, return all bytes of the stream
+           up to EOF.
+        """
+        if size is None:
+            t = []
+            while True:
+                buf = self._read(self.bufsize)
+                if not buf:
+                    break
+                t.append(buf)
+            buf = "".join(t)
+        else:
+            buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        while c < size:
+            buf = self.__read(self.bufsize)
+            if not buf:
+                break
+            try:
+                buf = self.cmp.decompress(buf)
+            except IOError:
+                raise ReadError("invalid compressed data")
+            self.dbuf += buf
+            c += len(buf)
+        buf = self.dbuf[:size]
+        self.dbuf = self.dbuf[size:]
+        return buf
+
+    def __read(self, size):
+        """Return size bytes from stream. If internal buffer is empty,
+           read another block from the stream.
+ """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\037\213\010"): + return "gz" + if self.buf.startswith(b"BZh91"): + return "bz2" + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +class _BZ2Proxy(object): + """Small proxy class that enables external file object + support for "r:bz2" and "w:bz2" modes. This is actually + a workaround for a limitation in bz2 module's BZ2File + class which (unlike gzip.GzipFile) has no support for + a file object argument. + """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. 
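+           Data blocks are read from the underlying file object, while
+           holes in sparse members are synthesized as NUL bytes from the
+           block map.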
+ """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. 
+ """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. 
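+           Overlong name or linkname values are emitted as additional
+           GNUTYPE_LONGNAME/GNUTYPE_LONGLINK pseudo members preceding
+           the real header block.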
+ """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. 
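+           The checksum is calculated over the assembled block and then
+           patched into the checksum field (bytes 148-156).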
+ """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. 
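+           Raises a HeaderError subclass (EmptyHeaderError,
+           TruncatedHeaderError, EOFHeaderError or InvalidHeaderError)
+           if the buffer cannot be parsed as a valid header.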
+ """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. 
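+        # (Global pax headers, collected earlier in tarfile.pax_headers,
+        # apply to this and every following member.)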
+ self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf8" as the encoding and "strict" + # as the error handler, but we better not take the risk. 
For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf8", "utf8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf8", "utf8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. 
+ self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. 
+ Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. + """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. 
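+        # With the default dereference=False a symlink is stored as a SYMTYPE
+        # member that records its target name, while dereference=True stats
+        # through the link and archives the pointed-to file's contents.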
+        if fileobj is None:
+            if hasattr(os, "lstat") and not self.dereference:
+                statres = os.lstat(name)
+            else:
+                statres = os.stat(name)
+        else:
+            statres = os.fstat(fileobj.fileno())
+        linkname = ""
+
+        stmd = statres.st_mode
+        if stat.S_ISREG(stmd):
+            inode = (statres.st_ino, statres.st_dev)
+            if not self.dereference and statres.st_nlink > 1 and \
+                    inode in self.inodes and arcname != self.inodes[inode]:
+                # Is it a hardlink to an already
+                # archived file?
+                type = LNKTYPE
+                linkname = self.inodes[inode]
+            else:
+                # The inode is added only if it is valid.
+                # For win32 it is always 0.
+                type = REGTYPE
+                if inode[0]:
+                    self.inodes[inode] = arcname
+        elif stat.S_ISDIR(stmd):
+            type = DIRTYPE
+        elif stat.S_ISFIFO(stmd):
+            type = FIFOTYPE
+        elif stat.S_ISLNK(stmd):
+            type = SYMTYPE
+            linkname = os.readlink(name)
+        elif stat.S_ISCHR(stmd):
+            type = CHRTYPE
+        elif stat.S_ISBLK(stmd):
+            type = BLKTYPE
+        else:
+            return None
+
+        # Fill the TarInfo object with all
+        # information we can get.
+        tarinfo.name = arcname
+        tarinfo.mode = stmd
+        tarinfo.uid = statres.st_uid
+        tarinfo.gid = statres.st_gid
+        if type == REGTYPE:
+            tarinfo.size = statres.st_size
+        else:
+            tarinfo.size = 0
+        tarinfo.mtime = statres.st_mtime
+        tarinfo.type = type
+        tarinfo.linkname = linkname
+        if pwd:
+            try:
+                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+            except KeyError:
+                pass
+        if grp:
+            try:
+                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+            except KeyError:
+                pass
+
+        if type in (CHRTYPE, BLKTYPE):
+            if hasattr(os, "major") and hasattr(os, "minor"):
+                tarinfo.devmajor = os.major(statres.st_rdev)
+                tarinfo.devminor = os.minor(statres.st_rdev)
+        return tarinfo
+
+    def list(self, verbose=True):
+        """Print a table of contents to sys.stdout. If `verbose' is False, only
+           the names of the members are printed. If it is True, an `ls -l'-like
+           output is produced.
+        """
+        self._check()
+
+        for tarinfo in self:
+            if verbose:
+                print(filemode(tarinfo.mode), end=' ')
+                print("%s/%s" % (tarinfo.uname or tarinfo.uid,
+                                 tarinfo.gname or tarinfo.gid), end=' ')
+                if tarinfo.ischr() or tarinfo.isblk():
+                    print("%10s" % ("%d,%d" \
+                                    % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
+                else:
+                    print("%10d" % tarinfo.size, end=' ')
+                print("%d-%02d-%02d %02d:%02d:%02d" \
+                      % time.localtime(tarinfo.mtime)[:6], end=' ')
+
+            print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
+
+            if verbose:
+                if tarinfo.issym():
+                    print("->", tarinfo.linkname, end=' ')
+                if tarinfo.islnk():
+                    print("link to", tarinfo.linkname, end=' ')
+            print()
+
+    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
+        """Add the file `name' to the archive. `name' may be any type of file
+           (directory, fifo, symbolic link, etc.). If given, `arcname'
+           specifies an alternative name for the file in the archive.
+           Directories are added recursively by default. This can be avoided by
+           setting `recursive' to False. `exclude' is a function that should
+           return True for each filename to be excluded. `filter' is a function
+           that expects a TarInfo object argument and returns the changed
+           TarInfo object; if it returns None, the TarInfo object will be
+           excluded from the archive.
+        """
+        self._check("aw")
+
+        if arcname is None:
+            arcname = name
+
+        # Exclude pathnames.
+        if exclude is not None:
+            import warnings
+            warnings.warn("use the filter argument instead",
+                          DeprecationWarning, 2)
+            if exclude(name):
+                self._dbg(2, "tarfile: Excluded %r" % name)
+                return
+
+        # Skip if somebody tries to archive the archive...
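+        # For example, TarFile.open("backup.tar", "w").add(".") would
+        # otherwise try to read "backup.tar" while it is being written;
+        # comparing absolute paths breaks that recursion.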
+ if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). 
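+        # Hard link members store archive-relative names, so the target is
+        # re-anchored under `path': a member whose linkname is "pkg/data.bin",
+        # extracted with path="/tmp/out", should point at
+        # "/tmp/out/pkg/data.bin".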
+ if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. 
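+    # A sketch of how a subclass might hook one of these methods; the
+    # LoggingTarFile name is purely illustrative and not part of this module:
+    #
+    #     class LoggingTarFile(TarFile):
+    #         def makefile(self, tarinfo, targetpath):
+    #             self._dbg(1, "writing %s" % targetpath)
+    #             return TarFile.makefile(self, tarinfo, targetpath)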
+
+    def makedir(self, tarinfo, targetpath):
+        """Make a directory called targetpath.
+        """
+        try:
+            # Use a safe mode for the directory, the real mode is set
+            # later in _extract_member().
+            os.mkdir(targetpath, 0o700)
+        except EnvironmentError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def makefile(self, tarinfo, targetpath):
+        """Make a file called targetpath.
+        """
+        source = self.fileobj
+        source.seek(tarinfo.offset_data)
+        target = bltn_open(targetpath, "wb")
+        if tarinfo.sparse is not None:
+            for offset, size in tarinfo.sparse:
+                target.seek(offset)
+                copyfileobj(source, target, size)
+        else:
+            copyfileobj(source, target, tarinfo.size)
+        target.seek(tarinfo.size)
+        target.truncate()
+        target.close()
+
+    def makeunknown(self, tarinfo, targetpath):
+        """Make a file from a TarInfo object with an unknown type
+           at targetpath.
+        """
+        self.makefile(tarinfo, targetpath)
+        self._dbg(1, "tarfile: Unknown file type %r, " \
+                     "extracted as regular file." % tarinfo.type)
+
+    def makefifo(self, tarinfo, targetpath):
+        """Make a fifo called targetpath.
+        """
+        if hasattr(os, "mkfifo"):
+            os.mkfifo(targetpath)
+        else:
+            raise ExtractError("fifo not supported by system")
+
+    def makedev(self, tarinfo, targetpath):
+        """Make a character or block device called targetpath.
+        """
+        if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
+            raise ExtractError("special devices not supported by system")
+
+        mode = tarinfo.mode
+        if tarinfo.isblk():
+            mode |= stat.S_IFBLK
+        else:
+            mode |= stat.S_IFCHR
+
+        os.mknod(targetpath, mode,
+                 os.makedev(tarinfo.devmajor, tarinfo.devminor))
+
+    def makelink(self, tarinfo, targetpath):
+        """Make a (symbolic) link called targetpath. If it cannot be created
+           (platform limitation), we try to make a copy of the referenced file
+           instead of a link.
+        """
+        try:
+            # For systems that support symbolic and hard links.
+            if tarinfo.issym():
+                os.symlink(tarinfo.linkname, targetpath)
+            else:
+                # See extract().
+                if os.path.exists(tarinfo._link_target):
+                    os.link(tarinfo._link_target, targetpath)
+                else:
+                    self._extract_member(self._find_link_target(tarinfo),
+                                         targetpath)
+        except symlink_exception:
+            # The platform cannot create (sym)links: fall back to extracting
+            # a copy of the link's target from the archive, as the docstring
+            # promises.
+            try:
+                self._extract_member(self._find_link_target(tarinfo),
+                                     targetpath)
+            except KeyError:
+                raise ExtractError("unable to resolve link inside archive")
+
+    def chown(self, tarinfo, targetpath):
+        """Set owner of targetpath according to tarinfo.
+        """
+        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
+            # We have to be root to do so.
+            try:
+                g = grp.getgrnam(tarinfo.gname)[2]
+            except KeyError:
+                g = tarinfo.gid
+            try:
+                u = pwd.getpwnam(tarinfo.uname)[2]
+            except KeyError:
+                u = tarinfo.uid
+            try:
+                if tarinfo.issym() and hasattr(os, "lchown"):
+                    os.lchown(targetpath, u, g)
+                else:
+                    if sys.platform != "os2emx":
+                        os.chown(targetpath, u, g)
+            except EnvironmentError as e:
+                raise ExtractError("could not change owner")
+
+    def chmod(self, tarinfo, targetpath):
+        """Set file permissions of targetpath according to tarinfo.
+        """
+        if hasattr(os, 'chmod'):
+            try:
+                os.chmod(targetpath, tarinfo.mode)
+            except EnvironmentError as e:
+                raise ExtractError("could not change mode")
+
+    def utime(self, tarinfo, targetpath):
+        """Set modification time of targetpath according to tarinfo.
+ """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except EnvironmentError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Read the next block. + self.fileobj.seek(self.offset) + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. 
+ """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open diff --git a/pipenv/vendor/distlib/compat.py b/pipenv/vendor/distlib/compat.py new file mode 100644 index 0000000000..ff328c8ee4 --- /dev/null +++ b/pipenv/vendor/distlib/compat.py @@ -0,0 +1,1120 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +from __future__ import absolute_import + +import os +import re +import sys + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +if sys.version_info[0] < 3: # pragma: no cover + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from ._backport import shutil + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib2 import HTTPSHandler + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + _userprog = None + def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + +else: # pragma: no cover + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPHandler, HTTPRedirectHandler, + build_opener) + if ssl: + from urllib.request import HTTPSHandler + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + +try: + from ssl import match_hostname, CertificateError +except ImportError: # pragma: no cover + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. 
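+            # For example, dn == "*.example.com" turns the leftmost label
+            # into the pattern [^.]+, so "www.example.com" matches while the
+            # bare "example.com" and the deeper "a.b.example.com" do not.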
+ pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: # pragma: no cover + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: # pragma: no cover + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. 
./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. + if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): # pragma: no cover + ZipFile = BaseZipFile +else: # pragma: no cover + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + +try: + import sysconfig +except ImportError: # pragma: no cover + from ._backport import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + # Issue #99: on some systems (e.g. containerised), + # sys.getfilesystemencoding() returns None, and we need a real value, + # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and + # sys.getfilesystemencoding(): the return value is "the user’s preference + # according to the result of nl_langinfo(CODESET), or None if the + # nl_langinfo(CODESET) failed." 
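+    # Roughly, under the utf-8/surrogateescape fallback below,
+    # fsencode(u'caf\xe9') yields b'caf\xc3\xa9'; only an 'mbcs' filesystem
+    # encoding (Windows) keeps strict error handling, mirroring os.fsencode.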
+ _fsencoding = sys.getfilesystemencoding() or 'utf-8' + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + import re + + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + """ + try: + filename = readline.__self__.name + except AttributeError: + filename = None + bom_found = False + encoding = None + default = 'utf-8' + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' + + def find_cookie(line): + try: + # Decode as UTF-8. Either the line is an encoding declaration, + # in which case it should be pure ASCII, or it must be UTF-8 + # per default encoding. 
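+                # Typical inputs here are a shebang line or a coding cookie,
+                # e.g. b"# -*- coding: latin-1 -*-\n"; a valid declaration is
+                # pure ASCII, so decoding it as UTF-8 succeeds.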
+                line_string = line.decode('utf-8')
+            except UnicodeDecodeError:
+                msg = "invalid or missing encoding declaration"
+                if filename is not None:
+                    msg = '{} for {!r}'.format(msg, filename)
+                raise SyntaxError(msg)
+
+            matches = cookie_re.findall(line_string)
+            if not matches:
+                return None
+            encoding = _get_normal_name(matches[0])
+            try:
+                codec = lookup(encoding)
+            except LookupError:
+                # This behaviour mimics the Python interpreter
+                if filename is None:
+                    msg = "unknown encoding: " + encoding
+                else:
+                    msg = "unknown encoding for {!r}: {}".format(filename,
+                                                                 encoding)
+                raise SyntaxError(msg)
+
+            if bom_found:
+                if codec.name != 'utf-8':
+                    # This behaviour mimics the Python interpreter
+                    if filename is None:
+                        msg = 'encoding problem: utf-8'
+                    else:
+                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
+                    raise SyntaxError(msg)
+                encoding += '-sig'
+            return encoding
+
+        first = read_or_stop()
+        if first.startswith(BOM_UTF8):
+            bom_found = True
+            first = first[3:]
+            default = 'utf-8-sig'
+        if not first:
+            return default, []
+
+        encoding = find_cookie(first)
+        if encoding:
+            return encoding, [first]
+
+        second = read_or_stop()
+        if not second:
+            return default, [first]
+
+        encoding = find_cookie(second)
+        if encoding:
+            return encoding, [first, second]
+
+        return default, [first, second]
+
+# For converting & <-> &amp; etc.
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
+if sys.version_info[:2] < (3, 4):
+    unescape = HTMLParser().unescape
+else:
+    from html import unescape
+
+try:
+    from collections import ChainMap
+except ImportError: # pragma: no cover
+    from collections import MutableMapping
+
+    try:
+        from reprlib import recursive_repr as _recursive_repr
+    except ImportError:
+        # get_ident is needed by wrapper() below; its home module differs
+        # across Python versions.
+        try:
+            from thread import get_ident
+        except ImportError:
+            try:
+                from _thread import get_ident
+            except ImportError:
+                from dummy_thread import get_ident
+
+        def _recursive_repr(fillvalue='...'):
+            '''
+            Decorator to make a repr function return fillvalue for a recursive
+            call
+            '''
+
+            def decorating_function(user_function):
+                repr_running = set()
+
+                def wrapper(self):
+                    key = id(self), get_ident()
+                    if key in repr_running:
+                        return fillvalue
+                    repr_running.add(key)
+                    try:
+                        result = user_function(self)
+                    finally:
+                        repr_running.discard(key)
+                    return result
+
+                # Can't use functools.wraps() here because of bootstrap issues
+                wrapper.__module__ = getattr(user_function, '__module__')
+                wrapper.__doc__ = getattr(user_function, '__doc__')
+                wrapper.__name__ = getattr(user_function, '__name__')
+                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+                return wrapper
+
+            return decorating_function
+
+    class ChainMap(MutableMapping):
+        ''' A ChainMap groups multiple dicts (or other mappings) together
+        to create a single, updateable view.
+
+        The underlying mappings are stored in a list. That list is public and
+        can be accessed or updated using the *maps* attribute. There is no
+        other state.
+
+        Lookups search the underlying mappings successively until a key is
+        found. In contrast, writes, updates, and deletions only operate on the
+        first mapping.
+
+        '''
+
+        def __init__(self, *maps):
+            '''Initialize a ChainMap by setting *maps* to the given mappings.
+            If no mappings are provided, a single empty dictionary is used.
+
+            '''
+            self.maps = list(maps) or [{}] # always at least one map
+
+        def __missing__(self, key):
+            raise KeyError(key)
+
+        def __getitem__(self, key):
+            for mapping in self.maps:
+                try:
+                    return mapping[key] # can't use 'key in mapping' with defaultdict
+                except KeyError:
+                    pass
+            return self.__missing__(key) # support subclasses that define __missing__
+
+        def get(self, key, default=None):
+            return self[key] if key in self else default
+
+        def __len__(self):
+            return len(set().union(*self.maps)) # reuses stored hash values if possible
+
+        def __iter__(self):
+            return iter(set().union(*self.maps))
+
+        def __contains__(self, key):
+            return any(key in m for m in self.maps)
+
+        def __bool__(self):
+            return any(self.maps)
+
+        @_recursive_repr()
+        def __repr__(self):
+            return '{0.__class__.__name__}({1})'.format(
+                self, ', '.join(map(repr, self.maps)))
+
+        @classmethod
+        def fromkeys(cls, iterable, *args):
+            'Create a ChainMap with a single dict created from the iterable.'
+            return cls(dict.fromkeys(iterable, *args))
+
+        def copy(self):
+            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+            return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+        __copy__ = copy
+
+        def new_child(self): # like Django's Context.push()
+            'New ChainMap with a new dict followed by all previous maps.'
+            return self.__class__({}, *self.maps)
+
+        @property
+        def parents(self): # like Django's Context.pop()
+            'New ChainMap from maps[1:].'
+            return self.__class__(*self.maps[1:])
+
+        def __setitem__(self, key, value):
+            self.maps[0][key] = value
+
+        def __delitem__(self, key):
+            try:
+                del self.maps[0][key]
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def popitem(self):
+            'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+            try:
+                return self.maps[0].popitem()
+            except KeyError:
+                raise KeyError('No keys found in the first mapping.')
+
+        def pop(self, key, *args):
+            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+            try:
+                return self.maps[0].pop(key, *args)
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def clear(self):
+            'Clear maps[0], leaving maps[1:] intact.'
+            self.maps[0].clear()
+
+try:
+    from importlib.util import cache_from_source # Python >= 3.4
+except ImportError: # pragma: no cover
+    try:
+        from imp import cache_from_source
+    except ImportError: # pragma: no cover
+        def cache_from_source(path, debug_override=None):
+            assert path.endswith('.py')
+            if debug_override is None:
+                debug_override = __debug__
+            if debug_override:
+                suffix = 'c'
+            else:
+                suffix = 'o'
+            return path + suffix
+
+try:
+    from collections import OrderedDict
+except ImportError: # pragma: no cover
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+    try:
+        from thread import get_ident as _get_ident
+    except ImportError:
+        from dummy_thread import get_ident as _get_ident
+
+    try:
+        from _abcoll import KeysView, ValuesView, ItemsView
+    except ImportError:
+        pass
+
+
+    class OrderedDict(dict):
+        'Dictionary that remembers insertion order'
+        # An inherited dict maps keys to values.
+        # The inherited dict provides __getitem__, __len__, __contains__, and get.
+        # The remaining methods are order-aware.
+        # Big-O running times for all methods are the same as for regular dictionaries.
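+        # Rough sketch of the layout described below: after od['a'] = 1 and
+        # od['b'] = 2 the circular list is
+        #     root <-> [root, link_b, 'a'] <-> [link_a, root, 'b'] <-> root
+        # where each link is [PREV, NEXT, KEY] and iteration simply walks the
+        # NEXT pointers starting at the sentinel root.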
+ + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
+ + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. 
+ + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. + + class ConvertingDict(dict): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def get(self, key, default=None): + value = dict.get(self, key, default) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class ConvertingList(list): + """A converting list wrapper.""" + def __getitem__(self, key): + value = list.__getitem__(self, key) + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def pop(self, idx=-1): + value = list.pop(self, idx) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + return result + + class ConvertingTuple(tuple): + """A converting tuple wrapper.""" + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. 
+    """
+
+    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+    DIGIT_PATTERN = re.compile(r'^\d+$')
+
+    value_converters = {
+        'ext' : 'ext_convert',
+        'cfg' : 'cfg_convert',
+    }
+
+    # We might want to use a different one, e.g. importlib
+    importer = staticmethod(__import__)
+
+    def __init__(self, config):
+        self.config = ConvertingDict(config)
+        self.config.configurator = self
+
+    def resolve(self, s):
+        """
+        Resolve strings to objects using standard import and attribute
+        syntax.
+        """
+        name = s.split('.')
+        used = name.pop(0)
+        try:
+            found = self.importer(used)
+            for frag in name:
+                used += '.' + frag
+                try:
+                    found = getattr(found, frag)
+                except AttributeError:
+                    self.importer(used)
+                    found = getattr(found, frag)
+            return found
+        except ImportError:
+            e, tb = sys.exc_info()[1:]
+            v = ValueError('Cannot resolve %r: %s' % (s, e))
+            v.__cause__, v.__traceback__ = e, tb
+            raise v
+
+    def ext_convert(self, value):
+        """Default converter for the ext:// protocol."""
+        return self.resolve(value)
+
+    def cfg_convert(self, value):
+        """Default converter for the cfg:// protocol."""
+        rest = value
+        m = self.WORD_PATTERN.match(rest)
+        if m is None:
+            raise ValueError("Unable to convert %r" % value)
+        else:
+            rest = rest[m.end():]
+            d = self.config[m.groups()[0]]
+            #print d, rest
+            while rest:
+                m = self.DOT_PATTERN.match(rest)
+                if m:
+                    d = d[m.groups()[0]]
+                else:
+                    m = self.INDEX_PATTERN.match(rest)
+                    if m:
+                        idx = m.groups()[0]
+                        if not self.DIGIT_PATTERN.match(idx):
+                            d = d[idx]
+                        else:
+                            try:
+                                n = int(idx) # try as number first (most likely)
+                                d = d[n]
+                            except TypeError:
+                                d = d[idx]
+                if m:
+                    rest = rest[m.end():]
+                else:
+                    raise ValueError('Unable to convert '
+                                     '%r at %r' % (value, rest))
+        #rest should be empty
+        return d
+
+    def convert(self, value):
+        """
+        Convert values to an appropriate type. dicts, lists and tuples are
+        replaced by their converting alternatives. Strings are checked to
+        see if they have a conversion format and are converted if they do.
+ """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, string_types): + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value diff --git a/pipenv/vendor/distlib/database.py b/pipenv/vendor/distlib/database.py new file mode 100644 index 0000000000..a19905e215 --- /dev/null +++ b/pipenv/vendor/distlib/database.py @@ -0,0 +1,1336 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""PEP 376 implementation.""" + +from __future__ import unicode_literals + +import base64 +import codecs +import contextlib +import hashlib +import logging +import os +import posixpath +import sys +import zipimport + +from . import DistlibException, resources +from .compat import StringIO +from .version import get_scheme, UnsupportedVersionError +from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME +from .util import (parse_requirement, cached_property, parse_name_and_version, + read_exports, write_exports, CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +EXPORTS_FILENAME = 'pydist-exports.json' +COMMANDS_FILENAME = 'pydist-commands.json' + +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', + 'RESOURCES', EXPORTS_FILENAME, 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. 
+ :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. + """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + # We need to check if we've seen some resources already, because on + # some Linux systems (e.g. some Debian/Ubuntu variants) there are + # symlinks which alias other files in the environment. + seen = set() + for path in self.path: + finder = resources.finder_for_path(path) + if finder is None: + continue + r = finder.find('') + if not r or not r.is_container: + continue + rset = sorted(r.resources) + for entry in rset: + r = finder.find(entry) + if not r or r.path in seen: + continue + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue + + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. + """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. 
+        :type version: string
+        :returns: directory name
+        :rtype: string"""
+        name = name.replace('-', '_')
+        return '-'.join([name, version]) + DISTINFO_EXT
+
+    def get_distributions(self):
+        """
+        Provides an iterator that looks for distributions and returns
+        :class:`InstalledDistribution` or
+        :class:`EggInfoDistribution` instances for each one of them.
+
+        :rtype: iterator of :class:`InstalledDistribution` and
+                :class:`EggInfoDistribution` instances
+        """
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                yield dist
+        else:
+            self._generate_cache()
+
+            for dist in self._cache.path.values():
+                yield dist
+
+            if self._include_egg:
+                for dist in self._cache_egg.path.values():
+                    yield dist
+
+    def get_distribution(self, name):
+        """
+        Looks for a named distribution on the path.
+
+        This function only returns the first result found, as no more than one
+        value is expected. If nothing is found, ``None`` is returned.
+
+        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
+                or ``None``
+        """
+        result = None
+        name = name.lower()
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                if dist.key == name:
+                    result = dist
+                    break
+        else:
+            self._generate_cache()
+
+            if name in self._cache.name:
+                result = self._cache.name[name][0]
+            elif self._include_egg and name in self._cache_egg.name:
+                result = self._cache_egg.name[name][0]
+        return result
+
+    def provides_distribution(self, name, version=None):
+        """
+        Iterates over all distributions to find which distributions provide *name*.
+        If a *version* is provided, it will be used to filter the results.
+
+        This is a generator: every distribution whose *name* (and *version*,
+        if given) matches is yielded, so more than one result may be produced.
+
+        :parameter version: a version specifier that indicates the version
+                            required, conforming to the format in ``PEP-345``
+
+        :type name: string
+        :type version: string
+        """
+        matcher = None
+        if version is not None:
+            try:
+                matcher = self._scheme.matcher('%s (%s)' % (name, version))
+            except ValueError:
+                raise DistlibException('invalid name or version: %r, %r' %
+                                       (name, version))
+
+        for dist in self.get_distributions():
+            # We hit a problem on Travis where enum34 was installed and doesn't
+            # have a provides attribute ...
+            if not hasattr(dist, 'provides'):
+                logger.debug('No "provides": %s', dist)
+            else:
+                provided = dist.provides
+
+                for p in provided:
+                    p_name, p_ver = parse_name_and_version(p)
+                    if matcher is None:
+                        if p_name == name:
+                            yield dist
+                            break
+                    else:
+                        if p_name == name and matcher.match(p_ver):
+                            yield dist
+                            break
+
+    def get_file_path(self, name, relative_path):
+        """
+        Return the path to a resource file.
+        """
+        dist = self.get_distribution(name)
+        if dist is None:
+            raise LookupError('no distribution named %r found' % name)
+        return dist.get_resource_path(relative_path)
+
+    def get_exported_entries(self, category, name=None):
+        """
+        Return all of the exported entries in a particular category.
+
+        :param category: The category to search for entries.
+        :param name: If specified, only entries with that name are returned.
+        """
+        for dist in self.get_distributions():
+            r = dist.exports
+            if category in r:
+                d = r[category]
+                if name is not None:
+                    if name in d:
+                        yield d[name]
+                else:
+                    for v in d.values():
+                        yield v
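+
+
+# A minimal usage sketch for DistributionPath (illustrative only; it assumes
+# distlib is importable and that at least one distribution is installed on
+# sys.path):
+#
+#     from distlib.database import DistributionPath
+#
+#     dist_path = DistributionPath(include_egg=True)
+#     for dist in dist_path.get_distributions():
+#         print(dist.name_and_version)
+#     # get_distribution() returns the first match, or None:
+#     found = dist_path.get_distribution('six')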
+
+
+class Distribution(object):
+    """
+    A base class for distributions, whether installed or from indexes.
+    Either way, it must have some metadata, so that's all that's needed
+    for construction.
+    """
+
+    build_time_dependency = False
+    """
+    Set to True if it's known to be only a build-time dependency (i.e.
+    not needed after installation).
+    """
+
+    requested = False
+    """A boolean that indicates whether the ``REQUESTED`` metadata file is
+    present (in other words, whether the package was installed by user
+    request or it was installed as a dependency)."""
+
+    def __init__(self, metadata):
+        """
+        Initialise an instance.
+        :param metadata: The instance of :class:`Metadata` describing this
+                         distribution.
+        """
+        self.metadata = metadata
+        self.name = metadata.name
+        self.key = self.name.lower()    # for case-insensitive comparisons
+        self.version = metadata.version
+        self.locator = None
+        self.digest = None
+        self.extras = None      # additional features requested
+        self.context = None     # environment marker overrides
+        self.download_urls = set()
+        self.digests = {}
+
+    @property
+    def source_url(self):
+        """
+        The source archive download URL for this distribution.
+        """
+        return self.metadata.source_url
+
+    download_url = source_url   # Backward compatibility
+
+    @property
+    def name_and_version(self):
+        """
+        A utility property which displays the name and version in parentheses.
+        """
+        return '%s (%s)' % (self.name, self.version)
+
+    @property
+    def provides(self):
+        """
+        A set of distribution names and versions provided by this distribution.
+        :return: A set of "name (version)" strings.
+        """
+        plist = self.metadata.provides
+        s = '%s (%s)' % (self.name, self.version)
+        if s not in plist:
+            plist.append(s)
+        return plist
+
+    def _get_requirements(self, req_attr):
+        md = self.metadata
+        logger.debug('Getting requirements from metadata %r', md.todict())
+        reqts = getattr(md, req_attr)
+        return set(md.get_requirements(reqts, extras=self.extras,
+                                       env=self.context))
+
+    @property
+    def run_requires(self):
+        return self._get_requirements('run_requires')
+
+    @property
+    def meta_requires(self):
+        return self._get_requirements('meta_requires')
+
+    @property
+    def build_requires(self):
+        return self._get_requirements('build_requires')
+
+    @property
+    def test_requires(self):
+        return self._get_requirements('test_requires')
+
+    @property
+    def dev_requires(self):
+        return self._get_requirements('dev_requires')
+
+    def matches_requirement(self, req):
+        """
+        Say if this instance matches (fulfills) a requirement.
+        :param req: The requirement to match.
+        :rtype req: str
+        :return: True if it matches, else False.
+        """
+        # Requirement may contain extras - parse to lose those
+        # from what's passed to the matcher
+        r = parse_requirement(req)
+        scheme = get_scheme(self.metadata.scheme)
+        try:
+            matcher = scheme.matcher(r.requirement)
+        except UnsupportedVersionError:
+            # XXX compat-mode if cannot read the version
+            logger.warning('could not read version %r - using name only',
+                           req)
+            name = req.split()[0]
+            matcher = scheme.matcher(name)
+
+        name = matcher.key   # case-insensitive
+
+        result = False
+        for p in self.provides:
+            p_name, p_ver = parse_name_and_version(p)
+            if p_name != name:
+                continue
+            try:
+                result = matcher.match(p_ver)
+                break
+            except UnsupportedVersionError:
+                pass
+        return result
+
+    def __repr__(self):
+        """
+        Return a textual representation of this instance.
+        """
+        if self.source_url:
+            suffix = ' [%s]' % self.source_url
+        else:
+            suffix = ''
+        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
+
+    def __eq__(self, other):
+        """
+        See if this distribution is the same as another.
+        :param other: The distribution to compare with. To be equal to one
+                      another, distributions must have the same type, name,
+                      version and source_url.
+        :return: True if it is the same, else False.
+        """
+        if type(other) is not type(self):
+            result = False
+        else:
+            result = (self.name == other.name and
+                      self.version == other.version and
+                      self.source_url == other.source_url)
+        return result
+
+    def __hash__(self):
+        """
+        Compute hash in a way which matches the equality test.
+        """
+        return hash(self.name) + hash(self.version) + hash(self.source_url)
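+
+
+# A sketch of matches_requirement() semantics (illustrative; make_dist() is
+# defined at the end of this module and the version numbers are arbitrary):
+#
+#     dist = make_dist('requests', '2.18.4')
+#     dist.matches_requirement('requests (>= 2.0)')   # True
+#     dist.matches_requirement('requests (< 2.0)')    # False
+#     dist.matches_requirement('urllib3')             # False: name mismatch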
+
+
+class BaseInstalledDistribution(Distribution):
+    """
+    This is the base class for installed distributions (whether PEP 376 or
+    legacy).
+    """
+
+    hasher = None
+
+    def __init__(self, metadata, path, env=None):
+        """
+        Initialise an instance.
+        :param metadata: An instance of :class:`Metadata` which describes the
+                         distribution. This will normally have been initialised
+                         from a metadata file in the ``path``.
+        :param path: The path of the ``.dist-info`` or ``.egg-info``
+                     directory for the distribution.
+        :param env: This is normally the :class:`DistributionPath`
+                    instance where this distribution was found.
+        """
+        super(BaseInstalledDistribution, self).__init__(metadata)
+        self.path = path
+        self.dist_path = env
+
+    def get_hash(self, data, hasher=None):
+        """
+        Get the hash of some data, using a particular hash algorithm, if
+        specified.
+
+        :param data: The data to be hashed.
+        :type data: bytes
+        :param hasher: The name of a hash implementation, supported by hashlib,
+                       or ``None``. Examples of valid values are ``'sha1'``,
+                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
+                       ``'sha512'``. If no hasher is specified, the ``hasher``
+                       attribute of the :class:`InstalledDistribution` instance
+                       is used. If the hasher is determined to be ``None``, MD5
+                       is used as the hashing algorithm.
+        :returns: The hash of the data. If a hasher was explicitly specified,
+                  the returned hash will be prefixed with the specified hasher
+                  followed by '='.
+        :rtype: str
+        """
+        if hasher is None:
+            hasher = self.hasher
+        if hasher is None:
+            hasher = hashlib.md5
+            prefix = ''
+        else:
+            # Use the name of the hasher actually applied for the prefix,
+            # as promised in the docstring above.
+            prefix = '%s=' % hasher
+            hasher = getattr(hashlib, hasher)
+        digest = hasher(data).digest()
+        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
+        return '%s%s' % (prefix, digest)
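+
+
+# Illustrative get_hash() output shapes (digest values elided):
+#
+#     dist.get_hash(b'data')          # 'sha256=...' for InstalledDistribution,
+#                                     # whose class-level hasher is 'sha256'
+#     dist.get_hash(b'data', 'md5')   # 'md5=...'
+#
+# check_installed_files() relies on this '<name>=' prefix to recover the
+# algorithm recorded for each RECORD entry.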
+
+
+class InstalledDistribution(BaseInstalledDistribution):
+    """
+    Created with the *path* of the ``.dist-info`` directory provided to the
+    constructor. It reads the metadata contained in ``pydist.json`` when it is
+    instantiated, or uses a passed-in Metadata instance (useful for when
+    dry-run mode is being used).
+    """
+
+    hasher = 'sha256'
+
+    def __init__(self, path, metadata=None, env=None):
+        self.modules = []
+        self.finder = finder = resources.finder_for_path(path)
+        if finder is None:
+            raise ValueError('finder unavailable for %s' % path)
+        if env and env._cache_enabled and path in env._cache.path:
+            metadata = env._cache.path[path].metadata
+        elif metadata is None:
+            r = finder.find(METADATA_FILENAME)
+            # Temporary - for Wheel 0.23 support
+            if r is None:
+                r = finder.find(WHEEL_METADATA_FILENAME)
+            # Temporary - for legacy support
+            if r is None:
+                r = finder.find('METADATA')
+            if r is None:
+                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
+                                                        path))
+            with contextlib.closing(r.as_stream()) as stream:
+                metadata = Metadata(fileobj=stream, scheme='legacy')
+
+        super(InstalledDistribution, self).__init__(metadata, path, env)
+
+        if env and env._cache_enabled:
+            env._cache.add(self)
+
+        r = finder.find('REQUESTED')
+        self.requested = r is not None
+        p = os.path.join(path, 'top_level.txt')
+        if os.path.exists(p):
+            with open(p, 'rb') as f:
+                data = f.read()
+            self.modules = data.splitlines()
+
+    def __repr__(self):
+        return '<InstalledDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def _get_records(self):
+        """
+        Get the list of installed files for the distribution
+        :return: A list of tuples of path, hash and size. Note that hash and
+                 size might be ``None`` for some entries. The path is exactly
+                 as stored in the file (which is as in PEP 376).
+        """
+        results = []
+        r = self.get_distinfo_resource('RECORD')
+        with contextlib.closing(r.as_stream()) as stream:
+            with CSVReader(stream=stream) as record_reader:
+                # Base location is parent dir of .dist-info dir
+                #base_location = os.path.dirname(self.path)
+                #base_location = os.path.abspath(base_location)
+                for row in record_reader:
+                    missing = [None for i in range(len(row), 3)]
+                    path, checksum, size = row + missing
+                    #if not os.path.isabs(path):
+                    #    path = path.replace('/', os.sep)
+                    #    path = os.path.join(base_location, path)
+                    results.append((path, checksum, size))
+        return results
+
+    @cached_property
+    def exports(self):
+        """
+        Return the information exported by this distribution.
+        :return: A dictionary of exports, mapping an export category to a dict
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries, and keyed by name.
+        """
+        result = {}
+        r = self.get_distinfo_resource(EXPORTS_FILENAME)
+        if r:
+            result = self.read_exports()
+        return result
+
+    def read_exports(self):
+        """
+        Read exports data from a file in .ini format.
+
+        :return: A dictionary of exports, mapping an export category to a list
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries.
+        """
+        result = {}
+        r = self.get_distinfo_resource(EXPORTS_FILENAME)
+        if r:
+            with contextlib.closing(r.as_stream()) as stream:
+                result = read_exports(stream)
+        return result
+
+    def write_exports(self, exports):
+        """
+        Write a dictionary of exports to a file in .ini format.
+        :param exports: A dictionary of exports, mapping an export category to
+                        a list of :class:`ExportEntry` instances describing the
+                        individual export entries.
+        """
+        rf = self.get_distinfo_file(EXPORTS_FILENAME)
+        with open(rf, 'w') as f:
+            write_exports(exports, f)
+
+    def get_resource_path(self, relative_path):
+        """
+        NOTE: This API may change in the future.
+
+        Return the absolute path to a resource file with the given relative
+        path.
+ + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + r = self.get_distinfo_resource('RESOURCES') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. + """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = self.get_distinfo_file('RECORD') + logger.info('creating %s', record_path) + if dry_run: + return None + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + return record_path + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = self.get_distinfo_file('RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. 
via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. + :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. + """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return None + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_resource(self, path): + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + finder = resources.finder_for_path(self.path) + if finder is None: + raise DistlibException('Unable to get a finder for %s' % self.path) + return finder.find(path) + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: str + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? + distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. 
+ + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. It reads the metadata contained in the file itself, or + if the given path happens to be a directory, the metadata is read from the + file ``PKG-INFO`` under that directory.""" + + requested = True # as we have no way of knowing, assume it was + shared_locations = {} + + def __init__(self, path, env=None): + def set_name_and_version(s, n, v): + s.name = n + s.key = n.lower() # for case-insensitive comparisons + s.version = v + + self.path = path + self.dist_path = env + if env and env._cache_enabled and path in env._cache_egg.path: + metadata = env._cache_egg.path[path].metadata + set_name_and_version(self, metadata.name, metadata.version) + else: + metadata = self._get_metadata(path) + + # Need to be set before caching + set_name_and_version(self, metadata.name, metadata.version) + + if env and env._cache_enabled: + env._cache_egg.add(self) + super(EggInfoDistribution, self).__init__(metadata, path, env) + + def _get_metadata(self, path): + requires = None + + def parse_requires_data(data): + """Create a list of dependencies from a requires.txt file. + + *data*: the contents of a setuptools-produced requires.txt file. + """ + reqs = [] + lines = data.splitlines() + for line in lines: + line = line.strip() + if line.startswith('['): + logger.warning('Unexpected line: quitting requirement scan: %r', + line) + break + r = parse_requirement(line) + if not r: + logger.warning('Not recognised as a requirement: %r', line) + continue + if r.extras: + logger.warning('extra requirements in requires.txt are ' + 'not supported') + if not r.constraints: + reqs.append(r.name) + else: + cons = ', '.join('%s%s' % c for c in r.constraints) + reqs.append('%s (%s)' % (r.name, cons)) + return reqs + + def parse_requires_path(req_path): + """Create a list of dependencies from a requires.txt file. + + *req_path*: the path to a setuptools-produced requires.txt file. 
+            """
+
+            reqs = []
+            try:
+                with codecs.open(req_path, 'r', 'utf-8') as fp:
+                    reqs = parse_requires_data(fp.read())
+            except IOError:
+                pass
+            return reqs
+
+        tl_path = tl_data = None
+        if path.endswith('.egg'):
+            if os.path.isdir(path):
+                p = os.path.join(path, 'EGG-INFO')
+                meta_path = os.path.join(p, 'PKG-INFO')
+                metadata = Metadata(path=meta_path, scheme='legacy')
+                req_path = os.path.join(p, 'requires.txt')
+                tl_path = os.path.join(p, 'top_level.txt')
+                requires = parse_requires_path(req_path)
+            else:
+                # FIXME handle the case where zipfile is not available
+                zipf = zipimport.zipimporter(path)
+                fileobj = StringIO(
+                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
+                metadata = Metadata(fileobj=fileobj, scheme='legacy')
+                try:
+                    data = zipf.get_data('EGG-INFO/requires.txt')
+                    tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
+                    requires = parse_requires_data(data.decode('utf-8'))
+                except IOError:
+                    requires = None
+        elif path.endswith('.egg-info'):
+            if os.path.isdir(path):
+                req_path = os.path.join(path, 'requires.txt')
+                requires = parse_requires_path(req_path)
+                # Compute tl_path from the .egg-info directory before *path*
+                # is rebound to the PKG-INFO file inside it.
+                tl_path = os.path.join(path, 'top_level.txt')
+                path = os.path.join(path, 'PKG-INFO')
+            metadata = Metadata(path=path, scheme='legacy')
+        else:
+            raise DistlibException('path must end with .egg-info or .egg, '
+                                   'got %r' % path)
+
+        if requires:
+            metadata.add_requirements(requires)
+        # look for top-level modules in top_level.txt, if present
+        if tl_data is None:
+            if tl_path is not None and os.path.exists(tl_path):
+                with open(tl_path, 'rb') as f:
+                    tl_data = f.read().decode('utf-8')
+        if not tl_data:
+            tl_data = []
+        else:
+            tl_data = tl_data.splitlines()
+        self.modules = tl_data
+        return metadata
+
+    def __repr__(self):
+        return '<EggInfoDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def check_installed_files(self):
+        """
+        Checks that the hashes and sizes of the files in ``installed-files.txt``
+        are matched by the files themselves. Returns a (possibly empty) list of
+        mismatches. Each entry in the mismatch list will be a tuple consisting
+        of the path, 'exists', 'size' or 'hash' according to what didn't match
+        (existence is checked first, then size, then hash), the expected
+        value and the actual value.
+        """
+        mismatches = []
+        record_path = os.path.join(self.path, 'installed-files.txt')
+        if os.path.exists(record_path):
+            for path, _, _ in self.list_installed_files():
+                if path == record_path:
+                    continue
+                if not os.path.exists(path):
+                    mismatches.append((path, 'exists', True, False))
+        return mismatches
+
+    def list_installed_files(self):
+        """
+        Iterates over the ``installed-files.txt`` entries and returns a tuple
+        ``(path, hash, size)`` for each line.
+ + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, absolute=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter absolute: If *absolute* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. + :type absolute: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if absolute: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. + """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. 
+ + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. + + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. + """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. 
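+                # (e.g. with edges A -> B -> C, successive passes remove C,
+                # then B, then A; with A -> B and B -> A, neither adjacency
+                # list ever empties, so both nodes remain here as a cycle.)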
+ break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.run_requires | dist.meta_requires | + dist.build_requires | dist.dev_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + req = [] # required distributions + todo = graph.adjacency_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop()[0] + req.append(d) + for pred in graph.adjacency_list[d]: + if pred not in req: + todo.append(pred) + + return req + + +def make_dist(name, version, **kwargs): + """ + A convenience method for making a dist given just a name and version. 
+ """ + summary = kwargs.pop('summary', 'Placeholder for summary') + md = Metadata(**kwargs) + md.name = name + md.version = version + md.summary = summary or 'Placeholder for summary' + return Distribution(md) diff --git a/pipenv/vendor/distlib/index.py b/pipenv/vendor/distlib/index.py new file mode 100644 index 0000000000..2406be2169 --- /dev/null +++ b/pipenv/vendor/distlib/index.py @@ -0,0 +1,516 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import hashlib +import logging +import os +import shutil +import subprocess +import tempfile +try: + from threading import Thread +except ImportError: + from dummy_threading import Thread + +from . import DistlibException +from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, + urlparse, build_opener, string_types) +from .util import cached_property, zip_dir, ServerProxy + +logger = logging.getLogger(__name__) + +DEFAULT_INDEX = 'https://pypi.python.org/pypi' +DEFAULT_REALM = 'pypi' + +class PackageIndex(object): + """ + This class represents a package index compatible with PyPI, the Python + Package Index. + """ + + boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' + + def __init__(self, url=None): + """ + Initialise an instance. + + :param url: The URL of the index. If not specified, the URL for PyPI is + used. + """ + self.url = url or DEFAULT_INDEX + self.read_configuration() + scheme, netloc, path, params, query, frag = urlparse(self.url) + if params or query or frag or scheme not in ('http', 'https'): + raise DistlibException('invalid repository: %s' % self.url) + self.password_handler = None + self.ssl_verifier = None + self.gpg = None + self.gpg_home = None + with open(os.devnull, 'w') as sink: + # Use gpg by default rather than gpg2, as gpg2 insists on + # prompting for passwords + for s in ('gpg', 'gpg2'): + try: + rc = subprocess.check_call([s, '--version'], stdout=sink, + stderr=sink) + if rc == 0: + self.gpg = s + break + except OSError: + pass + + def _get_pypirc_command(self): + """ + Get the distutils command for interacting with PyPI configurations. + :return: the command. + """ + from distutils.core import Distribution + from distutils.config import PyPIRCCommand + d = Distribution() + return PyPIRCCommand(d) + + def read_configuration(self): + """ + Read the PyPI access configuration as supported by distutils, getting + PyPI to do the actual work. This populates ``username``, ``password``, + ``realm`` and ``url`` attributes from the configuration. + """ + # get distutils to do the work + c = self._get_pypirc_command() + c.repository = self.url + cfg = c._read_pypirc() + self.username = cfg.get('username') + self.password = cfg.get('password') + self.realm = cfg.get('realm', 'pypi') + self.url = cfg.get('repository', self.url) + + def save_configuration(self): + """ + Save the PyPI access configuration. You must have set ``username`` and + ``password`` attributes before calling this method. + + Again, distutils is used to do the actual work. + """ + self.check_credentials() + # get distutils to do the work + c = self._get_pypirc_command() + c._store_pypirc(self.username, self.password) + + def check_credentials(self): + """ + Check that ``username`` and ``password`` have been set, and raise an + exception if not. 
+        """
+        if self.username is None or self.password is None:
+            raise DistlibException('username and password must be set')
+        pm = HTTPPasswordMgr()
+        _, netloc, _, _, _, _ = urlparse(self.url)
+        pm.add_password(self.realm, netloc, self.username, self.password)
+        self.password_handler = HTTPBasicAuthHandler(pm)
+
+    def register(self, metadata):
+        """
+        Register a distribution on PyPI, using the provided metadata.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the distribution to be
+                         registered.
+        :return: The HTTP response received from PyPI upon submission of the
+                 request.
+        """
+        self.check_credentials()
+        metadata.validate()
+        d = metadata.todict()
+        d[':action'] = 'verify'
+        request = self.encode_request(d.items(), [])
+        response = self.send_request(request)
+        d[':action'] = 'submit'
+        request = self.encode_request(d.items(), [])
+        return self.send_request(request)
+
+    def _reader(self, name, stream, outbuf):
+        """
+        Thread runner for reading lines from a subprocess into a buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+        :param outbuf: The list to append the read lines to.
+        """
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            s = s.decode('utf-8').rstrip()
+            outbuf.append(s)
+            logger.debug('%s: %s' % (name, s))
+        stream.close()
+
+    def get_sign_command(self, filename, signer, sign_password,
+                         keystore=None):
+        """
+        Return a suitable command for signing a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The signing command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        if sign_password is not None:
+            cmd.extend(['--batch', '--passphrase-fd', '0'])
+        td = tempfile.mkdtemp()
+        sf = os.path.join(td, os.path.basename(filename) + '.asc')
+        cmd.extend(['--detach-sign', '--armor', '--local-user',
+                    signer, '--output', sf, filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd, sf
+
+    def run_command(self, cmd, input_data=None):
+        """
+        Run a command in a child process, passing it any input data specified.
+
+        :param cmd: The command to run.
+        :param input_data: If specified, this must be a byte string containing
+                           data to be sent to the child process.
+        :return: A tuple consisting of the subprocess' exit code, a list of
+                 lines read from the subprocess' ``stdout``, and a list of
+                 lines read from the subprocess' ``stderr``.
+ """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password, keystore=None): + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password, + keystore) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source', keystore=None): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The HTTP response received from PyPI upon submission of the + request. 
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + metadata.validate() + d = metadata.todict() + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password, + keystore) + with open(filename, 'rb') as f: + file_data = f.read() + md5_digest = hashlib.md5(file_data).hexdigest() + sha256_digest = hashlib.sha256(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protocol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': md5_digest, + 'sha256_digest': sha256_digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + metadata.validate() + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename, + keystore=None): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename, + keystore=None): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: True if the signature was verified, else False. 
+ """ + if not self.gpg: + raise DistlibException('verification unavailable because gpg ' + 'unavailable') + cmd = self.get_verify_command(signature_filename, data_filename, + keystore) + rc, stdout, stderr = self.run_command(cmd) + if rc not in (0, 1): + raise DistlibException('verify command failed with error ' + 'code %s' % rc) + return rc == 0 + + def download_file(self, url, destfile, digest=None, reporthook=None): + """ + This is a convenience method for downloading a file from an URL. + Normally, this will be a file from the index, though currently + no check is made for this (i.e. a file can be downloaded from + anywhere). + + The method is just like the :func:`urlretrieve` function in the + standard library, except that it allows digest computation to be + done during download and checking that the downloaded data + matched any expected value. + + :param url: The URL of the file to be downloaded (assumed to be + available via an HTTP GET request). + :param destfile: The pathname where the downloaded file is to be + saved. + :param digest: If specified, this must be a (hasher, value) + tuple, where hasher is the algorithm used (e.g. + ``'md5'``) and ``value`` is the expected value. + :param reporthook: The same as for :func:`urlretrieve` in the + standard library. + """ + if digest is None: + digester = None + logger.debug('No digest specified') + else: + if isinstance(digest, (list, tuple)): + hasher, digest = digest + else: + hasher = 'md5' + digester = getattr(hashlib, hasher)() + logger.debug('Digest specified: %s' % digest) + # The following code is equivalent to urlretrieve. + # We need to do it this way so that we can compute the + # digest of the file as we go. + with open(destfile, 'wb') as dfp: + # addinfourl is not a context manager on 2.x + # so we have to use try/finally + sfp = self.send_request(Request(url)) + try: + headers = sfp.info() + blocksize = 8192 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, blocksize, size) + while True: + block = sfp.read(blocksize) + if not block: + break + read += len(block) + dfp.write(block) + if digester: + digester.update(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, blocksize, size) + finally: + sfp.close() + + # check that we got the whole file, if we can + if size >= 0 and read < size: + raise DistlibException( + 'retrieval incomplete: got only %d out of %d bytes' + % (read, size)) + # if we have a digest, it must match. + if digester: + actual = digester.hexdigest() + if digest != actual: + raise DistlibException('%s digest mismatch for %s: expected ' + '%s, got %s' % (hasher, destfile, + digest, actual)) + logger.debug('Digest verified: %s', digest) + + def send_request(self, req): + """ + Send a standard library :class:`Request` to PyPI and return its + response. + + :param req: The request to send. + :return: The HTTP response from PyPI (a standard library HTTPResponse). + """ + handlers = [] + if self.password_handler: + handlers.append(self.password_handler) + if self.ssl_verifier: + handlers.append(self.ssl_verifier) + opener = build_opener(*handlers) + return opener.open(req) + + def encode_request(self, fields, files): + """ + Encode fields and files for posting to an HTTP server. + + :param fields: The fields to send as a list of (fieldname, value) + tuples. + :param files: The files to send as a list of (fieldname, filename, + file_bytes) tuple. 
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + def search(self, terms, operator=None): + if isinstance(terms, string_types): + terms = {'name': terms} + rpc_proxy = ServerProxy(self.url, timeout=3.0) + try: + return rpc_proxy.search(terms, operator or 'and') + finally: + rpc_proxy('close')() diff --git a/pipenv/vendor/distlib/locators.py b/pipenv/vendor/distlib/locators.py new file mode 100644 index 0000000000..11d26361c9 --- /dev/null +++ b/pipenv/vendor/distlib/locators.py @@ -0,0 +1,1292 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import zlib + +from . import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, string_types, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, text_type, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata, MetadataInvalidError +from .util import (cached_property, parse_credentials, ensure_slash, + split_filename, get_project_data, parse_requirement, + parse_name_and_version, ServerProxy, normalize_name) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'https://pypi.python.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + try: + return client.list_packages() + finally: + client('close')() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. + """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. /abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. 
+ newurl = None + for key in ('location', 'uri'): + if key in headers: + newurl = headers[key] + break + if newurl is None: # pragma: no cover + return + urlparts = urlparse(newurl) + if urlparts.scheme == '': + newurl = urljoin(req.get_full_url(), newurl) + if hasattr(headers, 'replace_header'): + headers.replace_header(key, newurl) + else: + headers[key] = newurl + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, + headers) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + +class Locator(object): + """ + A base class for locators - things that locate distributions. + """ + source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') + binary_extensions = ('.egg', '.exe', '.whl') + excluded_extensions = ('.pdf',) + + # A list of tags indicating which wheels you want to match. The default + # value of None matches against the tags compatible with the running + # Python. If you want to match other values, set wheel_tags on a locator + # instance to a list of tuples (pyver, abi, arch) which you want to match. + wheel_tags = None + + downloadable_extensions = source_extensions + ('.whl',) + + def __init__(self, scheme='default'): + """ + Initialise an instance. + :param scheme: Because locators look for most recent versions, they + need to know the version scheme to use. This specifies + the current PEP-recommended scheme - use ``'legacy'`` + if you need to support existing distributions on PyPI. + """ + self._cache = {} + self.scheme = scheme + # Because of bugs in some of the handlers on some of the platforms, + # we use our own opener rather than just using urlopen. + self.opener = build_opener(RedirectHandler()) + # If get_project() is called from locate(), the matcher instance + # is set from the requirement passed to locate(). See issue #18 for + # why this can be useful to know. + self.matcher = None + self.errors = queue.Queue() + + def get_errors(self): + """ + Return any errors which have occurred. + """ + result = [] + while not self.errors.empty(): # pragma: no cover + try: + e = self.errors.get(False) + result.append(e) + except self.errors.Empty: + continue + self.errors.task_done() + return result + + def clear_errors(self): + """ + Clear any errors which may have been logged. + """ + # Just get the errors and throw them away + self.get_errors() + + def clear_cache(self): + self._cache.clear() + + def _get_scheme(self): + return self._scheme + + def _set_scheme(self, value): + self._scheme = value + + scheme = property(_get_scheme, _set_scheme) + + def _get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This should be implemented in subclasses. + + If called from a locate() request, self.matcher will be set to a + matcher for the requirement to satisfy, otherwise it will be None. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This calls _get_project to do all the work, and just implements a caching layer on top. 
+ """ + if self._cache is None: # pragma: no cover + result = self._get_project(name) + elif name in self._cache: + result = self._cache[name] + else: + self.clear_errors() + result = self._get_project(name) + self._cache[name] = result + return result + + def score_url(self, url): + """ + Give an url a score which can be used to choose preferred URLs + for a given project release. + """ + t = urlparse(url) + basename = posixpath.basename(t.path) + compatible = True + is_wheel = basename.endswith('.whl') + is_downloadable = basename.endswith(self.downloadable_extensions) + if is_wheel: + compatible = is_compatible(Wheel(basename), self.wheel_tags) + return (t.scheme == 'https', 'pypi.python.org' in t.netloc, + is_downloadable, is_wheel, compatible, basename) + + def prefer_url(self, url1, url2): + """ + Choose one of two URLs where both are candidates for distribution + archives for the same version of a distribution (for example, + .tar.gz vs. zip). + + The current implementation favours https:// URLs over http://, archives + from PyPI over those from other locations, wheel compatibility (if a + wheel) and then the archive name. + """ + result = url2 + if url1: + s1 = self.score_url(url1) + s2 = self.score_url(url2) + if s1 > s2: + result = url1 + if result != url2: + logger.debug('Not replacing %r with %r', url1, url2) + else: + logger.debug('Replacing %r with %r', url1, url2) + return result + + def split_filename(self, filename, project_name): + """ + Attempt to split a filename in project name, version and Python version. + """ + return split_filename(filename, project_name) + + def convert_url_to_download_info(self, url, project_name): + """ + See if a URL is a candidate for a download URL for a project (the URL + has typically been scraped from an HTML page). + + If it is, a dictionary is returned with keys "name", "version", + "filename" and "url"; otherwise, None is returned. 
+ """ + def same_project(name1, name2): + return normalize_name(name1) == normalize_name(name2) + + result = None + scheme, netloc, path, params, query, frag = urlparse(url) + if frag.lower().startswith('egg='): # pragma: no cover + logger.debug('%s: version hint in fragment: %r', + project_name, frag) + m = HASHER_HASH.match(frag) + if m: + algo, digest = m.groups() + else: + algo, digest = None, None + origpath = path + if path and path[-1] == '/': # pragma: no cover + path = path[:-1] + if path.endswith('.whl'): + try: + wheel = Wheel(path) + if is_compatible(wheel, self.wheel_tags): + if project_name is None: + include = True + else: + include = same_project(wheel.name, project_name) + if include: + result = { + 'name': wheel.name, + 'version': wheel.version, + 'filename': wheel.filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + 'python-version': ', '.join( + ['.'.join(list(v[2:])) for v in wheel.pyver]), + } + except Exception as e: # pragma: no cover + logger.warning('invalid path for wheel: %s', path) + elif not path.endswith(self.downloadable_extensions): # pragma: no cover + logger.debug('Not downloadable: %s', path) + else: # downloadable extension + path = filename = posixpath.basename(path) + for ext in self.downloadable_extensions: + if path.endswith(ext): + path = path[:-len(ext)] + t = self.split_filename(path, project_name) + if not t: # pragma: no cover + logger.debug('No match for project/version: %s', path) + else: + name, version, pyver = t + if not project_name or same_project(project_name, name): + result = { + 'name': name, + 'version': version, + 'filename': filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + #'packagetype': 'sdist', + } + if pyver: # pragma: no cover + result['python-version'] = pyver + break + if result and algo: + result['%s_digest' % algo] = digest + return result + + def _get_digest(self, info): + """ + Get a digest from a dictionary by looking at keys of the form + 'algo_digest'. + + Returns a 2-tuple (algo, digest) if found, else None. Currently + looks only for SHA256, then MD5. + """ + result = None + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break + return result + + def _update_version_data(self, result, info): + """ + Update a result dictionary (the final result from _get_project) with a + dictionary for a specific version, which typically holds information + gleaned from a filename or URL for an archive for the distribution. + """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.digest = digest = self._get_digest(info) + url = info['url'] + result['digests'][url] = digest + if md.source_url != info['url']: + md.source_url = self.prefer_url(md.source_url, url) + result['urls'].setdefault(version, set()).add(url) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. + + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. + :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. 
+ """ + result = None + r = parse_requirement(requirement) + if r is None: # pragma: no cover + raise DistlibException('Not a valid requirement: %r' % requirement) + scheme = get_scheme(self.scheme) + self.matcher = matcher = scheme.matcher(r.requirement) + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(r.name) + if len(versions) > 2: # urls and digests keys are present + # sometimes, versions are invalid + slist = [] + vcls = matcher.version_class + for k in versions: + if k in ('urls', 'digests'): + continue + try: + if not matcher.match(k): + logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + else: + logger.debug('skipping pre-release ' + 'version %s of %s', k, matcher.name) + except Exception: # pragma: no cover + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + version = slist[-1] + result = versions[version] + if result: + if r.extras: + result.extras = r.extras + result.download_urls = versions.get('urls', {}).get(version, set()) + d = {} + sd = versions.get('digests', {}) + for url in result.download_urls: + if url in sd: # pragma: no cover + d[url] = sd[url] + result.digests = d + self.matcher = None + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore + cannot be used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.name = data['name'] + metadata.version = data['version'] + metadata.license = data.get('license') + metadata.keywords = data.get('keywords', []) + metadata.summary = data.get('summary') + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[v] = dist + for info in urls: + url = info['url'] + digest = self._get_digest(info) + result['urls'].setdefault(v, set()).add(url) + result['digests'][url] = digest + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + and probably not worth using. + """ + def __init__(self, url, **kwargs): + super(PyPIJSONLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
+ """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + url = urljoin(self.base_url, '%s/json' % quote(name)) + try: + resp = self.opener.open(url) + data = resp.read().decode() # for now + d = json.loads(data) + md = Metadata(scheme=self.scheme) + data = d['info'] + md.name = data['name'] + md.version = data['version'] + md.license = data.get('license') + md.keywords = data.get('keywords', []) + md.summary = data.get('summary') + dist = Distribution(md) + dist.locator = self + urls = d['urls'] + result[md.version] = dist + for info in d['urls']: + url = info['url'] + dist.download_urls.add(url) + dist.digests[url] = self._get_digest(info) + result['urls'].setdefault(md.version, set()).add(url) + result['digests'][url] = self._get_digest(info) + # Now get other releases + for version, infos in d['releases'].items(): + if version == md.version: + continue # already done + omd = Metadata(scheme=self.scheme) + omd.name = md.name + omd.version = version + odist = Distribution(omd) + odist.locator = self + result[version] = odist + for info in infos: + url = info['url'] + odist.download_urls.add(url) + odist.digests[url] = self._get_digest(info) + result['urls'].setdefault(version, set()).add(url) + result['digests'][url] = self._get_digest(info) +# for info in urls: +# md.source_url = info['url'] +# dist.digest = self._get_digest(info) +# dist.locator = self +# for info in urls: +# url = info['url'] +# result['urls'].setdefault(md.version, set()).add(url) +# result['digests'][url] = self._get_digest(info) + except Exception as e: + self.errors.put(text_type(e)) + logger.exception('JSON fetch failed: %s', e) + return result + + +class Page(object): + """ + This class represents a scraped HTML page. + """ + # The following slightly hairy-looking regex just looks for the contents of + # an anchor link, which has an attribute "href" either immediately preceded + # or immediately followed by a "rel" attribute. The attribute values can be + # declared with double quotes, single quotes or no quotes - which leads to + # the length of the expression. + _href = re.compile(""" +(rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*))\\s+)? +href\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)) +(\\s+rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)))? +""", re.I | re.S | re.X) + _base = re.compile(r"""]+)""", re.I | re.S) + + def __init__(self, data, url): + """ + Initialise an instance with the Unicode page contents and the URL they + came from. + """ + self.data = data + self.base_url = self.url = url + m = self._base.search(self.data) + if m: + self.base_url = m.group(1) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + @cached_property + def links(self): + """ + Return the URLs of all the links on a page together with information + about their "rel" attribute, for determining which ones to treat as + downloads and which ones to queue for further scraping. + """ + def clean(url): + "Tidy up an URL." 
+            scheme, netloc, path, params, query, frag = urlparse(url)
+            return urlunparse((scheme, netloc, quote(path),
+                               params, query, frag))
+
+        result = set()
+        for match in self._href.finditer(self.data):
+            d = match.groupdict('')
+            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+                   d['rel4'] or d['rel5'] or d['rel6'])
+            url = d['url1'] or d['url2'] or d['url3']
+            url = urljoin(self.base_url, url)
+            url = unescape(url)
+            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+            result.add((url, rel))
+        # We sort the result, hoping to bring the most recent versions
+        # to the front
+        result = sorted(result, key=lambda t: t[0], reverse=True)
+        return result
+
+
+class SimpleScrapingLocator(Locator):
+    """
+    A locator which scrapes HTML pages to locate downloads for a distribution.
+    This runs multiple threads to do the I/O; performance is at least as good
+    as pip's PackageFinder, which works in an analogous fashion.
+    """
+
+    # These are used to deal with various Content-Encoding schemes.
+    decoders = {
+        'deflate': zlib.decompress,
+        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+        'none': lambda b: b,
+    }
+
+    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+        """
+        Initialise an instance.
+        :param url: The root URL to use for scraping.
+        :param timeout: The timeout, in seconds, to be applied to requests.
+                        This defaults to ``None`` (no timeout specified).
+        :param num_workers: The number of worker threads you want to do I/O.
+                            This defaults to 10.
+        :param kwargs: Passed to the superclass.
+        """
+        super(SimpleScrapingLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+        self.timeout = timeout
+        self._page_cache = {}
+        self._seen = set()
+        self._to_fetch = queue.Queue()
+        self._bad_hosts = set()
+        self.skip_externals = False
+        self.num_workers = num_workers
+        self._lock = threading.RLock()
+        # See issue #45: we need to be resilient when the locator is used
+        # in a thread, e.g. with concurrent.futures. We can't use self._lock
+        # as it is for coordinating our internal threads - the ones created
+        # in _prepare_threads.
+        self._gplock = threading.RLock()
+
+    def _prepare_threads(self):
+        """
+        Threads are created only when get_project is called, and terminate
+        before it returns. They are there primarily to parallelise I/O (i.e.
+        fetching web pages).
+        """
+        self._threads = []
+        for i in range(self.num_workers):
+            t = threading.Thread(target=self._fetch)
+            t.setDaemon(True)
+            t.start()
+            self._threads.append(t)
+
+    def _wait_threads(self):
+        """
+        Tell all the threads to terminate (by sending a sentinel value) and
+        wait for them to do so.
+        """
+        # Note that you need two loops, since you can't say which
+        # thread will get each sentinel
+        for t in self._threads:
+            self._to_fetch.put(None)    # sentinel
+        for t in self._threads:
+            t.join()
+        self._threads = []
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        with self._gplock:
+            self.result = result
+            self.project_name = name
+            url = urljoin(self.base_url, '%s/' % quote(name))
+            self._seen.clear()
+            self._page_cache.clear()
+            self._prepare_threads()
+            try:
+                logger.debug('Queueing %s', url)
+                self._to_fetch.put(url)
+                self._to_fetch.join()
+            finally:
+                self._wait_threads()
+            del self.result
+        return result
+
+    platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
+                                    r'win(32|-amd64)|macosx-?\d+)\b', re.I)
+
+    def _is_platform_dependent(self, url):
+        """
+        Does an URL refer to a platform-specific download?
+ """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. + """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. after an error + continue + for link, rel in page.links: + if link not in self._seen: + try: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + except MetadataInvalidError: # e.g. invalid versions + pass + except Exception as e: # pragma: no cover + self.errors.put(text_type(e)) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). 
+ """ + # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api + scheme, netloc, path, _, _, _ = urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + url = urljoin(ensure_slash(url), 'index.html') + + if url in self._page_cache: + result = self._page_cache[url] + logger.debug('Returning %s from cache: %s', url, result) + else: + host = netloc.split(':', 1)[0] + result = None + if host in self._bad_hosts: + logger.debug('Skipping %s due to bad host %s', url, host) + else: + req = Request(url, headers={'Accept-encoding': 'identity'}) + try: + logger.debug('Fetching %s', url) + resp = self.opener.open(req, timeout=self.timeout) + logger.debug('Fetched %s', url) + headers = resp.info() + content_type = headers.get('Content-Type', '') + if HTML_CONTENT_TYPE.match(content_type): + final_url = resp.geturl() + data = resp.read() + encoding = headers.get('Content-Encoding') + if encoding: + decoder = self.decoders[encoding] # fail if not found + data = decoder(data) + encoding = 'utf-8' + m = CHARSET.search(content_type) + if m: + encoding = m.group(1) + try: + data = data.decode(encoding) + except UnicodeError: # pragma: no cover + data = data.decode('latin-1') # fallback + result = Page(data, final_url) + self._page_cache[final_url] = result + except HTTPError as e: + if e.code != 404: + logger.exception('Fetch failed: %s: %s', url, e) + except URLError as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + with self._lock: + self._bad_hosts.add(host) + except Exception as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + finally: + self._page_cache[url] = result # even if None (failure) + return result + + _distname_re = re.compile(']*>([^<]+)<') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + page = self.get_page(self.base_url) + if not page: + raise DistlibException('Unable to get %s' % self.base_url) + for match in self._distname_re.finditer(page.data): + result.add(match.group(1)) + return result + +class DirectoryLocator(Locator): + """ + This class locates distributions in a directory tree. + """ + + def __init__(self, path, **kwargs): + """ + Initialise an instance. + :param path: The root of the directory tree to search. + :param kwargs: Passed to the superclass constructor, + except for: + * recursive - if True (the default), subdirectories are + recursed into. If False, only the top-level directory + is searched, + """ + self.recursive = kwargs.pop('recursive', True) + super(DirectoryLocator, self).__init__(**kwargs) + path = os.path.abspath(path) + if not os.path.isdir(path): # pragma: no cover + raise DistlibException('Not a directory: %r' % path) + self.base_dir = path + + def should_include(self, filename, parent): + """ + Should a filename be considered as a candidate for a distribution + archive? As well as the filename, the directory which contains it + is provided, though not used by the current implementation. 
+ """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + # We don't store summary in project metadata as it makes + # the data bigger for no benefit during dependency + # resolution + dist = make_dist(data['name'], info['version'], + summary=data.get('summary', + 'Placeholder for summary'), + scheme=self.scheme) + md = dist.metadata + md.source_url = info['url'] + # TODO SHA256 digest + if 'digest' in info and info['digest']: + dist.digest = ('md5', info['digest']) + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + result['urls'].setdefault(dist.version, set()).add(info['url']) + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {'urls': {}, 'digests': {}} + else: + result = { + dist.version: dist, + 'urls': {dist.version: set([dist.source_url])}, + 'digests': {dist.version: set([None])} + } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. If True, + the results from all locators are merged (this can be + slow). 
+ """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + d = locator.get_project(name) + if d: + if self.merge: + files = result.get('urls', {}) + digests = result.get('digests', {}) + # next line could overwrite result['urls'], result['digests'] + result.update(d) + df = result.get('urls') + if files and df: + for k, v in files.items(): + if k in df: + df[k] |= v + else: + df[k] = v + dd = result.get('digests') + if digests and dd: + dd.update(digests) + else: + # See issue #18. If any dists are found and we're looking + # for specific constraints, we only return something if + # a match is found. For example, if a DirectoryLocator + # returns just foo (1.0) while we're looking for + # foo (>= 2.0), we'll pretend there was nothing there so + # that subsequent locators can be queried. Otherwise we + # would just return foo (1.0) which would then lead to a + # failure to find foo (>= 2.0), because other locators + # weren't searched. Note that this only matters when + # merge=False. + if self.matcher is None: + found = True + else: + found = False + for k in d: + if self.matcher.match(k): + found = True + break + if found: + result = d + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +# We use a legacy scheme simply because most of the dists on PyPI use legacy +# versions which don't conform to PEP 426 / PEP 440. +default_locator = AggregatingLocator( + JSONLocator(), + SimpleScrapingLocator('https://pypi.python.org/simple/', + timeout=3.0), + scheme='legacy') + +locate = default_locator.locate + +NAME_VERSION_RE = re.compile(r'(?P[\w-]+)\s*' + r'\(\s*(==\s*)?(?P[^)]+)\)$') + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. 
+ """ + logger.debug('removing distribution %s', dist) + name = dist.key + del self.dists_by_name[name] + del self.dists[(name, dist.version)] + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Remove from provided: %s, %s, %s', name, version, dist) + s = self.provided[name] + s.remove((version, dist)) + if not s: + del self.provided[name] + + def get_matcher(self, reqt): + """ + Get a version matcher for a requirement. + :param reqt: The requirement + :type reqt: str + :return: A version matcher (an instance of + :class:`distlib.version.Matcher`). + """ + try: + matcher = self.scheme.matcher(reqt) + except UnsupportedVersionError: # pragma: no cover + # XXX compat-mode if cannot read the version + name = reqt.split()[0] + matcher = self.scheme.matcher(name) + return matcher + + def find_providers(self, reqt): + """ + Find the distributions which can fulfill a requirement. + + :param reqt: The requirement. + :type reqt: str + :return: A set of distribution which can fulfill the requirement. + """ + matcher = self.get_matcher(reqt) + name = matcher.key # case-insensitive + result = set() + provided = self.provided + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + result.add(provider) + break + return result + + def try_to_replace(self, provider, other, problems): + """ + Attempt to replace one provider with another. This is typically used + when resolving dependencies from multiple sources, e.g. A requires + (B >= 1.0) while C requires (B >= 1.1). + + For successful replacement, ``provider`` must meet all the requirements + which ``other`` fulfills. + + :param provider: The provider we are trying to replace with. + :param other: The provider we're trying to replace. + :param problems: If False is returned, this will contain what + problems prevented replacement. This is currently + a tuple of the literal string 'cantreplace', + ``provider``, ``other`` and the set of requirements + that ``provider`` couldn't fulfill. + :return: True if we can replace ``other`` with ``provider``, else + False. + """ + rlist = self.reqts[other] + unmatched = set() + for s in rlist: + matcher = self.get_matcher(s) + if not matcher.match(provider.version): + unmatched.add(s) + if unmatched: + # can't replace other with provider + problems.add(('cantreplace', provider, other, + frozenset(unmatched))) + result = False + else: + # can replace other with provider + self.remove_distribution(other) + del self.reqts[other] + for s in rlist: + self.reqts.setdefault(provider, set()).add(s) + self.add_distribution(provider) + result = True + return result + + def find(self, requirement, meta_extras=None, prereleases=False): + """ + Find a distribution and all distributions it depends on. + + :param requirement: The requirement specifying the distribution to + find, or a Distribution instance. + :param meta_extras: A list of meta extras such as :test:, :build: and + so on. + :param prereleases: If ``True``, allow pre-release versions to be + returned - otherwise, don't return prereleases + unless they're all that's available. + + Return a set of :class:`Distribution` instances and a set of + problems. 
+ + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. + """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + meta_extras = set(meta_extras or []) + if ':*:' in meta_extras: + meta_extras.remove(':*:') + # :meta: and :run: are implicitly included + meta_extras |= set([':test:', ':build:', ':dev:']) + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.run_requires | dist.meta_requires + sreqts = dist.build_requires + ereqts = set() + if meta_extras and dist in install_dists: + for key in ('test', 'build', 'dev'): + e = ':%s:' % key + if e in meta_extras: + ereqts |= getattr(dist, '%s_requires' % key) + all_reqts = ireqts | sreqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + # If no provider is found and we didn't consider + # prereleases, consider them now. + if provider is None and not prereleases: + provider = self.locator.locate(r, prereleases=True) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/pipenv/vendor/distlib/manifest.py b/pipenv/vendor/distlib/manifest.py new file mode 100644 index 0000000000..ca0fe442d9 --- /dev/null +++ b/pipenv/vendor/distlib/manifest.py @@ -0,0 +1,393 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. + +Equivalent to distutils.filelist, but fixes some problems. 
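Before moving on to the manifest module, a sketch of the `DependencyFinder.find` API defined above; the requirement string is an example and network access is assumed:

    finder = DependencyFinder()          # uses default_locator
    dists, problems = finder.find('requests (2.18.4)')
    for d in dists:
        print(d.name_and_version, 'build-time only:', d.build_time_dependency)
    for problem in problems:
        print(problem)                   # e.g. ('unsatisfied', 'foo (>= 2.0)')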
+""" +import fnmatch +import logging +import os +import re +import sys + +from . import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + +# +# Due to the different results returned by fnmatch.translate, we need +# to do slightly different processing for Python 2.7 and 3.2 ... this needed +# to be brought in for Python 3.6 onwards. +# +_PYTHON_VERSION = sys.version_info[:2] + +class Manifest(object): + """A list of files built by on exploring the filesystem and filtered by + applying various patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. + """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! + stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. + """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). 
+        action, patterns, thedir, dirpattern = self._parse_directive(directive)
+
+        # OK, now we know that the action is valid and we have the
+        # right number of words on the line for that action -- so we
+        # can proceed with minimal error-checking.
+        if action == 'include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=True):
+                    logger.warning('no files found matching %r', pattern)
+
+        elif action == 'exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=True)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'found matching %r', pattern)
+
+        elif action == 'global-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=False):
+                    logger.warning('no files found matching %r '
+                                   'anywhere in distribution', pattern)
+
+        elif action == 'global-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=False)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found anywhere in '
+                #                   'distribution', pattern)
+
+        elif action == 'recursive-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, prefix=thedir):
+                    logger.warning('no files found matching %r '
+                                   'under directory %r', pattern, thedir)
+
+        elif action == 'recursive-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, prefix=thedir)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found under directory %r',
+                #                   pattern, thedir)
+
+        elif action == 'graft':
+            if not self._include_pattern(None, prefix=dirpattern):
+                logger.warning('no directories found matching %r',
+                               dirpattern)
+
+        elif action == 'prune':
+            if not self._exclude_pattern(None, prefix=dirpattern):
+                logger.warning('no previously-included directories found '
+                               'matching %r', dirpattern)
+        else:   # pragma: no cover
+            # This should never happen, as it should be caught in
+            # _parse_template_line
+            raise DistlibException(
+                'invalid action %r' % action)
+
+    #
+    # Private API
+    #
+
+    def _parse_directive(self, directive):
+        """
+        Validate a directive.
+        :param directive: The directive to validate.
+        :return: A tuple of action, patterns, thedir, dir_patterns
+        """
+        words = directive.split()
+        if len(words) == 1 and words[0] not in ('include', 'exclude',
+                                                'global-include',
+                                                'global-exclude',
+                                                'recursive-include',
+                                                'recursive-exclude',
+                                                'graft', 'prune'):
+            # no action given, let's use the default 'include'
+            words.insert(0, 'include')
+
+        action = words[0]
+        patterns = thedir = dir_pattern = None
+
+        if action in ('include', 'exclude',
+                      'global-include', 'global-exclude'):
+            if len(words) < 2:
+                raise DistlibException(
+                    '%r expects <pattern1> <pattern2> ...' % action)
+
+            patterns = [convert_path(word) for word in words[1:]]
+
+        elif action in ('recursive-include', 'recursive-exclude'):
+            if len(words) < 3:
+                raise DistlibException(
+                    '%r expects <dir> <pattern1> <pattern2> ...' % action)
+
+            thedir = convert_path(words[1])
+            patterns = [convert_path(word) for word in words[2:]]
+
+        elif action in ('graft', 'prune'):
+            if len(words) != 2:
+                raise DistlibException(
+                    '%r expects a single <dir_pattern>' % action)
+
+            dir_pattern = convert_path(words[1])
+
+        else:
+            raise DistlibException('unknown action %r' % action)
+
+        return action, patterns, thedir, dir_pattern
+
+    def _include_pattern(self, pattern, anchor=True, prefix=None,
+                         is_regex=False):
+        """Select strings (presumably filenames) from 'self.files' that
+        match 'pattern', a Unix-style wildcard (glob) pattern.
+ + Patterns are not quite the same as implemented by the 'fnmatch' + module: '*' and '?' match non-special characters, where "special" + is platform-dependent: slash on Unix; colon, slash, and backslash on + DOS/Windows; and colon on Mac OS. + + If 'anchor' is true (the default), then the pattern match is more + stringent: "*.py" will match "foo.py" but not "foo/bar.py". If + 'anchor' is false, both of these will match. + + If 'prefix' is supplied, then only filenames starting with 'prefix' + (itself a pattern) and ending with 'pattern', with anything in between + them, will match. 'anchor' is ignored in this case. + + If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and + 'pattern' is assumed to be either a string containing a regex or a + regex object -- no translation is done, the regex is just compiled + and used as-is. + + Selected strings will be added to self.files. + + Return True if files are found. + """ + # XXX docstring lying about what the special chars are? + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + + # delayed loading of allfiles list + if self.allfiles is None: + self.findall() + + for name in self.allfiles: + if pattern_re.search(name): + self.files.add(name) + found = True + return found + + def _exclude_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Remove strings (presumably filenames) from 'files' that match + 'pattern'. + + Other parameters are the same as for 'include_pattern()', above. + The list 'self.files' is modified in place. Return True if files are + found. + + This API is public to allow e.g. exclusion of SCM subdirs, e.g. when + packaging source distributions + """ + found = False + pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) + for f in list(self.files): + if pattern_re.search(f): + self.files.remove(f) + found = True + return found + + def _translate_pattern(self, pattern, anchor=True, prefix=None, + is_regex=False): + """Translate a shell-like wildcard pattern to a compiled regular + expression. + + Return the compiled regex. If 'is_regex' true, + then 'pattern' is directly compiled to a regex (if it's a string) + or just returned as-is (assumes it's a regex object). 
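The directive processing described above can be sketched end to end; the project root is hypothetical:

    m = Manifest('/path/to/project')     # hypothetical root
    m.findall()
    m.process_directive('include *.py')
    m.process_directive('recursive-include docs *.rst')
    m.process_directive('prune build')
    print(m.sorted())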
+        """
+        if is_regex:
+            if isinstance(pattern, str):
+                return re.compile(pattern)
+            else:
+                return pattern
+
+        if _PYTHON_VERSION > (3, 2):
+            # ditch start and end characters
+            start, _, end = self._glob_to_re('_').partition('_')
+
+        if pattern:
+            pattern_re = self._glob_to_re(pattern)
+            if _PYTHON_VERSION > (3, 2):
+                assert pattern_re.startswith(start) and pattern_re.endswith(end)
+        else:
+            pattern_re = ''
+
+        base = re.escape(os.path.join(self.base, ''))
+        if prefix is not None:
+            # ditch end of pattern character
+            if _PYTHON_VERSION <= (3, 2):
+                empty_pattern = self._glob_to_re('')
+                prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
+            else:
+                prefix_re = self._glob_to_re(prefix)
+                assert prefix_re.startswith(start) and prefix_re.endswith(end)
+                prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
+            sep = os.sep
+            if os.sep == '\\':
+                sep = r'\\'
+            if _PYTHON_VERSION <= (3, 2):
+                pattern_re = '^' + base + sep.join((prefix_re,
+                                                    '.*' + pattern_re))
+            else:
+                pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
+                pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
+                                                  pattern_re, end)
+        else:  # no prefix -- respect anchor flag
+            if anchor:
+                if _PYTHON_VERSION <= (3, 2):
+                    pattern_re = '^' + base + pattern_re
+                else:
+                    pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
+
+        return re.compile(pattern_re)
+
+    def _glob_to_re(self, pattern):
+        """Translate a shell-like glob pattern to a regular expression.
+
+        Return a string containing the regex. Differs from
+        'fnmatch.translate()' in that '*' does not match "special characters"
+        (which are platform-specific).
+        """
+        pattern_re = fnmatch.translate(pattern)
+
+        # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+        # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+        # and by extension they shouldn't match such "special characters" under
+        # any OS. So change all non-escaped dots in the RE to match any
+        # character except the special characters (currently: just os.sep).
+        sep = os.sep
+        if os.sep == '\\':
+            # we're using a regex to manipulate a regex, so we need
+            # to escape the backslash twice
+            sep = r'\\\\'
+        escaped = r'\1[^%s]' % sep
+        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
+        return pattern_re
diff --git a/pipenv/vendor/distlib/markers.py b/pipenv/vendor/distlib/markers.py
new file mode 100644
--- /dev/null
+++ b/pipenv/vendor/distlib/markers.py
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Parser for the environment markers micro-language defined in PEP 508.
+"""
+
+import os
+import sys
+import platform
+
+from .compat import string_types
+from .util import in_venv, parse_marker
+
+__all__ = ['interpret']
+
+
+def _is_literal(o):
+    if not isinstance(o, string_types) or not o:
+        return False
+    return o[0] in '\'"'
+
+
+class Evaluator(object):
+    """
+    This class is used to evaluate marker expressions.
+    """
+
+    operations = {
+        '==': lambda x, y: x == y,
+        '===': lambda x, y: x == y,
+        '~=': lambda x, y: x.startswith(y),
+        '!=': lambda x, y: x != y,
+        '<': lambda x, y: x < y,
+        '<=': lambda x, y: x == y or x < y,
+        '>': lambda x, y: x > y,
+        '>=': lambda x, y: x == y or x > y,
+        'and': lambda x, y: x and y,
+        'or': lambda x, y: x or y,
+        'in': lambda x, y: x in y,
+        'not in': lambda x, y: x not in y,
+    }
+
+    def evaluate(self, expr, context):
+        """
+        Evaluate a marker expression returned by the :func:`parse_requirement`
+        function in the specified context.
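+
+        A minimal sketch (illustrative): for expr parsed from the marker
+        "python_version >= '3.0'", evaluate(expr, DEFAULT_CONTEXT) returns
+        True on Python 3 interpreters.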
+ """ + if isinstance(expr, string_types): + if expr[0] in '\'"': + result = expr[1:-1] + else: + if expr not in context: + raise SyntaxError('unknown variable: %s' % expr) + result = context[expr] + else: + assert isinstance(expr, dict) + op = expr['op'] + if op not in self.operations: + raise NotImplementedError('op not implemented: %s' % op) + elhs = expr['lhs'] + erhs = expr['rhs'] + if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): + raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) + + lhs = self.evaluate(elhs, context) + rhs = self.evaluate(erhs, context) + result = self.operations[op](lhs, rhs) + return result + +def default_context(): + def format_full_version(info): + version = '%s.%s.%s' % (info.major, info.minor, info.micro) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + if hasattr(sys, 'implementation'): + implementation_version = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + implementation_version = '0' + implementation_name = '' + + result = { + 'implementation_name': implementation_name, + 'implementation_version': implementation_version, + 'os_name': os.name, + 'platform_machine': platform.machine(), + 'platform_python_implementation': platform.python_implementation(), + 'platform_release': platform.release(), + 'platform_system': platform.system(), + 'platform_version': platform.version(), + 'platform_in_venv': str(in_venv()), + 'python_full_version': platform.python_version(), + 'python_version': platform.python_version()[:3], + 'sys_platform': sys.platform, + } + return result + +DEFAULT_CONTEXT = default_context() +del default_context + +evaluator = Evaluator() + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. + :type execution_context: mapping + """ + try: + expr, rest = parse_marker(marker) + except Exception as e: + raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e)) + if rest and rest[0] != '#': + raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest)) + context = dict(DEFAULT_CONTEXT) + if execution_context: + context.update(execution_context) + return evaluator.evaluate(expr, context) diff --git a/pipenv/vendor/distlib/metadata.py b/pipenv/vendor/distlib/metadata.py new file mode 100644 index 0000000000..6d6470fff8 --- /dev/null +++ b/pipenv/vendor/distlib/metadata.py @@ -0,0 +1,1091 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import json +import logging +import re + + +from . 
import DistlibException, __version__ +from .compat import StringIO, string_types, text_type +from .markers import interpret +from .util import extract_by_key, get_extras +from .version import get_scheme, PEP440_VERSION_RE + +logger = logging.getLogger(__name__) + + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +class MetadataInvalidError(DistlibException): + """A metadata value is invalid""" + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX_1_2 = re.compile('\n \\|') +_LINE_PREFIX_PRE_1_2 = re.compile('\n ') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +_566_FIELDS = _426_FIELDS + ('Description-Content-Type',) + +_566_MARKERS = ('Description-Content-Type',) + +_ALL_FIELDS = set() +_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) +_ALL_FIELDS.update(_566_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version in ('1.3', '2.1'): + return _345_FIELDS + _566_FIELDS + elif version == '2.0': + return _426_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the fields used.""" + def _has_marker(keys, markers): + for marker in markers: + if marker in keys: + return True + 
return False + + keys = [] + for key, value in fields.items(): + if value in ([], 'UNKNOWN', None): + continue + keys.append(key) + + possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + logger.debug('Removed 1.0 due to %s', key) + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + logger.debug('Removed 1.1 due to %s', key) + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + logger.debug('Removed 1.2 due to %s', key) + if key not in _566_FIELDS and '1.3' in possible_versions: + possible_versions.remove('1.3') + logger.debug('Removed 1.3 due to %s', key) + if key not in _566_FIELDS and '2.1' in possible_versions: + if key != 'Description': # In 2.1, description allowed after headers + possible_versions.remove('2.1') + logger.debug('Removed 2.1 due to %s', key) + if key not in _426_FIELDS and '2.0' in possible_versions: + possible_versions.remove('2.0') + logger.debug('Removed 2.0 due to %s', key) + + # possible_version contains qualified versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! + elif len(possible_versions) == 0: + logger.debug('Out of options - unknown metadata set: %s', fields) + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) + is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') + + # we have the choice, 1.0, or 1.2, or 2.0 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.0 adds more features and is very new + if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + if is_2_1: + return '2.1' + + return '2.0' + +_ATTR2FIELD = { + 'metadata_version': 'Metadata-Version', + 'name': 'Name', + 'version': 'Version', + 'platform': 'Platform', + 'supported_platform': 'Supported-Platform', + 'summary': 'Summary', + 'description': 'Description', + 'keywords': 'Keywords', + 'home_page': 'Home-page', + 'author': 'Author', + 'author_email': 'Author-email', + 'maintainer': 'Maintainer', + 'maintainer_email': 'Maintainer-email', + 'license': 'License', + 'classifier': 'Classifier', + 'download_url': 'Download-URL', + 'obsoletes_dist': 'Obsoletes-Dist', + 'provides_dist': 'Provides-Dist', + 'requires_dist': 'Requires-Dist', + 'setup_requires_dist': 'Setup-Requires-Dist', + 'requires_python': 'Requires-Python', + 'requires_external': 'Requires-External', + 'requires': 'Requires', + 'provides': 'Provides', + 'obsoletes': 'Obsoletes', + 'project_url': 'Project-URL', + 'private_version': 'Private-Version', + 'obsoleted_by': 'Obsoleted-By', + 'extension': 'Extension', + 'provides_extra': 'Provides-Extra', +} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') 
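+
+# An illustrative sketch of _best_version (not part of the vendored file):
+#
+#     _best_version({'Name': 'foo', 'Version': '1.0'})
+#     # -> '1.1' (no version-specific fields, so the preferred version wins)
+#     _best_version({'Name': 'foo', 'Version': '1.0',
+#                    'Requires-Python': '>=2.7'})
+#     # -> '1.2' ('Requires-Python' only exists from metadata 1.2 onwards)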
+_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +def _get_name_and_version(name, version, for_filename=False): + """Return the distribution name with version. + + If for_filename is true, return a filename-escaped form.""" + if for_filename: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' + name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + +class LegacyMetadata(object): + """The legacy metadata of a release. + + Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a metadata file + - *fileobj* give a file-like object with metadata as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document the mapping API and UNKNOWN default key + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._fields = {} + self.requires_files = [] + self._dependencies = None + self.scheme = scheme + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, fileobj, name, value): + fileobj.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _remove_line_prefix(self, value): + if self.metadata_version in ('1.0', '1.1'): + return _LINE_PREFIX_PRE_1_2.sub('\n', value) + else: + return _LINE_PREFIX_1_2.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + # + # Public API + # + +# dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with version. 
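+        For example, Name 'foo' and Version '1.0' give 'foo-1.0'.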
+ + If filesafe is true, return a filename-escaped form.""" + return _get_name_and_version(self['Name'], self['Version'], filesafe) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + # When reading, get all the fields we can + for field in _ALL_FIELDS: + if field not in msg: + continue + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + logger.debug('Attempting to set metadata for %s', self) + self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + if self.metadata_version in ('1.0', '1.1'): + values = values.replace('\n', '\n ') + else: + values = values.replace('\n', '\n |') + values = [values] + + if field in _LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. 
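+
+        For example (illustrative): update({'name': 'foo', 'version': '1.0'})
+        sets the Name and Version fields.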
+ """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + "'%s': '%s' is not valid (field '%s')", + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? + elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning("'%s': '%s' is not a valid version (field '%s')", + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + value = self._fields[name] + if isinstance(value, string_types): + return value.split(',') + return self._fields[name] + + def check(self, strict=False): + """Check if the metadata is compliant. 
If strict is True then raise if + no Name or Version are provided""" + self.set_metadata_version() + + # XXX should check the versions (if the file was loaded) + missing, warnings = [], [] + + for attr in ('Name', 'Version'): # required by PEP 345 + if attr not in self: + missing.append(attr) + + if strict and missing != []: + msg = 'missing required metadata: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + + for attr in ('Home-page', 'Author'): + if attr not in self: + missing.append(attr) + + # checking metadata 1.2 (XXX needs to check 1.1, 1.0) + if self['Metadata-Version'] != '1.2': + return missing, warnings + + scheme = get_scheme(self.scheme) + + def are_valid_constraints(value): + for v in value: + if not scheme.is_valid_matcher(v.split(';')[0]): + return False + return True + + for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), + (_VERSIONS_FIELDS, + scheme.is_valid_constraint_list), + (_VERSION_FIELDS, + scheme.is_valid_version)): + for field in fields: + value = self.get(field, None) + if value is not None and not controller(value): + warnings.append("Wrong value for '%s': %s" % (field, value)) + + return missing, warnings + + def todict(self, skip_missing=False): + """Return fields as a dict. + + Field names will be converted to use the underscore-lowercase style + instead of hyphen-mixed case (i.e. home_page instead of Home-page). + """ + self.set_metadata_version() + + mapping_1_0 = ( + ('metadata_version', 'Metadata-Version'), + ('name', 'Name'), + ('version', 'Version'), + ('summary', 'Summary'), + ('home_page', 'Home-page'), + ('author', 'Author'), + ('author_email', 'Author-email'), + ('license', 'License'), + ('description', 'Description'), + ('keywords', 'Keywords'), + ('platform', 'Platform'), + ('classifiers', 'Classifier'), + ('download_url', 'Download-URL'), + ) + + data = {} + for key, field_name in mapping_1_0: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + if self['Metadata-Version'] == '1.2': + mapping_1_2 = ( + ('requires_dist', 'Requires-Dist'), + ('requires_python', 'Requires-Python'), + ('requires_external', 'Requires-External'), + ('provides_dist', 'Provides-Dist'), + ('obsoletes_dist', 'Obsoletes-Dist'), + ('project_url', 'Project-URL'), + ('maintainer', 'Maintainer'), + ('maintainer_email', 'Maintainer-email'), + ) + for key, field_name in mapping_1_2: + if not skip_missing or field_name in self._fields: + if key != 'project_url': + data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] + + elif self['Metadata-Version'] == '1.1': + mapping_1_1 = ( + ('provides', 'Provides'), + ('requires', 'Requires'), + ('obsoletes', 'Obsoletes'), + ) + for key, field_name in mapping_1_1: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + return data + + def add_requirements(self, requirements): + if self['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in self: + del self[field] + self['Requires-Dist'] += requirements + + # Mapping API + # TODO could add iter* variants + + def keys(self): + return list(_version2fieldlist(self['Metadata-Version'])) + + def __iter__(self): + for key in self.keys(): + yield key + + def values(self): + return [self[key] for key in self.keys()] + + def items(self): + return [(key, self[key]) for key in self.keys()] + + def __repr__(self): + return '<%s %s %s>' % (self.__class__.__name__, 
self.name, + self.version) + + +METADATA_FILENAME = 'pydist.json' +WHEEL_METADATA_FILENAME = 'metadata.json' + + +class Metadata(object): + """ + The metadata of a release. This implementation uses 2.0 (JSON) + metadata where possible. If not possible, it wraps a LegacyMetadata + instance which handles the key-value metadata format. + """ + + METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$') + + NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + + VERSION_MATCHER = PEP440_VERSION_RE + + SUMMARY_MATCHER = re.compile('.{1,2047}') + + METADATA_VERSION = '2.0' + + GENERATOR = 'distlib (%s)' % __version__ + + MANDATORY_KEYS = { + 'name': (), + 'version': (), + 'summary': ('legacy',), + } + + INDEX_KEYS = ('name version license summary description author ' + 'author_email keywords platform home_page classifiers ' + 'download_url') + + DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' + 'dev_requires provides meta_requires obsoleted_by ' + 'supports_environments') + + SYNTAX_VALIDATORS = { + 'metadata_version': (METADATA_VERSION_MATCHER, ()), + 'name': (NAME_MATCHER, ('legacy',)), + 'version': (VERSION_MATCHER, ('legacy',)), + 'summary': (SUMMARY_MATCHER, ('legacy',)), + } + + __slots__ = ('_legacy', '_data', 'scheme') + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._legacy = None + self._data = None + self.scheme = scheme + #import pdb; pdb.set_trace() + if mapping is not None: + try: + self._validate_mapping(mapping, scheme) + self._data = mapping + except MetadataUnrecognizedVersionError: + self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) + self.validate() + else: + data = None + if path: + with open(path, 'rb') as f: + data = f.read() + elif fileobj: + data = fileobj.read() + if data is None: + # Initialised with no args - to be added + self._data = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + else: + if not isinstance(data, text_type): + data = data.decode('utf-8') + try: + self._data = json.loads(data) + self._validate_mapping(self._data, scheme) + except ValueError: + # Note: MetadataUnrecognizedVersionError does not + # inherit from ValueError (it's a DistlibException, + # which should not inherit from ValueError). 
+ # The ValueError comes from the json.load - if that + # succeeds and we get a validation error, we want + # that to propagate + self._legacy = LegacyMetadata(fileobj=StringIO(data), + scheme=scheme) + self.validate() + + common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) + + none_list = (None, list) + none_dict = (None, dict) + + mapped_keys = { + 'run_requires': ('Requires-Dist', list), + 'build_requires': ('Setup-Requires-Dist', list), + 'dev_requires': none_list, + 'test_requires': none_list, + 'meta_requires': none_list, + 'extras': ('Provides-Extra', list), + 'modules': none_list, + 'namespaces': none_list, + 'exports': none_dict, + 'commands': none_dict, + 'classifiers': ('Classifier', list), + 'source_url': ('Download-URL', None), + 'metadata_version': ('Metadata-Version', None), + } + + del none_list, none_dict + + def __getattribute__(self, key): + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, maker = mapped[key] + if self._legacy: + if lk is None: + result = None if maker is None else maker() + else: + result = self._legacy.get(lk) + else: + value = None if maker is None else maker() + if key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + result = self._data.get(key, value) + else: + # special cases for PEP 459 + sentinel = object() + result = sentinel + d = self._data.get('extensions') + if d: + if key == 'commands': + result = d.get('python.commands', value) + elif key == 'classifiers': + d = d.get('python.details') + if d: + result = d.get(key, value) + else: + d = d.get('python.exports') + if not d: + d = self._data.get('python.exports') + if d: + result = d.get(key, value) + if result is sentinel: + result = value + elif key not in common: + result = object.__getattribute__(self, key) + elif self._legacy: + result = self._legacy.get(key) + else: + result = self._data.get(key) + return result + + def _validate_value(self, key, value, scheme=None): + if key in self.SYNTAX_VALIDATORS: + pattern, exclusions = self.SYNTAX_VALIDATORS[key] + if (scheme or self.scheme) not in exclusions: + m = pattern.match(value) + if not m: + raise MetadataInvalidError("'%s' is an invalid value for " + "the '%s' property" % (value, + key)) + + def __setattr__(self, key, value): + self._validate_value(key, value) + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, _ = mapped[key] + if self._legacy: + if lk is None: + raise NotImplementedError + self._legacy[lk] = value + elif key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + self._data[key] = value + else: + # special cases for PEP 459 + d = self._data.setdefault('extensions', {}) + if key == 'commands': + d['python.commands'] = value + elif key == 'classifiers': + d = d.setdefault('python.details', {}) + d[key] = value + else: + d = d.setdefault('python.exports', {}) + d[key] = value + elif key not in common: + object.__setattr__(self, key, value) + else: + if key == 'keywords': + if isinstance(value, string_types): + value = value.strip() + if value: + value = value.split() + else: + value = [] + if self._legacy: + self._legacy[key] = value + else: + self._data[key] = value + + @property + def name_and_version(self): + return _get_name_and_version(self.name, self.version, True) + + @property + def provides(self): + if self._legacy: + result = self._legacy['Provides-Dist'] + else: + result = 
self._data.setdefault('provides', []) + s = '%s (%s)' % (self.name, self.version) + if s not in result: + result.append(s) + return result + + @provides.setter + def provides(self, value): + if self._legacy: + self._legacy['Provides-Dist'] = value + else: + self._data['provides'] = value + + def get_requirements(self, reqts, extras=None, env=None): + """ + Base method to get dependencies, given a set of extras + to satisfy and an optional environment context. + :param reqts: A list of sometimes-wanted dependencies, + perhaps dependent on extras and environment. + :param extras: A list of optional components being requested. + :param env: An optional environment for marker evaluation. + """ + if self._legacy: + result = reqts + else: + result = [] + extras = get_extras(extras or [], self.extras) + for d in reqts: + if 'extra' not in d and 'environment' not in d: + # unconditional + include = True + else: + if 'extra' not in d: + # Not extra-dependent - only environment-dependent + include = True + else: + include = d.get('extra') in extras + if include: + # Not excluded because of extras, check environment + marker = d.get('environment') + if marker: + include = interpret(marker, env) + if include: + result.extend(d['requires']) + for key in ('build', 'dev', 'test'): + e = ':%s:' % key + if e in extras: + extras.remove(e) + # A recursive call, but it should terminate since 'test' + # has been removed from the extras + reqts = self._data.get('%s_requires' % key, []) + result.extend(self.get_requirements(reqts, extras=extras, + env=env)) + return result + + @property + def dictionary(self): + if self._legacy: + return self._from_legacy() + return self._data + + @property + def dependencies(self): + if self._legacy: + raise NotImplementedError + else: + return extract_by_key(self._data, self.DEPENDENCY_KEYS) + + @dependencies.setter + def dependencies(self, value): + if self._legacy: + raise NotImplementedError + else: + self._data.update(value) + + def _validate_mapping(self, mapping, scheme): + if mapping.get('metadata_version') != self.METADATA_VERSION: + raise MetadataUnrecognizedVersionError() + missing = [] + for key, exclusions in self.MANDATORY_KEYS.items(): + if key not in mapping: + if scheme not in exclusions: + missing.append(key) + if missing: + msg = 'Missing metadata items: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + for k, v in mapping.items(): + self._validate_value(k, v, scheme) + + def validate(self): + if self._legacy: + missing, warnings = self._legacy.check(True) + if missing or warnings: + logger.warning('Metadata: missing: %s, warnings: %s', + missing, warnings) + else: + self._validate_mapping(self._data, self.scheme) + + def todict(self): + if self._legacy: + return self._legacy.todict(True) + else: + result = extract_by_key(self._data, self.INDEX_KEYS) + return result + + def _from_legacy(self): + assert self._legacy and not self._data + result = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', + 'classifier'): + if k in lmd: + if k == 'classifier': + nk = 'classifiers' + else: + nk = k + result[nk] = lmd[k] + kw = lmd.get('Keywords', []) + if kw == ['']: + kw = [] + result['keywords'] = kw + keys = (('requires_dist', 'run_requires'), + ('setup_requires_dist', 'build_requires')) + for ok, nk in keys: + if ok in lmd and lmd[ok]: + result[nk] = [{'requires': lmd[ok]}] + result['provides'] = 
self.provides + author = {} + maintainer = {} + return result + + LEGACY_MAPPING = { + 'name': 'Name', + 'version': 'Version', + 'license': 'License', + 'summary': 'Summary', + 'description': 'Description', + 'classifiers': 'Classifier', + } + + def _to_legacy(self): + def process_entries(entries): + reqts = set() + for e in entries: + extra = e.get('extra') + env = e.get('environment') + rlist = e['requires'] + for r in rlist: + if not env and not extra: + reqts.add(r) + else: + marker = '' + if extra: + marker = 'extra == "%s"' % extra + if env: + if marker: + marker = '(%s) and %s' % (env, marker) + else: + marker = env + reqts.add(';'.join((r, marker))) + return reqts + + assert self._data and not self._legacy + result = LegacyMetadata() + nmd = self._data + for nk, ok in self.LEGACY_MAPPING.items(): + if nk in nmd: + result[ok] = nmd[nk] + r1 = process_entries(self.run_requires + self.meta_requires) + r2 = process_entries(self.build_requires + self.dev_requires) + if self.extras: + result['Provides-Extra'] = sorted(self.extras) + result['Requires-Dist'] = sorted(r1) + result['Setup-Requires-Dist'] = sorted(r2) + # TODO: other fields such as contacts + return result + + def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): + if [path, fileobj].count(None) != 1: + raise ValueError('Exactly one of path and fileobj is needed') + self.validate() + if legacy: + if self._legacy: + legacy_md = self._legacy + else: + legacy_md = self._to_legacy() + if path: + legacy_md.write(path, skip_unknown=skip_unknown) + else: + legacy_md.write_file(fileobj, skip_unknown=skip_unknown) + else: + if self._legacy: + d = self._from_legacy() + else: + d = self._data + if fileobj: + json.dump(d, fileobj, ensure_ascii=True, indent=2, + sort_keys=True) + else: + with codecs.open(path, 'w', 'utf-8') as f: + json.dump(d, f, ensure_ascii=True, indent=2, + sort_keys=True) + + def add_requirements(self, requirements): + if self._legacy: + self._legacy.add_requirements(requirements) + else: + run_requires = self._data.setdefault('run_requires', []) + always = None + for entry in run_requires: + if 'environment' not in entry and 'extra' not in entry: + always = entry + break + if always is None: + always = { 'requires': requirements } + run_requires.insert(0, always) + else: + rset = set(always['requires']) | set(requirements) + always['requires'] = sorted(rset) + + def __repr__(self): + name = self.name or '(no name)' + version = self.version or 'no version' + return '<%s %s %s (%s)>' % (self.__class__.__name__, + self.metadata_version, name, version) diff --git a/pipenv/vendor/distlib/resources.py b/pipenv/vendor/distlib/resources.py new file mode 100644 index 0000000000..18840167a9 --- /dev/null +++ b/pipenv/vendor/distlib/resources.py @@ -0,0 +1,355 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2017 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import shutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, path_to_cache_dir, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. 
+ base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. + """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. 
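+
+    Typical use (an illustrative sketch, via the module-level finder()
+    helper defined below): finder('mypackage').find('data.txt') returns a
+    Resource whose .bytes attribute holds the file contents.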
+ """ + + if sys.platform.startswith('java'): + skipped_extensions = ('.pyc', '.pyo', '.class') + else: + skipped_extensions = ('.pyc', '.pyo') + + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return (f != '__pycache__' and not + f.endswith(self.skipped_extensions)) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + def iterator(self, resource_name): + resource = self.find(resource_name) + if resource is not None: + todo = [resource] + while todo: + resource = todo.pop(0) + yield resource + if resource.is_container: + rname = resource.name + for name in resource.resources: + if not rname: + new_name = name + else: + new_name = '/'.join([rname, name]) + child = self.find(new_name) + if child.is_container: + todo.append(child) + else: + yield child + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
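+
+    This finder is selected (via _finder_registry, below) when a package
+    has been imported from a .zip file by zipimport; it supports the same
+    find()/get_bytes() interface as ResourceFinder.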
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + # In Python 3.6, _frozen_importlib -> _frozen_importlib_external + try: + import _frozen_importlib_external as _fi + except ImportError: + import _frozen_importlib as _fi + _finder_registry[_fi.SourceFileLoader] = ResourceFinder + _finder_registry[_fi.FileFinder] = ResourceFinder + del _fi +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. + """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. 
+    :return: A :class:`ResourceFinder` instance for the path.
+    """
+    result = None
+    # calls any path hooks, gets importer into cache
+    pkgutil.get_importer(path)
+    loader = sys.path_importer_cache.get(path)
+    finder = _finder_registry.get(type(loader))
+    if finder:
+        module = _dummy_module
+        module.__file__ = os.path.join(path, '')
+        module.__loader__ = loader
+        result = finder(module)
+    return result
diff --git a/pipenv/vendor/distlib/scripts.py b/pipenv/vendor/distlib/scripts.py
new file mode 100644
index 0000000000..0b7c3d0b36
--- /dev/null
+++ b/pipenv/vendor/distlib/scripts.py
@@ -0,0 +1,415 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from io import BytesIO
+import logging
+import os
+import re
+import struct
+import sys
+
+from .compat import sysconfig, detect_encoding, ZipFile
+from .resources import finder
+from .util import (FileOperator, get_export_entry, convert_path,
+                   get_executable, in_venv)
+
+logger = logging.getLogger(__name__)
+
+_DEFAULT_MANIFEST = '''
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
+ manifestVersion="1.0">
+ <assemblyIdentity version="1.0.0.0"
+ processorArchitecture="X86"
+ name="%s"
+ type="win32"/>
+
+ <!-- Identify the application security requirements. -->
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+  <security>
+   <requestedPrivileges>
+    <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+   </requestedPrivileges>
+  </security>
+ </trustInfo>
+</assembly>'''.strip()
+
+# check if Python is called on the first line with this expression
+FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
+SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
+if __name__ == '__main__':
+    import sys, re
+
+    def _resolve(module, func):
+        __import__(module)
+        mod = sys.modules[module]
+        parts = func.split('.')
+        result = getattr(mod, parts.pop(0))
+        for p in parts:
+            result = getattr(result, p)
+        return result
+
+    try:
+        sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+
+        func = _resolve('%(module)s', '%(func)s')
+        rc = func() # None interpreted as 0
+    except Exception as e: # only supporting Python >= 2.6
+        sys.stderr.write('%%s\n' %% e)
+        rc = 1
+    sys.exit(rc)
+'''
+
+
+def _enquote_executable(executable):
+    if ' ' in executable:
+        # make sure we quote only the executable in case of env
+        # for example /usr/bin/env "/dir with spaces/bin/jython"
+        # instead of "/usr/bin/env /dir with spaces/bin/jython"
+        # otherwise whole
+        if executable.startswith('/usr/bin/env '):
+            env, _executable = executable.split(' ', 1)
+            if ' ' in _executable and not _executable.startswith('"'):
+                executable = '%s "%s"' % (env, _executable)
+        else:
+            if not executable.startswith('"'):
+                executable = '"%s"' % executable
+    return executable
+
+
+class ScriptMaker(object):
+    """
+    A class to copy or create scripts from source scripts or callable
+    specifications.
+    """
+    script_template = SCRIPT_TEMPLATE
+
+    executable = None  # for shebangs
+
+    def __init__(self, source_dir, target_dir, add_launchers=True,
+                 dry_run=False, fileop=None):
+        self.source_dir = source_dir
+        self.target_dir = target_dir
+        self.add_launchers = add_launchers
+        self.force = False
+        self.clobber = False
+        # It only makes sense to set mode bits on POSIX.
+ self.set_mode = (os.name == 'posix') or (os.name == 'java' and + os._name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + self._is_nt = os.name == 'nt' or ( + os.name == 'java' and os._name == 'nt') + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and self._is_nt: # pragma: no cover + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + if sys.platform.startswith('java'): # pragma: no cover + def _is_shell(self, executable): + """ + Determine if the specified executable is a script + (contains a #! line) + """ + try: + with open(executable) as fp: + return fp.read(2) == '#!' + except (OSError, IOError): + logger.warning('Failed to open %s', executable) + return False + + def _fix_jython_executable(self, executable): + if self._is_shell(executable): + # Workaround for Jython is not needed on Linux systems. + import java + + if java.lang.System.getProperty('os.name') == 'Linux': + return executable + elif executable.lower().endswith('jython.exe'): + # Use wrapper exe for Jython on Windows + return executable + return '/usr/bin/env %s' % executable + + def _build_shebang(self, executable, post_interp): + """ + Build a shebang line. In the simple case (on Windows, or a shebang line + which is not too long or contains spaces) use a simple formulation for + the shebang. Otherwise, use /bin/sh as the executable, with a contrived + shebang which allows the script to run either under Python or sh, using + suitable quoting. Thanks to Harald Nordgren for his input. + + See also: http://www.in-ulm.de/~mascheck/various/shebang/#length + https://hg.mozilla.org/mozilla-central/file/tip/mach + """ + if os.name != 'posix': + simple_shebang = True + else: + # Add 3 for '#!' prefix and newline suffix. + shebang_length = len(executable) + len(post_interp) + 3 + if sys.platform == 'darwin': + max_shebang_length = 512 + else: + max_shebang_length = 127 + simple_shebang = ((b' ' not in executable) and + (shebang_length <= max_shebang_length)) + + if simple_shebang: + result = b'#!' + executable + post_interp + b'\n' + else: + result = b'#!/bin/sh\n' + result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' + result += b"' '''" + return result + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): # pragma: no cover + executable = os.path.join(sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: # pragma: no cover + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + if sys.platform.startswith('java'): # pragma: no cover + executable = self._fix_jython_executable(executable) + # Normalise case for Windows + executable = os.path.normcase(executable) + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote: + executable = _enquote_executable(executable) + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. 
+ executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp + and '-X:FullFrames' not in post_interp): # pragma: no cover + post_interp += b' -X:Frames' + shebang = self._build_shebang(executable, post_interp) + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. + if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict(module=entry.prefix, + func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and self._is_nt + linesep = os.linesep.encode('utf-8') + if not use_launcher: + script_bytes = shebang + linesep + script_bytes + else: # pragma: no cover + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + linesep + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: # pragma: no cover + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. + logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + name = entry.name + scriptnames = set() + if '' in self.variants: + scriptnames.add(name) + if 'X' in self.variants: + scriptnames.add('%s%s' % (name, sys.version[0])) + if 'X.Y' in self.variants: + scriptnames.add('%s-%s' % (name, sys.version[:3])) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. + try: + f = open(script, 'rb') + except IOError: # pragma: no cover + if not self.dry_run: + raise + f = None + else: + first_line = f.readline() + if not first_line: # pragma: no cover + logger.warning('%s: %s is an empty file (skipping)', + self.get_command_name(), script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: # pragma: no cover + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + name = '%s%s.exe' % (kind, bits) + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + distlib_package = __name__.rsplit('.', 1)[0] + result = finder(distlib_package).find(name).bytes + return result + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. 
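+
+        For example (an illustrative sketch): make('foo = mypkg.cli:main')
+        generates wrapper script(s) named after 'foo' from the export entry,
+        while make('scripts/foo.py') copies an existing source script.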
+
+        :param specification: The specification, which is either a valid export
+                              entry specification (to make a script from a
+                              callable) or a filename (to make a script by
+                              copying from a source location).
+        :param options: A dictionary of options controlling script generation.
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        entry = get_export_entry(specification)
+        if entry is None:
+            self._copy_script(specification, filenames)
+        else:
+            self._make_script(entry, filenames, options=options)
+        return filenames
+
+    def make_multiple(self, specifications, options=None):
+        """
+        Take a list of specifications and make scripts from them.
+
+        :param specifications: A list of specifications.
+        :param options: A dictionary of options controlling script generation,
+                        passed through to make().
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        for specification in specifications:
+            filenames.extend(self.make(specification, options))
+        return filenames
diff --git a/pipenv/vendor/distlib/t32.exe b/pipenv/vendor/distlib/t32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..a09d926872d84ae22a617dfe9ebb560d420b37de
GIT binary patch
literal 92672
[92672 bytes of base85-encoded binary data for the 32-bit Windows launcher omitted]
literal 0
HcmV?d00001

diff --git a/pipenv/vendor/distlib/t64.exe b/pipenv/vendor/distlib/t64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..9da9b40de922fb203df6b9a1d0ad4139af536850
GIT binary patch
literal 102400
[102400 bytes of base85-encoded binary data for the 64-bit Windows launcher omitted]
zE2fB=?g1|ULxh!!ldL&cw;vq0<}ki*Z(*@ClXXpQll9rfMtbI|xv`IOtobuBpj;|# zD`FUt!z`XGNB?_?GMIRz7;+5b1*`8GBnI7x)kv?buZgsJd7JVpkq?BVT+&%r6AeYm z*XN~(37Q9dXIpy+Q9t(#j`7R_LqiiXGTBz@5W0CLW=<`NJa1P749x49Z4Bs1+FMpo zrq>*Wxu!UUA@fqi;91D#WYa)vK+37lN(&x%h3+_^p9XzQy@YGMy=#`IP~x!?ABp^i zbKSq96fiD0_djfmpF*TA%Bu06)9R6y8yL4@|~{DwMO^#9Z^C&J5kx z2yH||QK-LKfK0Rksrgc;evppW>g8oTR^idSB4f_eG!~q|1KkVxbSsg3b-qAN8K@)|-e{sR|lHgSWqD*e5cz zU4wi_R3ft_&%nZ6iw2)H_9IW-D7tE-X6If2Gqu&vczV(Hbf@vOU5x&ZrFGuy@psD|_MyjrEPMYq@0yv zWVy>)C)1z6PVMNk=yZchRz-tvqod|liOV1VVF|OkE!32<{vhS16gRe1l9QKkuyfMP z78JMlioQ^}bt79io6j+}uL;4^R^dn9s=p*WeV5~AFRd+ylUyRHlHb6b^ae+b?AaQ= zp&QFF2{5^rmRYzjr*80W$hSL&XsPM~QgmOyE{E4A#9Q~PK;X%6$z^?9H`b2f?DYxP zk;9qd{sY`|r90=b?>^nE=;gv1zNsQ^uP~OE(3hX%iFkz!XFIa~7D#%kryf3gvR!CB zHY?bjO0#|~%@C89-q>@&K$g(fcGovDuic{+EpoAws&-nHm$O|f-tINtnty?(PIpi; zDhe5;cTc+7BcKn- zlkS5NSSxMw?{{sDgiw;rJly6CR0fBW(yX7U^#*3K+*;-C>=TA>Ce5$x47l#sXTu)zCu4MnQcES$%TaHCa!ab zrEwZ^PFPHMgKO2hPzI6vlZ~$ZU!mzJgWFZJf!KOQ^AV~gyeR_1mDec{ZJnNp)j44? z)yy@YL(`#3TZLJrg|(!1vR|$3!&{Q^6&ACyB_ZfeCa_7XMgg)fTI&er5yB$}`!@_x z4GN{XWI`%5hDkQ#ZHf#_eXWui!7kbQY&w^oPEim$)*tmWQi{4TC@xC}bKmscZ4d$$ zkKHPstV0+NM@^kWA$WC9v;E^t?=xKWo^b0iw$QXHVk@^)+L@hRKkG1w*c&D_prO@? z){_|jL;-s=C~4S{!X*C_`Zv}CdKhjVBhlTZlV z{B#0x7A*C#gLjT`>Y#3VvOhB!x0rIqQqR2LeYjS;ES$iwrvcVIKeVgkjevp!Sg0u_OGMgV7eoyB%vj zTG7w#e}?d-jTEP?Wme6zPO{K^5q%G@iJ`Lj*fH~jQ)5g1$A-ltgm!A`r6seZ%TGWVe5y|S~yohC+=(+@K-8^kzmGPA|+`oS?XCI z?33ivzj3U265j}0^z1%E)Twtkf8M^nlirW@BHQ14yg63N&l#LdydJjrTdm$>B}qNR zjP%rBj%Ih}j=QY~7HcB$bil5bWWx#VgMu}WomqK^#Po0onP3C0vR-%J00l`;SzebnwjV^Q;T0?)60IL|vhFq4468Z#iw3)Q&D&$%V4+z0L#* zIoZ*rS}o^wiq5z|yZ!zc>y}@1;ks3NWow!pWhfsKP+w-Y&hOGmhZ@kqNy^qf9ak`9`w=*~&R|`^lsh+DoX)k~sbBFrlJ%Uwf$le+pw{7$DuX8=tEJyR}SQeHQ zHOt4LvDs6|9bT>fmGyNkn}Hox#zy~ZtkwLnN%6S4ty!M0w6clcGmAeVqt>4ME^Pg| z8P*0nvg2!&9#L5ZUBzT18PI%*Xv06+aJLN) z+VEo=Mh-IJoHo43hSO}gz=pLp{DlqwXu~IM_<{}Jw_%$Nm&rRr{C;4=;j;71FU^K- z8_uzz*M>LQ@OB$MV8h)ueBXv&*|6uCCf*bqUSz|`HmtPaY8&2a!%a4P)P_53*l5Fp zHhkZPZ8q#S*reCThNs(bqz&CR%(LM<8!ootY8&2W!@F$wunqUx@MRkwvSF(Y!-tsk z2iWjD8|K(>o(-4T@Mas{Wy2?I_^b^N+R(CLn+;>_dOO{QBW;*r!z*pL)`q%r`P0X) zm!SD%@FJ%ueV6y74XpURZC+zw=Ww$>F!lomb?x5K|K@97zEaFGwC3`)azV2-!qBZL zGfbyj?KZGb6{;%y%v3YfBsD^=HtJu{JX)Qna#WUWEB-E1*(y^_QEqMPQ66=nTFAc( ziAQ2GzanM+4OdR`VE#F^o!DG&|C9XXs!`g0l(v)5dDzL%WkbQ;s+gP>lg>Q;R*IGW zU8c&_;x6o`;MndH6{NFdP;s#Q>XJOWtV&f^J5@;%TT01=g#EA zn1vc9UGXRBNIV6^=OZUYb_gu_P13n~!3ZH%mT(=_(zvse*3oon+F;o%1;m%xsPC>5chcCkA zjw&h|Wej<+jmxl(au*jCFPv8#VwB6u7awPyQs^xg=jp{2-k^iaH{Vx9W+(f~bmWtL zi*&`iDk@8*zHJg?GMP#LP=$pwqFk#rSlDdLy76>&;@e2WNwy&^7&M@=f8 zrwF5_7kGV@ML6)AR9R|c&}D(IvMOYPUd2U=iaS`$@EKEX`6BIWy3d9MUWrG-OBjVe zg&&2xB%#8>BO;@^MaOiH?GYE>(~*$at9PHIQ~Dn(TX-@8CQ}U)xn?3_^>Fg`6 zylT$X*UZf?m{(X-Trz(_=?|_gTezsa;=0NzuW#{^rPtqZh0rpJ&rU#a~$bvfdd%yWc)a%el#`Pb1; z&{g;;n>KdINnE90nF8;*w6CsWm`4AXs>bq9;v7LdE^=l15$R8yeC7O=9z^n)t5eJY zlTsCFyAqxAh1%}|N>l|_z+VX={h9PP(l<&wETz=*F)h;Vw^a9S9pe)BQqnBNy^Oy% z*#4yT#Ol^%;un8KxEE@7X|a-~lhmYNapJOwzt@vTNmbk`_$M)%5T&>*qCT8ZMoMX7 zU5KspluqsQTJmr5?xj^r>7^E{h_OujDOV-fNqkbL!IDY;)J~@nnobKzTl!S#bDS!L z5up%Mr8*l^3Gpq$EM=BFiH&fA&{IluBBd7TJPHSyF+j>E9M&)037ZZT7#C-npUlD&SGW1B;2Pk~n47Q3lmcY)Qek*-o)w{>O#l`~LZ#OU`Ak zw3Kwcv|*u8&?Fogti|@!g7rOGT@XsItNJddR;9j7PblrXT=y2zYZlW-O0QL{V+it5 z5SsFg?!-@$D~VD12h-oR<;*_!JC1QajY&_m5v@<}$Yr#$44Y?)LgjRqg$K3H4U4X(zpm zjFlCb1(#7kMmHHL4aH>iF{6VCBiwjAjbsfbzYJy8Te8z9zovj$$BE=we$}?w%xaWM zwasku=wIV86>)cEgjo1M0q zldSsJ__c9A-8Qea%>!+716w0u;7;3|XPXDv=KMC}zSK5n*yi8_+G(3_u-%nyma~KM zYdd1X%P|f49k9*0y6ww+K>CCdT}PK=C-r1{y-YQXe@;e7X{pj8&5SY!Ojc^bB_X#yH<0+68@q$}eS+udECdv2r&Rrr7L@=%%`bCq!mtWkt#F+*INckIuBb+0ilT{M6K0 
zdB~5QPR(L5`3v&-DYdj>DZdze@G@ge3?PPoV*Jk!3OG;rmqI^i{+;M{qK$`|`L=(x z>coG?`(LHn|AndV_pf6Nt5y5IaAZE;=U*_q^FKUrLjB7K_&>}6VXH#_j2is!{u``8 zfvdyX|AmKrzkk8-Cvx<^YV%z!KvL?%zhH`DYR407#sA-3V&kK;|L<4Yc3%mq9XD7Q zy=(igde!Qh+BG-*aBbZ>|IPJ3`tkZ(ZvDw^KmFOyZ@=RgzufSvJAb|LH+S8A&%O8E zzv;KX`~3rd_~V~8KltZ|9)9G}$F^*J{4Y=Z^~t9iwmrRl$1^*3?cVe3b9=Mq2aWd3m{^ZHI^&MO}m{?<?4!$f(Sz8~Eb*E-4Pm8fTR* zDk@(x-CN)-MIMU%%&OwbDf50%T>bjQnuVDX8+g3caO^i}T7Wd=(*k zV`rjMxkxb!<`+*aUur_mME;k>EO0C3|_T2Zm2 zqT+=4ob1e8#wYtXgYiG9z*|t}EUv69uXL6!a+VeN78NciuDsAWL=OQicS1Z%7Lg{|Yso}A-vV76P;ziz||Mql> zD;Jh7qH4$uRRJ@NxXxD&A*u5Y?DnmhChIdgL}S7)DfKRps;%-CRO;COrD)shOP9x{ z7w3aI(;1TJ{F`>vk*=Dc9sL&->niW)$7Yk6GbW8NJFFf3>y(F{In-HTmqNIuV`x%1 z(f^>Kkglw(e2L~iLU*d}lhpI^HP$JWF48HeobQtgt#YOmFQ#WGEpZkvtnglc_IIZ@ z4_2}}jRG=CyDSbdt1zT6vWWqr>L~bJI8z8 zxRDh_rsFeYI_Y1T947XTQN4@eRPS*;RPSky#`uOF6>p3&`|B?vF!_RS{RbwBufTgE zGzpsfjfg*Y;0}lC@9nAj7R3`soN|nft?bqm*%;0O-kVRqPtdT~NEJH{2|;DIim)DG zst36X>l3T`jB}_yV-i|>HpMl@HpEm!=Xc9X>=�?dYNUd4WaURX_A__M4W}D0!0n z7SR(e=lh+Vr^EqKYQV(ghEpn%^81ij&>v^w)H{5^yoQ|?r%vNH+Pk{cX9uSFPLU`P2cV+c3QVkzP3P% zS)-Nul6VD%p~E{aEK!9y1##0*lRD=zPQv-?|YQT)1 zY5;XPU|MqPDNTJEdo?6fBzJ8;+tJA%YsMCucR;Q19NSz+GStZ!vDhQXT z%NVU<$F!I6j0~l&=j$6xdti)87{~gnvYnrV2c=i~wtA5C*SeJ&m(?CuVz+SBZA^G- zke@#DF!#zm4TJ;{|&~+x^ z^DpJpJ6|yTufbp83x)3$sd|lzSG{iSkr$?U*5*MkRUqXn#rMDQQe%Lzt<@yu=gT z8iVxddo^=FzFr>+btqr|So*XCXhh!zP5a-f%aIor8KxrV;ohk&X!~B+_l=<+?5_IG z08+Po$Mmky@kyMTHgV9V2eg4k(+q9G26R^g?xLJciH(ki_=>pv9;va^Rifm9ez`yW za{n=XTMg|EuL!>$Ek}+^?5V*#Cv;N@-e~wAI3}(ktb4fXJ|-%)Uuq9Ea9oiZ7+PYBD20Y<`e7+g2#`8DA)!KJ@$`jps?V(n6`CG1V(A;` zALttr6T7KI%9uDtMw9lq9;#L9RlZMxdDd|eA3W5Dd`rI?rtKIT;GsU_aGPew4^KFV zQ{p%L7Z0DnE6`K(N+tZK`-m9bCc8^rO>7?z`u>Qf$d^aj0>cK!s=?#>slop|wKciv zl*T>{y($v(6Y?~_ObgF5?c0o5L0VkR02Gke||+Zxl< ztueA8IR4RX*!+@6{u7kr#U2%U+_d?tFZ|VeY|qNh;Zj549E9ts9DkSa_Floc^WnwBD^jP6(F0_;>ID-(T#q zo3`3vj2>e+H0b}8-z&A@0i|9G(&}`^jaz#(b#IJrh^mOpkH`y8mA+Z%)9<_bGW<(YOzd!vu6`wy4t2gu>DukeujsuQ^V@a{1Q6# z8$w^}9S84@Rei%!RdBu`4JItEn~I~~h?{2Smth0r)Ie{d8d#J-zvxf{+sDhieq-X5 z)4PQE(PHLDKITX4iiTAvGfOo6Wd%YQoiF;9rqiVLm|wRuz+087aJJhyv0MMoppO$_ zwe9ym=erHf{&T+D(Bc1<^W6i(Iv!`N4?}L4Y2-0EtZ-+kVUg2|ML?EU;9W3Ft-#b# z+KAN4NFdcFm8s=Q_QA+mJQbzm@>N!{_zoVIjES06Q0kpjUOAmbe_62|b|F3&6<4yn z&MaS4RbEz{>8&iwVzJIy)>D+Ls;YGUB0Gi|6U6L0%;?!5A@%oHa`Xlt=@GJ{<~S{8g$CmD`r=7283lsm!wSs-Wr8tZA2J<%}IO zvZ$;K8AjN2Zzcb;$@g?m&Ma46wsv?m+*4doF{!eclwZ=gOT-fDpDJq+;+@ROQZK^8 zvgrs8L`1C8BXWuh78jpjUtvm7Ngd3%zCx&TbEkTDsTU%HlB#yfz7sif(E?raqO7Hb z96Tl!NKDd7JtQSRsdIQlc9pw$o^SsA;>x_r;wq`yvm&Q?%Pudi^!f_QW-dYsRHW2E zvCAnhzt&eV2|=$UK+#0Rk}NKn1r?k&7B2A?FZHS+VrPP8EmbYy*^3}RL0Rbyor~;R zR5ZP!bWvuxk90qVS|Z=dD=!tQspsUZbqDk7nzG09IkE_$+2sgmG-dy${TPnth=QhG zp754hB)BUxPpOL~#FVUD!Q&|Z<>ahLb1L$7b!FJ3vMPzPpo|mFSBZ%vjp+(8>1wVP zs&?S7=X6S@P0d&!66$QIHe37~R!}*TsnTOkornek+vh;p?)CY*><3!Dx`?B)QeE8teDo?iQyU|r1Pzj3%@32JX0?l6O}MIA>TStKR}QLQdzmCIY2&m`XH>B9&L|JH zX!Tpu^7D!-wRC<_A^Za$Q1ic#SZ0(KUTc?oR|o3a-3jEa*5$vxievUON=c_mQwB`^ z*zO_3VwQ46NcTPhyJFB^XCgI3Mz|3v@I4N9cXlUL1n23EoZ3$<5D!GM50t`s+7Ynmh>a6 zn+&JZUbyRQIKu9`$o_wR|05Kr&Nt`kf{6vq$L;DT1YJ)KWv*{#7AN=9(M9~r_n+T? 
zDDWQ&{MRWEY;$AodTYcT!<2gdYUhh3FN@L#^Aq<|_=4?C_V)#6Nvo3iqWI$ZI47z1 z{iA_#d@(lcZo^ohxb@%*x=FkeR-l7V;+3vK?Btv+;!6FA{UAQEKbKF;F58@Gn;DXH zm}$2CbQ{V@An_x@)oC+5)$uF@I{6qz6x&7Y{F?krImCZ8pX7O!4OauDEH!-MUdt!> z;rxU?F?y$M{tZy_cMqSG^?p9__ZXj)=>GRSZ4HE`9hn~n;Wv7%bTP59q-_rbfJ#`%Q5tBx6~3>!Z3J^j0E z|BvE--(839I#s=Qmvt8#VV!=V_*eW!hnUKMGWFm!2c2r}!5c+V;C`*kHpgHr#B(O*Y(U!}T_-wqb=0XWKBt zhN(78wqb$|V{E8w*!H=}XR8e@8#dYSfDIdMxZ8#eHr!&vO*UL_!)hB=*f1D>zHQF4 zVTKJ;Y?y3AWkc&TlfO4?__7TfY4{#ziOHM{TqLs%huW4IqA;soz4HM z|9>_7zdQcU`RQ!#oc@2z|8G*@yY88Uclt%xzhG$(gq!xd+lImRKGAN+Bk?F-uzy%@ z_Y-B)O}PIqTxtJqv*WF>Ml;pMtX1l%LF#B zO1@?^UgvwbcbIwu_yP;~8Q3=hxAx`?BKQ-)p?#16fTsX+_-+Rmcrgp>6z~k- zaD)q+PYKg7zCY`9>=S^e@`?Xc;1s_6USTQ^ID$od5qK)FhHvnxVd@^>+kAb%4*`d8 znL_MSfO~9wFYuky$$vlm0GACk@e3T|G-bI6IAD+oD=?E!^56#EZ`*GIzGmY^XOOqS z@Sx-k_)EUs;P(J`^1Tec8yGkX88)~YN<4g@gKq*p!?(UKet?HlNEdpx0k1jRlqDZH z@Enr|C-4>IHaYk?08AaO)B6nJM7QfIs4svabiq^gIW9Iot8tIO}}{LIEb2VOth*e?S{BA}D@F7S_hyTLaD&%V;oa5!)&pOi~r z(N!E}kvM^$^QFQw0&kpS>hU(<4Odey*e?S%@JSg3-ggagVZRC3Iv0K$Kt6#z^9}DM z0IT>UEidpEJ}KjEz<_PfC3JN)-|fV+9{6{@P2f$yg@u#_yc+m}BFX|@2E3`*q`MaQ zw-WS~Vt)X5&H}rPz$xYMANF~`Yb!WIDPe)P^65MP`(J1L4*-6|Hyplg1rDv`zC3se za1o#6A9YtMs>;wd2KexD`eFPCoV>!slLH*Kl70jG;lLTyv^{Wv7N4XWxr#oMZxi-= zfnhas44WGoh7P!&I1y=mf#3OLyTGJ;8 zEMI5(w+dj1pRt3!dI50G&8FWLcpIP8ufV-FE^uW%yn+9OA0b!Zy9j&+@aRto4=(V9 zpTVo(jll4q8y-jnesu@=5I?|Me?dKh-v<2WFX?l@KL=KBPz*}&0C2~zX@lSb|9m&? z3;bcA^B&W03q0puV?P{N&nGn52+X^We1hi#WA5iXIJgt|555d=4ydR{_&nfSfUooA zfWHC!l27P4{I~S;zgMaZd-NHqhxjD^hk;K%K%WBM0DP5C{2u`J{DYxe0xFwn=B z555$5$xg$Q9^jjNQjbl*?Yr1-$IlL6-`yq;$-pIil82>&KT93qXFV`#uToEdCje*i zNnbr1_-h;A2z-@K%5?yE_VXqW!+|^br2p9sJmWR`Si%kl?&lNUei^vpAazY&Q4Rc@ zPx2t}j@PMo>~{e7zDa$73taye`v~9y@8OeiP2l7v({^%z(TAv7{KNoj_+(ymJ23Tq z`Yh}P9^-ohT>V3-QGAkaDzL)F1^(8?w*V6@`Vjp00p8urSO$I%u-^xSB@Y9Dv-r6D zpwMfnZV-ELxrZWmR^0dzDEB?Q@VP*_$04{txvwF91j@Yz!3D~_1HlE#{Q$uQF0gUA zBOvGd#a^JCsTW+}E*ls4iH*ydc{%Scegw*yR>1{k+jt&O&N_>|Ksoy>`4lMUdc|I# zoTn9BpqxDwT%epQ6vhjY>}7qj1t{Sff#MF^R!0HxRNW0g^Q(>qrUC)_XW|LdH#j|A*wb26ei!x|<_a&IR!fnF6TR+Zgw;51NyI z#{4Fx-P~{|V;O6AZ z;A6JH-;@7FjvT2jx#SWxYt}4PT3V`nKA)-<@72}(uU5C;e!IH={`=L|ty@(?LxXzt z)mPQAW5<*%U&WX9jGHe9*&j~Kjx?>_VhV>!`^)q zp+x8tbKnoFdJ6&gwTbzgHDeNU_U^;S&3GsN-~M8Bn(?5`ZO`w!=ZpvTYQN%6xDQ0}SKT0H=o9#6=2LA)wX^t#zRerj0@_*AU!t&#v-sCa&<_}A z9ly?-L@x2IwSNuTsE!V7SlJX&J)xSVN{253Jd97X#z~;Ki@%QHB%ZFmmDmTWRTy6c zUo@YCZ(u<9Jb};2_g#EGZrnK5WoRUwK3&~#!woubK^|Yfe!cqL?|!Eqe)wU{scQWA@yg{wA8dBErrp%3Q`O~{U#_mW;tF;3)mN((MU&M0`SaBe ze((deaN$B#S&5!e`j6$ym#g3SCaB-vFkU^hB3;$Y&r&yEo2hOqcd1`iW~jfcx={V0 zI$Ql>)jajs?GVL+`~wMy01)u~%+;NB6uwjGRxN)Pp z_uhMT{cdKW{KzAZ=)UZUC!SC*ZQra`e;QDavg~7R)_l?2+^jzTZF) zaA2C4^~}XL!#p(ho~`g{pqCmS7_F`fOjXwhu2Z)MZc&c}9#k&}_6Pe@)ratBx#|DE(kxrm9a9AsMZmF1L6Nj_y)qiNcck?!kj69{TtYGf79vRVQ=A(pv zx|R7e;SUi0?}UGs@Xdt(gz$$ugdcSl>mL`qeiid#FY|ELXZu*ov~H&nzL=;22S%&F zn^RTb&~+;C!7VD#`k)FN-XF^Us6K>0gYc<@znJj3geR`nQo>)Kr~>OotH7_Os=%MF zQ-N)_5a)v`@ZSFR@Jv4PA_?D(@ZAX?OL+R9Rwgk4XD=1Fc(e-4nW_ReT&Ds*zeNQe zeNY8n-rpWx^;CJPFY}#2YCXE{HluxXADrKc?%qJ+l`1g0LItMYqypD%P=QE2XsfuC7)puZ zhs1C%G3+3Qw~3)OF`)iAI-owE8c>I?3#cQv1k{%g2GrO41EKK!2|t4H69_+z@P&k5 zO!&2gzaufA?i(FY4^IuKr>_gBeYXVE8xICl%l`K8F@zTiBoV$p;Rg}^EW!^b{Kbg@ zHG6bGT{ktLZoZDVZV9NT9t^01``g1G-!;rNzmArfnG;T!DpOt)+Hei zu8EVgv)%5=nG+_s+;c~y3>`XT@Fka=IoI~!&c>Pl*&uHB++k;nhf6MT+U}EcGqWa7 zAo{FK*My0#xx>Z7kRfLbvfZ+7 zL1Gwu#vtdJWHMXgpFBBx!sNN9_3oF9J04PO4`!Dl$h zWOgY2xk-I`_r7$zj$oKBYhs~b5A|a!(1VQxMybNW>21+o0U88I1jiFJx9ksG1omg zE7zSn>GV^R>?BBG?%63LawxyVpQ)1wQf=-<$z(xH&`-xdIz1N>=VndL)rHV`4AP(c z&vlQ^kSt8j&7FJdq)EMd_ofgb_qpeFo0~BzizF_?{q#wbbSVdf+%p|fjBE-6LXpW#gDR5ndiQE;F;;eeT?et|{p~ 
zqod9vh0M7Ud$zkh{kfUKcT>{i!=p3AJ#zx=Iyo2|`u%_Hoe6Z6)wRbjRi1!VV&7{G z6q^tf0(lygA^`#k5-K=BwMB}y8ZinAVTepmAYrJWAVaklp-2V_n0phM1O#Q0D#cb2 zL~$rVM2#SVGBhf(-~V@VPrQTx0eo-0x0YwEopZl?zwewqpMCZ|_Xhq*!BWq^=)$gd zbi6U8t#qy8V}0&h=ctQX-`GKX>=N-Y{7-Xt=>1kLI<}RmM1JhXmwc~FlOlHM)Ur*b zk0ZvHpu2QvbL`Wyk7L`7#|$q2YHPB~>gJ^EP;jwEkW&t46VGL9jLKkD#d0};luGg3 z$>S6s&)t0U%`P4pm2quF>@jax`@n=godVMbbqp*S(q`=Q_V-f$oYO zh#7{df8vQJ0yAgM49uD}E0C9$7nn0=j*Ar*FJA28!4)f31c(*3u9+8@sd!_7VukhV z*9TsG^;H)e?Alop*tv6OVArl)fjxWn1U~)r)4&&>?{l%ip+koP-+c2;VA{Vduu!qW z>t9Fs*~Erg=vrx^lU+{jrG;*|7P=`~=;j74vL%5lZB^ixwjnUkwgjfxdx3@aq1Nwj z*O@?~7Q}ARdR~j|)c@4;SL^vyJ%62^zfI2%)br!@{471cNYDRG&zDrJGvCK4V&C_1 z%KytarGB8i)vsS)Mx(`I2->_Ls)P)yPQ>_;lf7xYCE=#;9 zs&3N@bX~t;qb5ys??q8Di3y1bfd+BS;u9~alX#JCxbT7te%7GjW$}sU*NOjG_>I`u z_-2Ws_ zyg1#Pn0QI;nvDOG=pX#MuFh{%r*`cG?E@NK_S173=uA5-v?hXqe#WYoHdZ*07;d$Ur zL5)DH{+?C)fu2^x9#F(y0X6lSng2K1iKsu>+~9k@;KwmBF%8uNmkJu!u3cL@ApP8y zZm5T%y2t3F2@;hnphjF#LsaAu(Q3BLIcJ}Pqw`$#4xo_XTk3ReCvyZj*?A*I| z?}zG34jw$XPy5nc@4ffl!k1rud7{qwBqS#%OY@Pw`>F`zXm{o0b~^k+)-23FYTUSS zBlYl#^m27t^li7@mU6Y4ZvLtAh*PwWw$-aw+s7Y&Y+6rEc^48?R#H-8+J76Liw4sk z{GX+zrJsn__cw3e{M6dDYqPJp=9(572RStmTr9Xq04~V2P8fI%kNNB;f=dJ!D_7B? ztgP%Sz1G56ZT#zr->=CAf6f@KTD58ne!|P)zj5P6Q;yVxpDka$+~KVpl6~~iN49Cx zCR0wq-N!SE5qN)>o|(OH;lh#2mMt5pu&>kDv17Xq7%(7h^5n_g6jy^=Q$fPT7hl{$ zxs9GzU3C?_i%v^Ri)UQDJ}vwY95`T#Wu`T1)TpI+IC=N(-KLyw1)pDg?KOM*?YAAC zl8?h3+3eV{!`^-OU3U#l9u)Jq*V4IzpMU;&ne^^!jamBDS6|r|Uwm<3>(;H$eel5t zC7SC?o)Mjo$=+HDhsiv9y7()eGR;*1;O_yxdO#)~z#m@G``26po(_8O@gSVdgJf&=>phQ=?L#jP-#{GMt9M~-l6p*qdsFJEsSz~6(Hh40VAPOt;?<<{+H*KX5yIkt-b z&>$IH{fgODq9OAw)jN(T-~GB-^Hol*_E!ga-BV9_QgnGzixw^7qzmJqLvnERcsl6e zC3*z^tD*zG!*gVW-k|^ZUG3Q%LS2Rb_3xI`aE-n{*>G+W^r8{(ZKXUHW{v zSyUH)tr_P1Kl-or$N(=7dhqvpj{YML^a|bZv|!`t8G4Ux_#V5U-SWQKO`_pC>Hjap zZ)j+@$*lE1%4ujWTTq_d;#a%d@uSsg^}t`gc$#wjEhUG^*cra1Dmu{jNSS!OqKE%{ z&bq*}_?gJoeV1$$8vbe4MKp904GZ>0-f_a$pX-wuCm^QYmtTHqT4#;{Xz<|as7fZD zmPk6j{`zZs{kd88ml4<5jDackME_KWSkI5le*J;jFL#*T2n}zC?O8gL?Clw!Bzwju zHI^M<&zDP|u<4_!vsFQZ!(Z~Aq})ZE{Q5X(!5`9l8tBn`^tv(~Ucb=tmwbKk5ygfsMw`Y9Pl`o6$FPb%l2Jt`kIkT?vQ);hPg}?m0(|_68Wa1%g z&eP#(K?bY`kuveL(1SmI^4ZZHtoXsURwx?&A{z1@D91l*pV^&zL_?|Bt)c;Y#wT4T zpXBWs8oWJ2gSTgVQv6DX|Gz5ZkDal;D9)+?Xz);(4sR=wbnqG)Xv?x&+oBO|toXm$ z+S9{C!;p4%X#Z||OXds>cZden*zDE_d&VbW&mCk7_@pHHB-Ur|lbWnHYgCSZWxrGp z{1x9%YT2@7oMM!5(1Bm1M`xgen1>!(c#pN#$7OuRdP~eeTanY+mX2y|3q?bbXqYb= zo`15x9o#47sYd%mboP@>l31TbgO4$qtSW!FGXL=xUjN&)Y10^4c)(u48{EL%(}M5E z-?Oj5Z?aagw$WCNNwVie!?UB>*n$ylEhHK$+w&jglcx3SV3Y2@#wPUXXk+i~XxVqC z*~Gy;ZTnlB3>qS13}?^sNwli)m%n%VFMKA0J9yE96L@0}@Y%}*A0HVX6AwWn`v2mD z_V$8kSS}itj1&#B1?(9buxEVIlk%(3Fcli^yT-=%77h2L*{HiaIU2x|7=!iM#~3ZO zzNsCj|M(2nFyaVc{`~nC3t;i@yt}4(zk<66b%oFhB2aHq-da3jxl~G+fX~s|A4=A!Rf#BdK|Po z^w2{V3WXd!o(B38(E&~97SEQxzS!1{Yh`Oh!%ETM>{&G6le|6W$|wDKP>TI&K&m|^ z8YYW|M?}N#rAMPP(j21rqz(}=#$wsM+Hw9L{Js9SZQHgnIDn^4g;dahTq5Z>87<#@ zbHv_XJH!4usiUnPn`A4)_KZ*R_B=;E2^waGY50TmX!89X?BPD;G=Qj!F%)m#Aebe8 zb=2&YO1jTwJ!G!+cbd*IT$h=dxe;8kM{Ho!s8NoFNd2lzOJ$jGjZeT9PSxI8UtAGm zV9$?+?YYu(Cyjr=AK7>A+!;C8(@#I`VlQGE$pn1xNlWlar?O|! 
+IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
+VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
+COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
+MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
+OR = re.compile(r'^or\b\s*')
+AND = re.compile(r'^and\b\s*')
+NON_SPACE = re.compile(r'(\S+)\s*')
+STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
+
+
+def parse_marker(marker_string):
+    """
+    Parse a marker string and return a dictionary containing a marker expression.
+ + The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in + the expression grammar, or strings. A string contained in quotes is to be + interpreted as a literal string, and a string not contained in quotes is a + variable (such as os_name). + """ + def marker_var(remaining): + # either identifier, or literal string + m = IDENTIFIER.match(remaining) + if m: + result = m.groups()[0] + remaining = remaining[m.end():] + elif not remaining: + raise SyntaxError('unexpected end of input') + else: + q = remaining[0] + if q not in '\'"': + raise SyntaxError('invalid expression: %s' % remaining) + oq = '\'"'.replace(q, '') + remaining = remaining[1:] + parts = [q] + while remaining: + # either a string chunk, or oq, or q to terminate + if remaining[0] == q: + break + elif remaining[0] == oq: + parts.append(oq) + remaining = remaining[1:] + else: + m = STRING_CHUNK.match(remaining) + if not m: + raise SyntaxError('error in string literal: %s' % remaining) + parts.append(m.groups()[0]) + remaining = remaining[m.end():] + else: + s = ''.join(parts) + raise SyntaxError('unterminated string: %s' % s) + parts.append(q) + result = ''.join(parts) + remaining = remaining[1:].lstrip() # skip past closing quote + return result, remaining + + def marker_expr(remaining): + if remaining and remaining[0] == '(': + result, remaining = marker(remaining[1:].lstrip()) + if remaining[0] != ')': + raise SyntaxError('unterminated parenthesis: %s' % remaining) + remaining = remaining[1:].lstrip() + else: + lhs, remaining = marker_var(remaining) + while remaining: + m = MARKER_OP.match(remaining) + if not m: + break + op = m.groups()[0] + remaining = remaining[m.end():] + rhs, remaining = marker_var(remaining) + lhs = {'op': op, 'lhs': lhs, 'rhs': rhs} + result = lhs + return result, remaining + + def marker_and(remaining): + lhs, remaining = marker_expr(remaining) + while remaining: + m = AND.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_expr(remaining) + lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + def marker(remaining): + lhs, remaining = marker_and(remaining) + while remaining: + m = OR.match(remaining) + if not m: + break + remaining = remaining[m.end():] + rhs, remaining = marker_and(remaining) + lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs} + return lhs, remaining + + return marker(marker_string) + + +def parse_requirement(req): + """ + Parse a requirement passed in as a string. Return a Container + whose attributes contain the various parts of the requirement. 
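+
+    An illustrative sketch of the returned attributes (values inferred from
+    the parsing code below, not verified by running this vendored copy):
+
+        r = parse_requirement("requests[security] >= 2.8.1; os_name == 'posix'")
+        # r.name        -> 'requests'
+        # r.extras      -> ['security']
+        # r.constraints -> [('>=', '2.8.1')]
+        # r.marker      -> {'op': '==', 'lhs': 'os_name', 'rhs': "'posix'"}
+        # r.requirement -> 'requests >= 2.8.1'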
+ """ + remaining = req.strip() + if not remaining or remaining.startswith('#'): + return None + m = IDENTIFIER.match(remaining) + if not m: + raise SyntaxError('name expected: %s' % remaining) + distname = m.groups()[0] + remaining = remaining[m.end():] + extras = mark_expr = versions = uri = None + if remaining and remaining[0] == '[': + i = remaining.find(']', 1) + if i < 0: + raise SyntaxError('unterminated extra: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + extras = [] + while s: + m = IDENTIFIER.match(s) + if not m: + raise SyntaxError('malformed extra: %s' % s) + extras.append(m.groups()[0]) + s = s[m.end():] + if not s: + break + if s[0] != ',': + raise SyntaxError('comma expected in extras: %s' % s) + s = s[1:].lstrip() + if not extras: + extras = None + if remaining: + if remaining[0] == '@': + # it's a URI + remaining = remaining[1:].lstrip() + m = NON_SPACE.match(remaining) + if not m: + raise SyntaxError('invalid URI: %s' % remaining) + uri = m.groups()[0] + t = urlparse(uri) + # there are issues with Python and URL parsing, so this test + # is a bit crude. See bpo-20271, bpo-23505. Python doesn't + # always parse invalid URLs correctly - it should raise + # exceptions for malformed URLs + if not (t.scheme and t.netloc): + raise SyntaxError('Invalid URL: %s' % uri) + remaining = remaining[m.end():].lstrip() + else: + + def get_versions(ver_remaining): + """ + Return a list of operator, version tuples if any are + specified, else None. + """ + m = COMPARE_OP.match(ver_remaining) + versions = None + if m: + versions = [] + while True: + op = m.groups()[0] + ver_remaining = ver_remaining[m.end():] + m = VERSION_IDENTIFIER.match(ver_remaining) + if not m: + raise SyntaxError('invalid version: %s' % ver_remaining) + v = m.groups()[0] + versions.append((op, v)) + ver_remaining = ver_remaining[m.end():] + if not ver_remaining or ver_remaining[0] != ',': + break + ver_remaining = ver_remaining[1:].lstrip() + m = COMPARE_OP.match(ver_remaining) + if not m: + raise SyntaxError('invalid constraint: %s' % ver_remaining) + if not versions: + versions = None + return versions, ver_remaining + + if remaining[0] != '(': + versions, remaining = get_versions(remaining) + else: + i = remaining.find(')', 1) + if i < 0: + raise SyntaxError('unterminated parenthesis: %s' % remaining) + s = remaining[1:i] + remaining = remaining[i + 1:].lstrip() + # As a special diversion from PEP 508, allow a version number + # a.b.c in parentheses as a synonym for ~= a.b.c (because this + # is allowed in earlier PEPs) + if COMPARE_OP.match(s): + versions, _ = get_versions(s) + else: + m = VERSION_IDENTIFIER.match(s) + if not m: + raise SyntaxError('invalid constraint: %s' % s) + v = m.groups()[0] + s = s[m.end():].lstrip() + if s: + raise SyntaxError('invalid constraint: %s' % s) + versions = [('~=', v)] + + if remaining: + if remaining[0] != ';': + raise SyntaxError('invalid requirement: %s' % remaining) + remaining = remaining[1:].lstrip() + + mark_expr, remaining = parse_marker(remaining) + + if remaining and remaining[0] != '#': + raise SyntaxError('unexpected trailing data: %s' % remaining) + + if not versions: + rs = distname + else: + rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) + return Container(name=distname, extras=extras, constraints=versions, + marker=mark_expr, url=uri, requirement=rs) + + +def get_resources_dests(resources_root, rules): + """Find destinations for resources files""" + + def get_rel_path(root, path): + # normalizes and 
returns a lstripped-/-separated path + root = root.replace(os.path.sep, '/') + path = path.replace(os.path.sep, '/') + assert path.startswith(root) + return path[len(root):].lstrip('/') + + destinations = {} + for base, suffix, dest in rules: + prefix = os.path.join(resources_root, base) + for abs_base in iglob(prefix): + abs_glob = os.path.join(abs_base, suffix) + for abs_path in iglob(abs_glob): + resource_file = get_rel_path(resources_root, abs_path) + if dest is None: # remove the entry if it was here + destinations.pop(resource_file, None) + else: + rel_path = get_rel_path(abs_base, abs_path) + rel_dest = dest.replace(os.path.sep, '/').rstrip('/') + destinations[resource_file] = rel_dest + '/' + rel_path + return destinations + + +def in_venv(): + if hasattr(sys, 'real_prefix'): + # virtualenv venvs + result = True + else: + # PEP 405 venvs + result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) + return result + + +def get_executable(): +# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as +# changes to the stub launcher mean that sys.executable always points +# to the stub on OS X +# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' +# in os.environ): +# result = os.environ['__PYVENV_LAUNCHER__'] +# else: +# result = sys.executable +# return result + result = os.path.normcase(sys.executable) + if not isinstance(result, text_type): + result = fsdecode(result) + return result + + +def proceed(prompt, allowed_chars, error_prompt=None, default=None): + p = prompt + while True: + s = raw_input(p) + p = prompt + if not s and default: + s = default + if s: + c = s[0].lower() + if c in allowed_chars: + break + if error_prompt: + p = '%c: %s\n%s' % (c, error_prompt, prompt) + return c + + +def extract_by_key(d, keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + jdata = json.load(stream) + result = jdata['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + + def read_stream(cp, stream): + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + + cp = configparser.ConfigParser() + try: + read_stream(cp, stream) + except configparser.MissingSectionHeaderError: + stream.close() + data = textwrap.dedent(data) + stream = StringIO(data) + read_stream(cp, stream) + + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + #entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, 
entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. 
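+
+        A minimal usage sketch (paths hypothetical); with dry_run set, the
+        copy is logged but nothing is written:
+
+            fo = FileOperator(dry_run=True)
+            fo.copy_file('/tmp/src.txt', '/tmp/dst.txt')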
+ """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data.encode(encoding)) + self.record_as_written(path) + + def set_mode(self, bits, mask, files): + if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): + # Set the executable bits (owner, group, and world) on + # all the files specified. + for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + py_compile.compile(path, dpath, diagpath, True) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded changes, turn off recording, return + changes. 
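+
+        Sketch of the record/commit protocol (usage assumed from the
+        surrounding methods):
+
+            fo.record = True
+            fo.write_binary_file(path, data)
+            files_written, dirs_created = fo.commit()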
+        """
+        assert self.record
+        result = self.files_written, self.dirs_created
+        self._init_record()
+        return result
+
+    def rollback(self):
+        if not self.dry_run:
+            for f in list(self.files_written):
+                if os.path.exists(f):
+                    os.remove(f)
+            # dirs should all be empty now, except perhaps for
+            # __pycache__ subdirs
+            # reverse so that subdirs appear before their parents
+            dirs = sorted(self.dirs_created, reverse=True)
+            for d in dirs:
+                flist = os.listdir(d)
+                if flist:
+                    assert flist == ['__pycache__']
+                    sd = os.path.join(d, flist[0])
+                    os.rmdir(sd)
+                os.rmdir(d)  # should fail if non-empty
+        self._init_record()
+
+def resolve(module_name, dotted_path):
+    if module_name in sys.modules:
+        mod = sys.modules[module_name]
+    else:
+        mod = __import__(module_name)
+    if dotted_path is None:
+        result = mod
+    else:
+        parts = dotted_path.split('.')
+        result = getattr(mod, parts.pop(0))
+        for p in parts:
+            result = getattr(result, p)
+    return result
+
+
+class ExportEntry(object):
+    def __init__(self, name, prefix, suffix, flags):
+        self.name = name
+        self.prefix = prefix
+        self.suffix = suffix
+        self.flags = flags
+
+    @cached_property
+    def value(self):
+        return resolve(self.prefix, self.suffix)
+
+    def __repr__(self):  # pragma: no cover
+        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
+                                                self.suffix, self.flags)
+
+    def __eq__(self, other):
+        if not isinstance(other, ExportEntry):
+            result = False
+        else:
+            result = (self.name == other.name and
+                      self.prefix == other.prefix and
+                      self.suffix == other.suffix and
+                      self.flags == other.flags)
+        return result
+
+    __hash__ = object.__hash__
+
+
+ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
+                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
+                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
+                      ''', re.VERBOSE)
+
+def get_export_entry(specification):
+    m = ENTRY_RE.search(specification)
+    if not m:
+        result = None
+        if '[' in specification or ']' in specification:
+            raise DistlibException("Invalid specification "
+                                   "'%s'" % specification)
+    else:
+        d = m.groupdict()
+        name = d['name']
+        path = d['callable']
+        colons = path.count(':')
+        if colons == 0:
+            prefix, suffix = path, None
+        else:
+            if colons != 1:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            prefix, suffix = path.split(':')
+        flags = d['flags']
+        if flags is None:
+            if '[' in specification or ']' in specification:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            flags = []
+        else:
+            flags = [f.strip() for f in flags.split(',')]
+        result = ExportEntry(name, prefix, suffix, flags)
+    return result
+
+
+def get_cache_base(suffix=None):
+    """
+    Return the default base location for distlib caches. If the directory does
+    not exist, it is created. Use the suffix provided for the base directory,
+    and default to '.distlib' if it isn't provided.
+
+    On Windows, if LOCALAPPDATA is defined in the environment, then it is
+    assumed to be a directory, and will be the parent directory of the result.
+    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
+    directory - using os.expanduser('~') - will be the parent directory of
+    the result.
+
+    The result is just the directory '.distlib' in the parent directory as
+    determined above, or with the name specified with ``suffix``.
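+
+    For example (illustrative; the exact parent directory depends on the
+    platform and environment):
+
+        get_cache_base()            # e.g. '/home/user/.distlib'
+        get_cache_base('.pipenv')   # e.g. '/home/user/.pipenv'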
+    """
+    if suffix is None:
+        suffix = '.distlib'
+    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
+        result = os.path.expandvars('$localappdata')
+    else:
+        # Assume posix, or old Windows
+        result = os.path.expanduser('~')
+    # we use 'isdir' instead of 'exists', because we want to
+    # fail if there's a file with that name
+    if os.path.isdir(result):
+        usable = os.access(result, os.W_OK)
+        if not usable:
+            logger.warning('Directory exists but is not writable: %s', result)
+    else:
+        try:
+            os.makedirs(result)
+            usable = True
+        except OSError:
+            logger.warning('Unable to create %s', result, exc_info=True)
+            usable = False
+    if not usable:
+        result = tempfile.mkdtemp()
+        logger.warning('Default location unusable, using %s', result)
+    return os.path.join(result, suffix)
+
+
+def path_to_cache_dir(path):
+    """
+    Convert an absolute path to a directory name for use in a cache.
+
+    The algorithm used is:
+
+    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
+    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
+    #. ``'.cache'`` is appended.
+    """
+    d, p = os.path.splitdrive(os.path.abspath(path))
+    if d:
+        d = d.replace(':', '---')
+    p = p.replace(os.sep, '--')
+    return d + p + '.cache'
+
+
+def ensure_slash(s):
+    if not s.endswith('/'):
+        return s + '/'
+    return s
+
+
+def parse_credentials(netloc):
+    username = password = None
+    if '@' in netloc:
+        prefix, netloc = netloc.split('@', 1)
+        if ':' not in prefix:
+            username = prefix
+        else:
+            username, password = prefix.split(':', 1)
+    return username, password, netloc
+
+
+def get_process_umask():
+    result = os.umask(0o22)
+    os.umask(result)
+    return result
+
+def is_string_sequence(seq):
+    result = True
+    i = None
+    for i, s in enumerate(seq):
+        if not isinstance(s, string_types):
+            result = False
+            break
+    assert i is not None
+    return result
+
+PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
+                                      '([a-z0-9_.+-]+)', re.I)
+PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
+
+
+def split_filename(filename, project_name=None):
+    """
+    Extract name, version, python version from a filename (no extension)
+
+    Return name, version, pyver or None
+    """
+    result = None
+    pyver = None
+    filename = unquote(filename).replace(' ', '-')
+    m = PYTHON_VERSION.search(filename)
+    if m:
+        pyver = m.group(1)
+        filename = filename[:m.start()]
+    if project_name and len(filename) > len(project_name) + 1:
+        m = re.match(re.escape(project_name) + r'\b', filename)
+        if m:
+            n = m.end()
+            result = filename[:n], filename[n + 1:], pyver
+    if result is None:
+        m = PROJECT_NAME_AND_VERSION.match(filename)
+        if m:
+            result = m.group(1), m.group(3), pyver
+    return result
+
+# Allow spaces in name because of legacy dists like "Twisted Core"
+NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
+                             r'\(\s*(?P<ver>[^\s)]+)\)$')
+
+def parse_name_and_version(p):
+    """
+    A utility method used to get name and version from a string.
+
+    From e.g. a Provides-Dist value.
+
+    :param p: A value in a form 'foo (1.0)'
+    :return: The name and version as a tuple.
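+
+    Illustrative, per NAME_VERSION_RE above:
+
+        parse_name_and_version('Twisted Core (12.0.0)')
+        # -> ('twisted core', '12.0.0')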
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + ct = headers.get('Content-Type') + if not ct.startswith('application/json'): + logger.debug('Unexpected response for JSON request: %s', ct) + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + +_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + +def get_project_data(name): + url = '%s/%s/project.json' % (name[0].upper(), name) + url = urljoin(_external_data_base_url, url) + result = _get_external_data(url) + return result + +def get_package_data(name, version): + url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) + url = urljoin(_external_data_base_url, url) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): # pragma: no cover + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. 
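+
+        A small sketch (the subscriber signature is assumed from publish()
+        below):
+
+            def on_build(event, *args, **kwargs):
+                return 'ok'
+
+            mixin.add('build', on_build)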
+ """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: # pragma: no cover + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... 
but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: # pragma: no cover + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. 
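+            # Decoding each member name to text up front sidesteps that
+            # implicit conversion.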
+ for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
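+            # note: 'and' binds tighter than 'or' above, so this branch covers
+            # both "no maximum known yet" and "no progress made yet"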
+ #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
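+            # e.g. 'src/**/*.py' splits into prefix 'src/' and radical
+            # '/*.py'; the leading separator is stripped just below and the
+            # radical is re-globbed under each directory found by os.walk.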
+ if radical == '': + radical = '*' + else: + # we support both + radical = radical.lstrip('/') + radical = radical.lstrip('\\') + for path, dir, files in os.walk(prefix): + path = os.path.normpath(path) + for fn in _iglob(os.path.join(path, radical)): + yield fn + +if ssl: + from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, + CertificateError) + + +# +# HTTPSConnection which verifies certificates/matches domains +# + + class HTTPSConnection(httplib.HTTPSConnection): + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None + + # noinspection PyPropertyAccess + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, '_tunnel_host', False): + self.sock = sock + self._tunnel() + + if not hasattr(ssl, 'SSLContext'): + # For 2.x + if self.ca_certs: + cert_reqs = ssl.CERT_REQUIRED + else: + cert_reqs = ssl.CERT_NONE + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=cert_reqs, + ssl_version=ssl.PROTOCOL_SSLv23, + ca_certs=self.ca_certs) + else: # pragma: no cover + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + if self.cert_file: + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + self.sock = context.wrap_socket(sock, **kwargs) + if self.ca_certs and self.check_domain: + try: + match_hostname(self.sock.getpeercert(), self.host) + logger.debug('Host verified: %s', self.host) + except CertificateError: # pragma: no cover + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): + BaseHTTPSHandler.__init__(self) + self.ca_certs = ca_certs + self.check_domain = check_domain + + def _conn_maker(self, *args, **kwargs): + """ + This is called to create a connection instance. Normally you'd + pass a connection class to do_open, but it doesn't actually check for + a class, and just expects a callable. As long as we behave just as a + constructor would have, we should be OK. If it ever changes so that + we *must* pass a class, we'll create an UnsafeHTTPSConnection class + which just sets check_domain to False in the class definition, and + choose which one to pass to do_open. + """ + result = HTTPSConnection(*args, **kwargs) + if self.ca_certs: + result.ca_certs = self.ca_certs + result.check_domain = self.check_domain + return result + + def https_open(self, req): + try: + return self.do_open(self._conn_maker, req) + except URLError as e: + if 'certificate verify failed' in str(e.reason): + raise CertificateError('Unable to verify server certificate ' + 'for %s' % req.host) + else: + raise + + # + # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- + # Middle proxy using HTTP listens on port 443, or an index mistakenly serves + # HTML containing a http://xyz link when it should be https://xyz), + # you can use the following handler class, which does not allow HTTP traffic. + # + # It works by inheriting from HTTPHandler - so build_opener won't add a + # handler for HTTP itself. 
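+    #
+    # A usage sketch (assuming build_opener from urllib2 / urllib.request;
+    # the certificate path is illustrative):
+    #
+    #     opener = build_opener(HTTPSOnlyHandler(ca_certs='/path/to/ca.pem'))
+    #     opener.open('https://example.com/')  # verified TLS request
+    #     opener.open('http://example.com/')   # raises URLError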
+ # + class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# + +_ver_info = sys.version_info[:2] + +if _ver_info == (2, 6): + class HTTP(httplib.HTTP): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + + if ssl: + class HTTPS(httplib.HTTPS): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if _ver_info == (2, 6): + result = HTTP(h, timeout=self.timeout) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + result = self._connection[1] + return result + +if ssl: + class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if _ver_info == (2, 6): + result = HTTPS(host, None, **kwargs) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + result = self._connection[1] + return result + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + scheme, _ = splittype(uri) + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. +# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + # Python 3 determines encoding from locale. 
+        # Force 'utf-8' file encoding to match other forced utf-8 encoding
+        kwargs['encoding'] = 'utf-8'
+    return open(fn, mode, **kwargs)
+
+
+class CSVBase(object):
+    defaults = {
+        'delimiter': str(','),      # The strs are used because we need native
+        'quotechar': str('"'),      # str in the csv API (2.x won't take
+        'lineterminator': str('\n') # Unicode)
+    }
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.stream.close()
+
+
+class CSVReader(CSVBase):
+    def __init__(self, **kwargs):
+        if 'stream' in kwargs:
+            stream = kwargs['stream']
+            if sys.version_info[0] >= 3:
+                # needs to be a text stream
+                stream = codecs.getreader('utf-8')(stream)
+            self.stream = stream
+        else:
+            self.stream = _csv_open(kwargs['path'], 'r')
+        self.reader = csv.reader(self.stream, **self.defaults)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        result = next(self.reader)
+        if sys.version_info[0] < 3:
+            for i, item in enumerate(result):
+                if not isinstance(item, text_type):
+                    result[i] = item.decode('utf-8')
+        return result
+
+    __next__ = next
+
+class CSVWriter(CSVBase):
+    def __init__(self, fn, **kwargs):
+        self.stream = _csv_open(fn, 'w')
+        self.writer = csv.writer(self.stream, **self.defaults)
+
+    def writerow(self, row):
+        if sys.version_info[0] < 3:
+            r = []
+            for item in row:
+                if isinstance(item, text_type):
+                    item = item.encode('utf-8')
+                r.append(item)
+            row = r
+        self.writer.writerow(row)
+
+#
+# Configurator functionality
+#
+
+class Configurator(BaseConfigurator):
+
+    value_converters = dict(BaseConfigurator.value_converters)
+    value_converters['inc'] = 'inc_convert'
+
+    def __init__(self, config, base=None):
+        super(Configurator, self).__init__(config)
+        self.base = base or os.getcwd()
+
+    def configure_custom(self, config):
+        def convert(o):
+            if isinstance(o, (list, tuple)):
+                result = type(o)([convert(i) for i in o])
+            elif isinstance(o, dict):
+                if '()' in o:
+                    result = self.configure_custom(o)
+                else:
+                    result = {}
+                    for k in o:
+                        result[k] = convert(o[k])
+            else:
+                result = self.convert(o)
+            return result
+
+        c = config.pop('()')
+        if not callable(c):
+            c = self.resolve(c)
+        props = config.pop('.', None)
+        # Check for valid identifiers
+        args = config.pop('[]', ())
+        if args:
+            args = tuple([convert(o) for o in args])
+        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
+        kwargs = dict(items)
+        result = c(*args, **kwargs)
+        if props:
+            for n, v in props.items():
+                setattr(result, n, convert(v))
+        return result
+
+    def __getitem__(self, key):
+        result = self.config[key]
+        if isinstance(result, dict) and '()' in result:
+            self.config[key] = result = self.configure_custom(result)
+        return result
+
+    def inc_convert(self, value):
+        """Default converter for the inc:// protocol."""
+        if not os.path.isabs(value):
+            value = os.path.join(self.base, value)
+        with codecs.open(value, 'r', encoding='utf-8') as f:
+            result = json.load(f)
+        return result
+
+
+class SubprocessMixin(object):
+    """
+    Mixin for running subprocesses and capturing their output
+    """
+    def __init__(self, verbose=False, progress=None):
+        self.verbose = verbose
+        self.progress = progress
+
+    def reader(self, stream, context):
+        """
+        Read lines from a subprocess' output stream and either pass to a progress
+        callable (if specified) or write progress information to sys.stderr.
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p + + +def normalize_name(name): + """Normalize a python package name a la PEP 503""" + # https://www.python.org/dev/peps/pep-0503/#normalized-names + return re.sub('[-_.]+', '-', name).lower() diff --git a/pipenv/vendor/distlib/version.py b/pipenv/vendor/distlib/version.py new file mode 100644 index 0000000000..3eebe18ee8 --- /dev/null +++ b/pipenv/vendor/distlib/version.py @@ -0,0 +1,736 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2017 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-440, +setuptools-compatible and semantic versioning. +""" + +import logging +import re + +from .compat import string_types +from .util import parse_requirement + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'UnsupportedVersionError', 'get_scheme'] + +logger = logging.getLogger(__name__) + + +class UnsupportedVersionError(ValueError): + """This is an unsupported version.""" + pass + + +class Version(object): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + def __repr__(self): + return "%s('%s')" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + + +class Matcher(object): + version_class = None + + # value is either a callable or the name of a method + _operators = { + '<': lambda v, c, p: v < c, + '>': lambda v, c, p: v > c, + '<=': lambda v, c, p: v == c or v < c, + '>=': lambda v, c, p: v == c or v > c, + '==': lambda v, c, p: v == c, + '===': lambda v, c, p: v == c, + # by default, compatible => >=. 
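+        # e.g. with this default, '~= 1.4' accepts any version >= 1.4;
+        # NormalizedMatcher later in this module replaces it with real
+        # PEP 440 compatible-release semantics.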
+ '~=': lambda v, c, p: v == c or v > c, + '!=': lambda v, c, p: v != c, + } + + # this is a method only to support alternative implementations + # via overriding + def parse_requirement(self, s): + return parse_requirement(s) + + def __init__(self, s): + if self.version_class is None: + raise ValueError('Please specify a version class') + self._string = s = s.strip() + r = self.parse_requirement(s) + if not r: + raise ValueError('Not valid: %r' % s) + self.name = r.name + self.key = self.name.lower() # for case-insensitive comparisons + clist = [] + if r.constraints: + # import pdb; pdb.set_trace() + for op, s in r.constraints: + if s.endswith('.*'): + if op not in ('==', '!='): + raise ValueError('\'.*\' not allowed for ' + '%r constraints' % op) + # Could be a partial version (e.g. for '2.*') which + # won't parse as a version, so keep it as a string + vn, prefix = s[:-2], True + # Just to check that vn is a valid version + self.version_class(vn) + else: + # Should parse as a version, so we can create an + # instance for the comparison + vn, prefix = self.version_class(s), False + clist.append((op, vn, prefix)) + self._parts = tuple(clist) + + def match(self, version): + """ + Check if the provided version matches the constraints. + + :param version: The version to match against this instance. + :type version: String or :class:`Version` instance. + """ + if isinstance(version, string_types): + version = self.version_class(version) + for operator, constraint, prefix in self._parts: + f = self._operators.get(operator) + if isinstance(f, string_types): + f = getattr(self, f) + if not f: + msg = ('%r not implemented ' + 'for %s' % (operator, self.__class__.__name__)) + raise NotImplementedError(msg) + if not f(version, constraint, prefix): + return False + return True + + @property + def exact_version(self): + result = None + if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): + result = self._parts[0][1] + return result + + def _check_compatible(self, other): + if type(self) != type(other) or self.name != other.name: + raise TypeError('cannot compare %s and %s' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self.key == other.key and self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self.key) + hash(self._parts) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + +PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' + r'(\.(post)(\d+))?(\.(dev)(\d+))?' 
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') + + +def _pep_440_key(s): + s = s.strip() + m = PEP440_VERSION_RE.match(s) + if not m: + raise UnsupportedVersionError('Not a valid version: %s' % s) + groups = m.groups() + nums = tuple(int(v) for v in groups[1].split('.')) + while len(nums) > 1 and nums[-1] == 0: + nums = nums[:-1] + + if not groups[0]: + epoch = 0 + else: + epoch = int(groups[0]) + pre = groups[4:6] + post = groups[7:9] + dev = groups[10:12] + local = groups[13] + if pre == (None, None): + pre = () + else: + pre = pre[0], int(pre[1]) + if post == (None, None): + post = () + else: + post = post[0], int(post[1]) + if dev == (None, None): + dev = () + else: + dev = dev[0], int(dev[1]) + if local is None: + local = () + else: + parts = [] + for part in local.split('.'): + # to ensure that numeric compares as > lexicographic, avoid + # comparing them directly, but encode a tuple which ensures + # correct sorting + if part.isdigit(): + part = (1, int(part)) + else: + part = (0, part) + parts.append(part) + local = tuple(parts) + if not pre: + # either before pre-release, or final release and after + if not post and dev: + # before pre-release + pre = ('a', -1) # to sort before a0 + else: + pre = ('z',) # to sort after all pre-releases + # now look at the state of post and dev. + if not post: + post = ('_',) # sort before 'a' + if not dev: + dev = ('final',) + + #print('%s -> %s' % (s, m.groups())) + return epoch, nums, pre, post, dev, local + + +_normalized_key = _pep_440_key + + +class NormalizedVersion(Version): + """A rational version. + + Good: + 1.2 # equivalent to "1.2.0" + 1.2.0 + 1.2a1 + 1.2.3a2 + 1.2.3b1 + 1.2.3c1 + 1.2.3.4 + TODO: fill this out + + Bad: + 1 # minimum two numbers + 1.2a # release level must have a release serial + 1.2.3b + """ + def parse(self, s): + result = _normalized_key(s) + # _normalized_key loses trailing zeroes in the release + # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 + # However, PEP 440 prefix matching needs it: for example, + # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). + m = PEP440_VERSION_RE.match(s) # must succeed + groups = m.groups() + self._release_clause = tuple(int(v) for v in groups[1].split('.')) + return result + + PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) + + @property + def is_prerelease(self): + return any(t[0] in self.PREREL_TAGS for t in self._parts if t) + + +def _match_prefix(x, y): + x = str(x) + y = str(y) + if x == y: + return True + if not x.startswith(y): + return False + n = len(y) + return x[n] == '.' + + +class NormalizedMatcher(Matcher): + version_class = NormalizedVersion + + # value is either a callable or the name of a method + _operators = { + '~=': '_match_compatible', + '<': '_match_lt', + '>': '_match_gt', + '<=': '_match_le', + '>=': '_match_ge', + '==': '_match_eq', + '===': '_match_arbitrary', + '!=': '_match_ne', + } + + def _adjust_local(self, version, constraint, prefix): + if prefix: + strip_local = '+' not in constraint and version._parts[-1] + else: + # both constraint and version are + # NormalizedVersion instances. + # If constraint does not have a local component, + # ensure the version doesn't, either. 
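+            # e.g. matching '== 1.0' against '1.0+local.1': the local
+            # segment is stripped, so only the public '1.0' is compared.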
+ strip_local = not constraint._parts[-1] and version._parts[-1] + if strip_local: + s = version._string.split('+', 1)[0] + version = self.version_class(s) + return version, constraint + + def _match_lt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version >= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_gt(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version <= constraint: + return False + release_clause = constraint._release_clause + pfx = '.'.join([str(i) for i in release_clause]) + return not _match_prefix(version, pfx) + + def _match_le(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version <= constraint + + def _match_ge(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + return version >= constraint + + def _match_eq(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version == constraint) + else: + result = _match_prefix(version, constraint) + return result + + def _match_arbitrary(self, version, constraint, prefix): + return str(version) == str(constraint) + + def _match_ne(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if not prefix: + result = (version != constraint) + else: + result = not _match_prefix(version, constraint) + return result + + def _match_compatible(self, version, constraint, prefix): + version, constraint = self._adjust_local(version, constraint, prefix) + if version == constraint: + return True + if version < constraint: + return False +# if not prefix: +# return True + release_clause = constraint._release_clause + if len(release_clause) > 1: + release_clause = release_clause[:-1] + pfx = '.'.join([str(i) for i in release_clause]) + return _match_prefix(version, pfx) + +_REPLACEMENTS = ( + (re.compile('[.+-]$'), ''), # remove trailing puncts + (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start + (re.compile('^[.-]'), ''), # remove leading puncts + (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses + (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha + (re.compile(r'\b(pre-alpha|prealpha)\b'), + 'pre.alpha'), # standardise + (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses +) + +_SUFFIX_REPLACEMENTS = ( + (re.compile('^[:~._+-]+'), ''), # remove leading puncts + (re.compile('[,*")([\\]]'), ''), # remove unwanted chars + (re.compile('[~:+_ -]'), '.'), # replace illegal chars + (re.compile('[.]{2,}'), '.'), # multiple runs of '.' + (re.compile(r'\.$'), ''), # trailing '.' +) + +_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') + + +def _suggest_semantic_version(s): + """ + Try to suggest a semantic form for a version for which + _suggest_normalized_version couldn't come up with anything. + """ + result = s.strip().lower() + for pat, repl in _REPLACEMENTS: + result = pat.sub(repl, result) + if not result: + result = '0.0.0' + + # Now look for numeric prefix, and separate it out from + # the rest. 
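+    # e.g. a cleaned-up '1.0rc1' yields numeric prefix '1.0' (padded to
+    # '1.0.0') and suffix 'rc1', producing '1.0.0+rc1' below ('-' is used
+    # as the separator instead when the suffix contains 'dev').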
+ #import pdb; pdb.set_trace() + m = _NUMERIC_PREFIX.match(result) + if not m: + prefix = '0.0.0' + suffix = result + else: + prefix = m.groups()[0].split('.') + prefix = [int(i) for i in prefix] + while len(prefix) < 3: + prefix.append(0) + if len(prefix) == 3: + suffix = result[m.end():] + else: + suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] + prefix = prefix[:3] + prefix = '.'.join([str(i) for i in prefix]) + suffix = suffix.strip() + if suffix: + #import pdb; pdb.set_trace() + # massage the suffix. + for pat, repl in _SUFFIX_REPLACEMENTS: + suffix = pat.sub(repl, suffix) + + if not suffix: + result = prefix + else: + sep = '-' if 'dev' in suffix else '+' + result = prefix + sep + suffix + if not is_semver(result): + result = None + return result + + +def _suggest_normalized_version(s): + """Suggest a normalized version close to the given version string. + + If you have a version string that isn't rational (i.e. NormalizedVersion + doesn't like it) then you might be able to get an equivalent (or close) + rational version from this function. + + This does a number of simple normalizations to the given string, based + on observation of versions currently in use on PyPI. Given a dump of + those version during PyCon 2009, 4287 of them: + - 2312 (53.93%) match NormalizedVersion without change + with the automatic suggestion + - 3474 (81.04%) match when using this suggestion method + + @param s {str} An irrational version string. + @returns A rational version string, or None, if couldn't determine one. + """ + try: + _normalized_key(s) + return s # already rational + except UnsupportedVersionError: + pass + + rs = s.lower() + + # part of this could use maketrans + for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), + ('beta', 'b'), ('rc', 'c'), ('-final', ''), + ('-pre', 'c'), + ('-release', ''), ('.release', ''), ('-stable', ''), + ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), + ('final', '')): + rs = rs.replace(orig, repl) + + # if something ends with dev or pre, we add a 0 + rs = re.sub(r"pre$", r"pre0", rs) + rs = re.sub(r"dev$", r"dev0", rs) + + # if we have something like "b-2" or "a.2" at the end of the + # version, that is probably beta, alpha, etc + # let's remove the dash or dot + rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) + + # 1.0-dev-r371 -> 1.0.dev371 + # 0.1-dev-r79 -> 0.1.dev79 + rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) + + # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 + rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) + + # Clean: v0.3, v1.0 + if rs.startswith('v'): + rs = rs[1:] + + # Clean leading '0's on numbers. + #TODO: unintended side-effect on, e.g., "2003.05.09" + # PyPI stats: 77 (~2%) better + rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) + + # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers + # zero. 
+ # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + _normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + '': None, + '.': None, +} + + +def _legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') + return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return tuple(result) + + +class LegacyVersion(Version): + def parse(self, s): + return _legacy_key(s) + + @property + def is_prerelease(self): + result = False + for x in self._parts: + if (isinstance(x, string_types) and x.startswith('*') and + x < '*final'): + result = True + break + return result + + +class LegacyMatcher(Matcher): + version_class = LegacyVersion + + _operators = dict(Matcher._operators) + _operators['~='] = '_match_compatible' + + numeric_re = re.compile(r'^(\d+(\.\d+)*)') + + def _match_compatible(self, version, constraint, prefix): + if version < constraint: + return False + m = self.numeric_re.match(str(constraint)) + if not m: + logger.warning('Cannot compute compatible match for version %s ' + ' and constraint %s', version, constraint) + return True + s = m.groups()[0] + if '.' in s: + s = s.rsplit('.', 1)[0] + return _match_prefix(version, s) + +# +# Semantic versioning +# + +_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' + r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
+                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+
+def is_semver(s):
+    return _SEMVER_RE.match(s)
+
+
+def _semantic_key(s):
+    def make_tuple(s, absent):
+        if s is None:
+            result = (absent,)
+        else:
+            parts = s[1:].split('.')
+            # We can't compare ints and strings on Python 3, so fudge it
+            # by zero-filling numeric values so simulate a numeric comparison
+            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+        return result
+
+    m = is_semver(s)
+    if not m:
+        raise UnsupportedVersionError(s)
+    groups = m.groups()
+    major, minor, patch = [int(i) for i in groups[:3]]
+    # choose the '|' and '*' so that versions sort correctly
+    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+    return (major, minor, patch), pre, build
+
+
+class SemanticVersion(Version):
+    def parse(self, s):
+        return _semantic_key(s)
+
+    @property
+    def is_prerelease(self):
+        return self._parts[1][0] != '|'
+
+
+class SemanticMatcher(Matcher):
+    version_class = SemanticVersion
+
+
+class VersionScheme(object):
+    def __init__(self, key, matcher, suggester=None):
+        self.key = key
+        self.matcher = matcher
+        self.suggester = suggester
+
+    def is_valid_version(self, s):
+        try:
+            self.matcher.version_class(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_matcher(self, s):
+        try:
+            self.matcher(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_constraint_list(self, s):
+        """
+        Used for processing some metadata fields
+        """
+        return self.is_valid_matcher('dummy_name (%s)' % s)
+
+    def suggest(self, s):
+        if self.suggester is None:
+            result = None
+        else:
+            result = self.suggester(s)
+        return result
+
+_SCHEMES = {
+    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
+                                _suggest_normalized_version),
+    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
+    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
+                              _suggest_semantic_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['normalized']
+
+
+def get_scheme(name):
+    if name not in _SCHEMES:
+        raise ValueError('unknown scheme name: %r' % name)
+    return _SCHEMES[name]
diff --git a/pipenv/vendor/distlib/w32.exe b/pipenv/vendor/distlib/w32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..732215a9d34ccb7b417d637a7646d9b843ecafa8
GIT binary patch
literal 89088
[89088 bytes of binary executable data omitted]
z{O3PaJkAk-PfqBzsc0h?0{0zwcOQ_Z-Q#-gH|>cp`#;qGsE;f8%=$)`xUk%*7a*sq zF9AD=(ygfLs(qubp?p*3h(GHA>m|#Ca>6p;d=~wUi}y^IWyJDedk%Wn18?e0+6k}s zQurHkO4n6}NrM5zjdW3lSQqZQ?>-5gCD~59Uc}7+#-W~}F5*S`;h2a(+)39{Pd&vs z#;5Ta&mYB|G%Ea!{y+GgM;Z*hXwYKNK$rZcZbI9w#tD=uwzaPOC9ZrX{wy1#4?(*Q z<({%cp0f@?XFLi#{tWI__`AwKb%bkq)Kw;=i*xK#rc9Aom#gI4;JK^IaFiv=I^VMm zAnt~J1Y@K%e*t%eKl3ogzi4|_QZCsBkq*{D@`?I`x{mFUYnhO@n>TNkk3asnl4tVU z0Mbp_VclWBiO-Z7rpLAbI`esa>h)*&PTYC5CqAz2BJnqXG#GHLGGQ866Rpyrj4=*r zBfgZaZP1;h#lRi?)93!AylYrj|El>P)GsyuMf;scTF8I81|4(_I_Q$8)Gw?H1eOio zu^gy7Sx;GJ)J3!<&;~_X$am!NXY@=p#x?I0{)YUcuh4^hB3!FX*uQ80hjh^{N1$xc z-L-3%(!1mV>nr7*__H2B9-l?_e^q8&DQHiA8uOpf6IJs+q=9WG`!CdglmoU$Y&*$g zjycJDmIM0&Mmg}AI-k0hdJJO0|Iaj!haH&2iHDuoyK3jWG)()ZS&?;iiYxX z$h>(c@qJFY=HgC0|NC5bJv}$w#n&&$57%FGjr5H4UGUCj@^~)M-IIUC&C&6cn(L;! z?z-DhcTKw6S$9KpH%E7+=DN}C1$@kaP{5yDr^WR>?B5b7&sXg$qNJAqFbgiv16(gY z)1p#)J`l;(c`>hc7z=u%&5yQNEMw8%kHDON7~XpW*ynBosFG8r{Y8b8b7AWl@rh<* z<6JOp3Ikxt})^Jg$m zmWn=b1j6?RbVYyT%naB#XeZ-bXej1yY4fJN`0!aN_hVKL-y`w*5#g_Pb%G9Ghde8& zKdB=*ULpv`7cASo`uf+2zv7^CQO?t?Txfs)4swvaoUGIJ}2b?(0(1Z-prM-`~ve(O9ee1lZ z5zAE`RQ?xXZS%IED9m0g1K;k>5(0DIT>n%y)GIMD9PH4vQJ zr_I-pN7{R7XAFY-aZQ=*%r*t%*F^MlClU_>7nVQ!z{G{^&(_Cgt2u4joH-|NE;-KUac2~pp z&GcOJrw$;nZ$og!gSfE#x8D~cY43U!IB-srbK|tJP#$ULqJ4$7MrGsD^Iw!Sj9IBV zVaVeU@Bvp<`Lk~ZJ47PByPQZ}!0|r&=cJpskOy=RtxHjML)xooyQR&Lw&$mGdm-xt z?IW}m=E6p0@PL`Q>S)?-aaGAb>lVK4O^5zZWnHA+;kbZfJMx6Ohx|C_{7k<8K&-M8 zaDJNdNE<%qrj0s5I}>evw3FrNI54x=6Pg`=3i3X7fp4mGP-pfpDTAzs?DNwl9t84( zKs?ANmczBKk9Pc1*-UAx%L5Lym)5$j(0|O|;cz7W{qKLDNZSzm#WnQ-|f5uRp1lN7M=0uPKjDES|*d>ump_TiM_4%=}@C_%bAfy}EhIHjO?n20Qt`R3G9=xk7l@N zt1f|`_aLpOV6XTAd2=Z6dzX?w))Phzof^ zy-l4-JhyGzc0V$1{(G6jU)6uuZxity-9+ZiG+EBX0s90<>?$#NvrT)P>`c$TO0(0_g^Qm)aTfm+L%ErukLHu};55^oX z`^&Vcu-^_EoX=O`5W>~z7mKBf%&mL!X61qH*l>gX{XGwyvS?PO`VLnm*@Uq z-YS3Q16`x^e_ePOePDcZAo0DUnk#0%kaJFy$G?!b%AfQa?Js4C^fFBXbsp;+>o)16 zjB))A*IwYe0Hq)PB;CxLS7*{+oA0jtXBiNvd#U?a&e#)z^7jMg!wQfF=TH9Z#cxeo z{7GQ_`aS)2RlF)Y=YkgiH-NYfU^pXAXG9@{;GF?ExF^WmI`&}6z2duF zv$)!YwOUNO&I|Gw{?t=Xy$&1AdFZGgu`c9q&pr2C4ZQ;!z!{{G@xlu)sP_otv2NYE zD9mTS^YY6ttMWA3FqSiP`X{cL(Vm9&&zxud6=|G={GDSx1^U1JEw zR!?9Yh--_H2E$UO*@rXwGe(`FE=$0;iDM?tAsj-t#uw{4aT)z%LHhyoVqK$NB2Zo# zmscW&CmaKB(Yj>no&NI++|ATYtVhI^eFw_+fw!NPt24B|;dFic#_=P^uN*6Itm2Ka z(qq7jwpNbGi96+=`Tk>ZlTC2l3i^Z=~vPas0ut2gg3_>vFvD(vt;} zX>c4q0(h|eu=k9zb5nvs3QR7MW-Pv#FxPfD*MV8?zkGK<3Zos$}-+9ZN zoE)`AgmyKSCFRB7zY2_>1KT&Me*Yqjee{dO7Lk8w7nI(jO^)ja7}t=wO^bu&_a{G6 z{NxxaKSkw#&X6C<9(6tS4$BhXt}C8WSF~o(3;3)0#5SArNsP;JHrsC4Vrs-?I*S{=6xISD+;tFeuLedKh9{U9)(sPxW;4!( zg|g(1Yi_Qbu`Jg}n?pUi5fwfcHUJRwX2J)~Lfver-p zC1z*i{PDB}=J~VJ(zAzjwPmFTWKW-InVp#3XLic;tc>i8WNRNBZ5EK2J-hGR{$0&D zbT1{@l5L%cgA8zD$B?f5`+9c`Zr0dr4zgz90CS#6ah<98Xz>s;8)quwsEY+E9W$<= zEDO#%#OV!5u~{i|anivIOLmo~t9PV2juD8A!uf4!<~05e>6(}wl|DCPmL;pJ*_IME zeL9YE8PYX5F)iEDH8`kWRoXQpSEcU~;)>jZ`qjWSs9zP)h#u6>;Ae1bSZr8CMAYcv zx8JVc{P+3a#}D>CsBG$cH}Sr~yR&x>Z!hmq?^y5Y-p_dN@c!7l(z`+boBRLVf82ms z16~}kb-UgLhYTH3I^@g{x8Qq% z?ZJ-*uM2)R1Qi;mGzl@=`$Y^j<&U(*frP zwDFnm^Mp^a&mN!4KFxjoe4~6P`cCzo?VID9=X==qTi;y+4-EWb;7x(PF6Acv$4Ga99VxgkGU(Px2Iqz4Th-fs?X}XE&4E1Ut*wom> zb!CL8=){+z@ecsEh}$89b&>kkh3fxZWbalb2>bl z;O~bd z^K_2hsJ8)c2SGbO>%00d{jct3`WeqG1ih>`--a}j9u^illp2nfp+>Y07A2PQPsVJqB&piESPuHz{r zhpZxdNgMf%gwe@#I?bnL^boyBdswkninY|rvFfek;OU+=8L6bou~!ZM#yXOrqudXJrf*Kt*`3adX%w@Hq$@> zu#w!(nX+L6@EA<7hc!;d^-$?=ptdQ1Z86zEwv$RyOAe7na*Nz0?PLHQLC4d@G>5LC zhrtu4V6V?v1L5_zS=Jj+!EM$KYoE0O)Stm}*cKLGpRf~9rIBtsA0%dpd{H5)#c|Mh zcX>!As#3LE_1ELI3w}PT@8}5A8&n!=2AN@IipepXO_?b-6((S+OpU2C^`_l~9-KnC z5{4pBB#J_BgJa9lUQ~r@P#ro8>K%myp)L(Cuf;Zq#@%K1L 
z$3SLLhdIJAaP?$ouCv^E*Qs)@fzNue(QGZ-#~RsX_Ka(}#?5xuxP@-Bd(rLZF)!K6 z^7eSuUYmD`U*WfSUxCFEu|kB)1eqbTWFGm*}7T(pd-y#^G^s< z5N4&oS2*f}l2Hci>NQB1b9fAyL#jy*EwZcZhjur}Rn8``Tz56BzRukStHVX-p8f~z z0^rn#vv4lXhrGG~>D0#WfDbx&goqL`Vi2TiqOgP%$s$e66d91QSz;yVun=+q?sNU= z5RoVo4f<+u^4;7+fP*w|SgBnKS1W;Bw&cJ!N442~uSn)Av%7r~= z5`SmXT?aQ0qe-A9AI!lB4~v0pjYhF3!A`W1ZP~6Z?Ib(dPPNnQJTH&ugMSNoF)!g; zc^NMUCkJ>Huiq{{c%Cm0JJ+ literal 0 HcmV?d00001 diff --git a/pipenv/vendor/distlib/w64.exe b/pipenv/vendor/distlib/w64.exe new file mode 100644 index 0000000000000000000000000000000000000000..c41bd0a011fd760ce20ba795d9e535e0d2c39876 GIT binary patch literal 99328 zcmeFadwf*I`9FR(yGxc_IE%0lE|C=$MI#uSs)Det5JUwPb(RAdM3ZnmzxOj|HwkF_`h5TY z@zR_*bLR5QGtWG?d1kiku4R&4k|YQIH%&=uz?1$3#NYq?rvsk{j9NWFdZYi=iyCZ^ ztry)s`$zM=^QsN!p2=|Fv=#eGgB!NC#~6 zpmc^LIq47nrJo`b$aAl-;Y*+1L3Vaec0dAyQ@iO7N$~`dm5fls%5d&9Z4AgF)e*sCF)aUj8Pxiq;-A1`? z9o{4CgK+FNcUf$5URi9a_qIFLn!_sSL1oU5N7*E`XuTS%^%Wu~!ZxiYEQjNh^VE36 zR~U>>GK)+#7W8@fm1U?B&)t0*+{9COfaiMqz<`pHuXjOohl%5>!p zYBq67)kPjMRB_b6aN__2U6st28Sv?&pYGix34aFj&+9ID#VSAJY2hb5_ zTlv1B;;FJW&PChpG|*9i;{f$Bm_CeIjJ7MxaKRbXek%DS@c0%OfrD-4bpx$l(IuAD zsXD;c(c3EnOw?qc=RM(VwC>j1FU)h_RtlOuzhW6MyauMu^^3_O8-E zoPoc(NOVv23eExoe$4<$Dp=e><1ScyxaLb5OK-29RIlkV?xA6RJw)IVy>*`K+uJzw zc2j&tfm!DNuxhxx(s>-8E0q$vmQ_};ADQ#NGVEpSQ-S08`4~@phA$9i>%;8s;xLyN(Ux;2ok|V=)NjE`l92KARR(Ij_J-RYtE2udJPC zfK==DMQ;`FhR-IxG|le-mH=3^#c+yFMLWC|e3xi?TGxJc zM58)p18BSOzI$n?=dGiF%HCJm3DaXk`>H-h!Wt|j$+DJ)AOLBNu+1vlgB3AOpXKvn zMTSt8wWHS@(=!Zdy}O?r{D>A)xwV$2p}zpFCH?TYx{c8bSZ-C=Ce>}!Ttz!g&mZ?e z6QSl&YFsm|Ypl0LzP#ybe6Ft=tcf^0_{t3N(w2``%kn=(BQpb#c&V9g@mG%6O&6s*L^z>LK``@4a+PfnUrdB@9D)d*Y&;%3$`~4F# zR}=aYq1VAS(fMCdg{Ztd0$i$uk$E8^&Y&-#V#fvX!g{0`5p|+lNHArG?H{V_Y;rrb z75D5#L8XE86XJ4vEXIxeB=YMTBdmpZ_nL0P_!D{ZN}0MGNS!T0XM&v2qupYMXHW+< zc$|vsiHub794cOB!Nyg#zcN^Ii8f4LnN9guS@~J2-kgCCW1?TAK8zF}G*@LXHikZ` zY&)NED$RK}(f9;>D(fcZI}CkR>er2qURtU9M_cf`he80KKswol&*_%*mJ9-~x8P|i z_^c?n0|fyA4O8IsS;z^Xdl*$VWI!yhY~|HfW)8t6ue4VJ1)tHfpQgs{0Up6;RW=tC z$Py$>iM!qh*@_ih4#_i(Ykl*OV|pTcz(Yr4ZYoQP zHD~l*XklauCVX(MAPhj!KN25r{{}pFbR<4I!?=L$CUAGS#TW*J z^$i({yfP#azy~Aot9D&$lins7RJ1Y7b_dx8({m}htSl*AjW4V%nvFvLKvLBYWvKC| zd^5okD?>5g9WBnAFSAM0_->%fLbeMy#Ehh5;Zm>#HLhE-`ZTd5fBXEQ)g`%_TwRi- zWvOP9>Tj(&YS_Wjr~wGzxaWgTF%LX~+QIru1WzkC6=BV6_p0O>NeZ&<5HgT7(85g5 z)6mdSRcIMIQB$oDlpv!rW^2}>X=)sd4@5P~H%%kjWxIKcEKmV*!~BB|qr6hTevCDO zwbVPPeH*xxF~CCIb5?oGG?8;AA?|b3xX7xpCRO9bJ~d{2M-AczAc^gf*eqXw_dA`& z*&75k;fxar-+@+$iUI;3oxsO>Y9C)F;!9CuSl?(utqZd=@o5>AU;PcMSnb*oi7S*V z-p}A&H8)1=O;%1=l<1p-0^#@!S0-kJA9AAixaXxb5Z#LW?0WRgw}Jylfau}~xKd#x zo=b3I7NFQ*5X7o1Vb5;|yP$O$0Am9?6LO9Zf?`51-}+ZsDUTwp0CoJiKN)P3q65{Y zCU<35lW9gXjyJ1K8{Nr|hk!GtWMLncSf~PZ-Vi%rX}Y1JBMRG~La%?e5mPdh{{gEc zA)&6suRyY>Fb{B@Jc_F^1S58rEERWAYnl(cYYN$pAlulCTKPM>|vjbSofxa{OU#S$< z=6dhl&0CEAmHu_wKvi!7q1uR0zeGmVp|YmtqQ>S_pXe-}Te_Q=%b(sw%f#9+v=PVw zKQTRbr81!+n{BC<)9|vrd?iFP88m;{Ddg8G;ycL+K6gzL_7rm_BXhAElmEXoI)}jU^eP237y2`QwGLBkUY8?|W4yhMz&OHM>;*{;DJA z+bk|ooD@@-M0=@~p}@r;m1P6XT86nJT{GFYW=bEly3$K0-D(4B_iC{Ha<4&}KXkd^ z8!j6B#gy@E0RWDBmhPfQ?%jCsCx9Gubr;^R$5(#nGWwQ(g(y;5h=(!Yy9S__?z;uu zk?E4V6DfWMwJ|X=)a#;+3KqNRS&e)e)4BE;qzUZ6AV}EK9fW-b;|%MBy&q&J#GLS_ ziCXp2eq(KtO|G?}tAz6_A6_td!TPaZAX&Fht^<6Sg4|;CWO{xDj|nUyS{;QT61nyj z>+1#Lt0um73qBlSoq{7o7^nAQRsRxT!D3YXXRGG%@K`idk{Qh~KBtWPR3@9A?~D5F z{Brsj%QV4OavP^nzr(FBcwa8wv+y4AACEdCey**Fu;WA#<5cTe*wZH#9s2efFJMZB z5(&q1yFsFSr7+1ngRLwl9{g2gEeq8h)MN`g^Sn^5>JCOPPB=dvVWW7=%Va=ekg4sF?%oGLzT5h^RP z*t%p1W1={ zVB=w_K`tT(y1^8@gIieKA-yJ{`v#yRLnG$)@uEEOz7ZwJ&^&wwH37=Y=C>jRRW%TE zkz3&0Hn6N(lspvN8C}Bn(!z~RcB^bzBOq2|SZVbQA~h8Y)UyMA6>SKsZbep|^VcwC 
zqAotKm`XzQJD(g5<)UP=OB}7IePR4gJqzqM01Y|}8Wo-rg{?0ms@_C^g8L5V_mAumAVy5l z@6ninAN>vvA-nGC{sW*E`Q_#CbR=3D% z^pXOhnp?F%C3J9qrN3QkF}^Ra96+jpm+!d-f_|keUgVYSJb`yLrKVD?EUM+CT3evd z$t#-+nu5i!XtW%%Jqq>T6W-1cZB8T2GbM7^BG@iOr8CI3hYeZ;fwMEh_Levwx&jzp z032vPO^qLeP6^PX!&Bo&UD%~{7=NVT{mJsmoI$WP#Hdb)Q8js?Ol~Mcz7Z zSYMYBHd_hijRf;1ZN0p5oxE~mK`RKCnuJltrvs5z`{(+zh-d334lGz?nelZSRXKD5 zxAD81q6lWq7ZC1S{5H~0S3XqK;4&@IG5mq2IdRD zk5};4T;nhN#~5cqxMq1pPf}$q#s&O7l;St_WTrVUmOc2JaF1;v+~pJx)Lc+yG2H0a z;jVV!WN3n{old?oB04hVp}X9J|D(lfY;geoF%@)Qm5t#PZGcUW#ok#)boo_E6BxqZ z8`i4{+>dnf5wL1ra4kmUZ>j1B<+jLpKg>cBQwIabw<}N#p`NWKjvgfKjPy1yf1w1t zv*G|Sa6NyLngnMd<>FDKnUoxf(qz04q2}W6T?;_8jowP)8NwDTiXMO-1#3bIvn^r& z*YgKa&-@x{3L-^H-XjFw6AzM4VA27#>zJx{XH=C>#bR-*H7CeJwBSlL4hNUNX+f6S z(1G&!C#(8_4*oo#Qwt7|jt&W9YL_N5w_whULHrOgZE$oFfeGou4{0eR+=iXB**Y`o z=5InrvnDQi1==G_q)-5aq_6-RUq~VT40sJ%x&U+mUlozS5ah6SBZohXc2Y=D<2QHI zlu%z$C;ufJ45aOrVT!i$n}u4A8DyA2h8bj-!T1Aia+|@E)T5#VV9Agd$zm04d^=hp zPV2SQX8r|+RlNvai0@joSf!Q>NxqH|!B0*8X%J8`9MI#!vB@1_y@E^p?umqq^~EV) zofp2k9=|L!N^;7wV`?lyK8sCd>_izU%wMUo+kNwhUWb=~Ts&oUozYA9yrDe{{>%{4 zn2CFkQ1kW(H8{#tw#HYDxuzK!fO*zi&2ZP(5r6BWm#X?%Q**<(F4{?c$_AQGwv7tD z(x+hef@j0*Gb8p6wV*Zmdr=MFL#!$6=T);qK#Lc{@E>{MO4MFxz zxT_;r@RgB8N&lgcJmo7iPZa$KnLd9FL0qT0gAEP2yCg^iXE{LiZd02BFJ=KxK8gWR z1@0$Dp{apK11ys2mniiDAz<`jvjx_gzzD73*))|Xb0oQ6uuC93w+G=*P2q&uttEvqW+@-k`vw{1Efp@ImB7Vl%zM=~NhI#{?fb z8DSq2vCJf5xtpkhW+ysY)lZsQR)npC^L{hr6Q3aZ2JJ}vt-)BiSJ2{GoC{@>)&z)g zTN!LL1~2v&ep6~qqr`EPfM*0=NI`M|Ql`KXVX8;iEQl^)C<&h_>`#c@G6v7QfV5|Wz(V_e*j5Z}J*9{WJ z<}bw(*{K~Q5p`9Vx#&6Gcn>N=WvFDBmKP$MEa-295Q2Dw^Dd(@gtiXD&KwSY&}+19 zg}r7JoL|rOUG<`(9$FX{-ENSdY#6lz$_&S{!g7$*wsF8?dcMEqcM^_9VSMKCA7UJ- zA$M8-8QJ@uVF2-Ghn6Y=ry1s$nMSy)$V&NOVK|)9gN+bXAD<5 zn{C=){B>yq6nXWhSyh>d$v#3AReCHyl@fwm#=tGX4g>PD8{#1_NHTDt!9vo)5k?4s zSnT5u>P3jc+6mwj=V0YGkS8fJ9)~1BKOUNMmVU~nIrUQ+(GU3t@L4Rwz8>iU+xbyK zR6C_+VDE&4J|XJ-zG2X&_gi5{V-F0&$p3{0DjYi|*XkfT;*dpZN&Z8~)S|rLMr6gD zui}k=p%S$`)}Id%i70kZ^KYZN0BouX*>(eY-ani|jdrKp6h(Y1z55f74nt0*KJl^A zsBCF4d=$QbTlFJ9hOwO-3i05=j5Iq1_Ij1np0$5IuNQg&Z5gl11n_(=*4472eHeaS zr{z%_#HY!*U z*une|I!_Vo0wf_8F9sX|t)G5>o@8|J3H?$l`YP}{YeoMHSq6x=XGN#h2L&^{Or!|$ zR%m*<-+}+&t+Kl)qx94^@`;z^AIB5U!+pk;YK3$3c0g`V)D%;=Q1sBMY)n>ViJBMP zU$jqes6|r)9_?T9d^ZyEv(2#=eSb%aYcP~CKcn^1trx&u5_R$mk+TZ_OZ*L(29`m# z$uLds0e-Ebe@GQQ4l5Hu4k#MyDf$u>-3@JY8FvHISSneoPz-yMM=@s4IE({JDRv!} z(i=C6bO^2Szu%N9i}ft=6)4MpJ2jPsr7ZDRkR{|jzvwVI=Cn*q;?q>_1SYmK=$mVk zS3)sXR)IxIK{>Pu+q|SXZRg)l60*sogk??DMp3oz;g2z#b?a*cCg8}xmx0LK!Y{$! 
zWyEG*^fjO{wPyXxco6Fn`UC%x0EakE-in1n^97Q?bk$SYcIwJ+Ykcg)f4#v84=6NzxP*o379RsevH$CwwZTRDe?BAb8pbTJ1m&!3ODS>Dw?pofA>dnv!1uA+(SE5b}Y6W=yi_ znT{8|bO;JTld#G?gmR{5?ixv2OcUTvpIk#y*ZRDCU-~t9rBaQQIt(SO=MO8ITtFyg~lcwzQl*q$)kNI!_-+Y?&N zVyR;O!v~_{RK0Yy9}R04V#NPIiVqeGL>nM{f-2jJeSKuJsSZWN1&Fq`^QS?dVa#8E z-R6i?z$m2|ri^i6`<@6f&aIT?H9bM#iT?t{VMgf9ZW_r-z>qA#LV_tz>$i+4-l8Me zKPf9nEca^uqMao}AH53ZuZt-dTVquwv*Gb*jtP~!$?YFHKhO9K>bJd-MG|gkwf)%K zM2=w(vLxY6Wgq#knLk}Mv3v8WL$&W`lVKfpRFzm*n}21f?uh0L`U|uxDdC@US{tYP zCG1gy78k~Eu>HfP0CqJ9%$C^`irJP1z3NSYgH+VY-9YS|0K!)KhOAiYEAA{k2}(%$ zQJvGSwMk@-5a-Dh+L__>H_MS`wW4*k3;8kUcogB!ml2^lZP>;nspN}KbAj2%^4;|D zaqHv3ORbNY4R8fP0*bha?I)<>U>g*9F#0C=e>Q6{6`d^on83zf#djSkz7aOq7ABi28-LGg&NBE@`z12KAj~lIU$h=JH%8wE7l0OYH?;Y~ z5&E3%*D0 zh=#wsCjMQf7VJ>F4yjEi?Qj^VP08!yk%4YL10*D`o|6Ypjp#Z)Rfx2^RdZ}|6`bG} zuHo#^PYhl>2xP)9JHN(_JvCNR4e-3U=UIpnY{*oB+?>M%IIRmVl~?}+{S<8K15;P> zJb;p$F>%0kApn?%=BV3Td39BujJ}iqOCRIt&>VYPsxQl@IG1AA;0f!?4+_~;OW|FH z(=E)Bq4*Qlp5puPoWQ9NV!K1^BM8cCqv?PIzt#ySnYAAE?)$Yuy}L>qqjxXRhVt={ z#3|vu+9~Y7$q=FM*synR4M_fnW9!M?~rP_@}$j*p9s(*1-|NOMRJwsl7G}D-ehN3@2BTBYu zROrduwVU9U$_bs~9G`h%(Z2Xp8~+ojibW=Ez4=RTyb4~A_+f&-q6^GvI5z)OePV~C zRjRN?I|F%obb41HPpZb;M&eV>=-uuG?)c4az7VJ*_Z0~Xm3cnK?x4yBe+8|G;uaz- ze>-wuydS4F$d%cKPjm3eaANn#SY#%Wk71Cj%2A*H3w8ixHbO7~!*kSBEC-I=jT*w7 znhICHER%HqO#HYc}(C`c) z;sqZWM6`7n54jN}ifN`0HHbb$gVnu3Bl#clppeD~F-JRy{PW-A9eV3Eso*t8%mB4I zlhj8jdr%80x)(3d_sZb(06_4ULBabYY8oR|R_7vkV|7-`>9^%#X2l!0<2(?Irp7|m zh|BvIDP^o1acf9X2de35L=v;(hUYy;KV?FT-pqG}@R`MF@M_T(KK>(2R{TXDVF%Hr z&`st;Tz`tFC-RR&ZdvSe^-ySVN?f(^7qN~^&vXBTR!4b6A&_6--&4^U$%-`p?)6c? zuEmCZ?)%s`QP|!vnL=Ng9s^AT+2*uwe=DKuq6n$E5%|0jJ54JIQ#_YEjO1n!^Q>a= z0f|_g{9XVebo63EvKMlp$Fy94Pbg2mc7(wE_cz4g;g<`>11^7-R$w-U?QMGZ_^Pks z$`QSL;DUY)FyN%-nx-HsHjrtlpMdk@hQ?;~d4&a=92PG47;qKRIQl~h{-x7mfQ@cK zfFKZt8!{i;YSfcu2@#IVG$qucS3Au(Y`P{tuJxeq8Fea9Ce-21=p>+nJB88iyhB zON3*Cf=8n=uYw@5Trj{(xv+qFB}y%WxU;H8$EVnHCICa7p(F?w?1vqEb#L8HBR3qI zaYi!w@frAp+PT%}-1Kbv8wgwfu1V>MGq>ED70^>lUq4Qm*arc%A3wz|elHDa`79z|}0YF7)BML79j*Xrn2FRB@O)42i!Vp|8UC4c-HtVh5<-^h}{j zF=rc5llV$}CU+yf#&S1VkImL&3m!twvfNdaV~&0De~B^C{G?b?pKZ57wpWPVnMIv! 
z2LdJK+6{mQ3GIjUr;PrT>xb;xZTcaHHbFnsPa9%x(yro1AXHNN<4=(^0$2_T^~Z9r z#UuF?nnBieR@OCm7>n?JvGqQR-skdNP&rc@-7fe$lT{=|*s_-iC2Uiq!ueLKB$fQY zp$&rAbPF@&01UNt6n@Fid7ba1Vh9e8#P5g6e7Vn{^|>6v0|5OV53wZ=aWT{MRQ;7i z77y!ZY;e3cCZWwsWpziQSxUUj&QF6Hx4>2Cf-G`lGMDM6T-reWRJH~Y;?Oc9@OF>XOWLBn4RR>nLQr%i%YW!ZC*rkYxh0N1?R5<3gT)e>9gp zM}4vFius=5WSjBu@7D$p$o&DbT$cwDio^uG{{3=Yf4l}`0?OnWjF8B4HnE?1psjP1 zH4s-kQ|J`uKGQbTULbc73YeY?UkBXaZEmbIav*4#g0l(FuEn^)oXxRIPz;Qg=%3~O z^D7cF*cLQBzd*j8`!X^}Y>i9GASo(GJjGgTRzzIf`61|2jn%x20qgg>rG!GE08*j` zgd*HHEj1c4Cb!gd(F2_7bu@|!}Y%?sgY8=95dG8VzYp(}x)3fF^OigafG5m#c@*KIstcZ3M z+QjNstN^|dma&zo6|vmkpeQs`(5Wc98OR)hZOabKl<9et@^r+;$Lt8AeD!_!M)onp zcikb%3awBWulJ>V3j8OimFf^Y$<4#4(i3t-dos0`(xM~gkU}&jHC#*_$;pGF@Kn`$ zyV`myx8BBCZ?mnp;nthj6L#_(>usU+?d)Fal2rq3R>l!4<7LW-JG0m0;crEE?;-yk znj>iC&z=;s#AudCWFyXEqc*a`>_W*B8#)&4^Y`9^ zG&zU}a!k)m@acbu>Dm8h^3m-562ysZa#c*PD^Wts@zbb)tP9C^ zyP4;ZiRJkf@=|kbEuu`m8AO?OreIxIFbI!O{Kh;U6K%JmdDD|mm1E8c<480(N6`a* zU?+|O8rXFiL$MOpdOTlViz9MEuVKgd{#^q5xnIEhV|o;Dw+KaJ8mMRLiMjxB*+*;Xmq$D7envgqXO% zVC?so_q#D3k@Dc^&@N=R0)+OrQBFNEC1lAg;A`Tey4v&uwv?#Au0Po<&N^tWWV5uv zUKt=;!)qeHV3F zOHr;p3g#ET$bOpJVmL*OU{t^;LSu+?8 z9{{`_<{VUQh3(=$xd5;uDnqWrK>>Ul{{j^hhd(Q0S3R3Sg!`W9`RS{$zY#+A1^T%R zE!>J0DB@%*FGK@CkEhq76TUz`7dw#%L)gmaT!zjFE8P{^#R}>8CyC9f+O^FZn zc`5uux#mbjaDm` z09?X4yM{xm%NIi71DmLJ5+u%-4&Wi2V)KA^oygWOqgaD5-Bc#Bj^ax6~76DR|9 z&8xRz{h3Yv=BjU&263n?=$q%IfY3XPOXU@J+Jf?m#-M-Qf+0#lo5C8wP91dc)b>v= zzi}YCKuGn{%-Y82yX=M>cyEfuRL_G)qLAI-fLn-s7;3z39w<}nu#s;g2&d1ViBZk+ z1tb(>cjO?i7;d?F5LAU!U>s3C0tTClHX_T*pCp<+zj)m|4CCAyir8&RRVqUz9E6=0 zLH{VJ7GMSbmLO4KxJL?Fluo-%>tEg9y)>BhsWAYr8JfGgzgJlrbbVT1L&!2RTf`Gw zswoT~IX0T2t;Y34n>XbH4=*Y4fLHGxw!npE;*-2a4gz$BEf|1SK)mO*f$cmX`rRt* zQOCvnVChD2WR?7OG=W&Ud04hyoC=5k?o&j5VsMr^%fG>SFz}OfbpAF2#0+b)p%KI@ z(cahdq+uF!E!$ue77J{Hmn^N5aO%(h%^?(=L}Wgq!^}o-N8DNdIdG9yKb<_Ef)NlG zje=_I=b|oi&Ju5!PuLiOD6Jme#%?=lWUlyBFJA-xlTuC6>ZPUI1!xP5{GmH}5RL0& zrxQ~3Q)i=N6V4lu#v2r~w?54(~6O(dn7mDN1sOd@k&RT!KJBX0%Vr0IM%G@9_ZyHGsYvL?C2^%8zgd=uu z4xOL$%0GKblnDARE}r4LSQ}qlZqvreD~jb6s?C35fu~?Q+Po$o099u>!uDO^f-TW+ z09ZzGUi4ixe#3>U&Z2>8mn&>vFTSyx95u;T?Vt;6Z#m#`OX6%Owx1jb^BLaY01Gh& zHC#ZCoCb#r4`;(byv1(F3C1d2hCOz#3rCXO;B>3{p=GeS<4u8uU+FRDp-m-@F77Z;@up8*c;&xAsFS_jO)if-Gt;&@#>@_!KgbQ{> zJBT!@V3&b`V(k=@0RCGGEdZ=y6en^MQa5$S0c{D;nd9^0zi{SO-)i!a&{<$ya5soe zWxu0U2NAUvZ$WmzKiOfaa;v-`L(a#edDf0=A&80p5HzB)=d5+wsx*iqZQU;O80$2K7;-^{K-UC*uMpKYyM_{5ECGDnD)+}(fY}d>#(yg zbvp0R>g-!#l&v#9!F>++UUQOYe(@j_SsENRa#DS*jluv>;A7fOt1*dJ zEFPj(Y+qVCjASO2@FG;hOc=QD!MWx$8j_2Kl>2w2ts&hA_NvD6UlJeffZ2HnPQC&t zWYLR(hdV!}f*;@j39Qx&QI>!BMcN-|IE#S1FX{X$WHI=KMjIIChe!(_kX{-e28f_K zk>ShxgYFZ~1;|3CWvPy9?l5sgss=$iLPfF*Zd&6#_cia_*TTN9aiv^oAaaD>x(mZm zOQv;b*(!6>;UF}JjfI8X;#>oQD0;FQy>u=H0mwDwXspAmo{0xS4B;>NFSJERHadbf zh>sq~9C!yhdO%CRr(sLKr-8HH`$Bhh8aixD3h1Z^c7F*x@Kp=ERGtMN4&Ykw1#R^N zAX#E5y(=B_i4yPu;0uBW8@0#asbD0S{7u@!@PVb#dcR&QMQ6ZCVsZ2l(?u*iN#Yy32K!0?~{KfL9OWNkx!w{CZVZf0<0!U%$4sIWtfVr zcdBW&3bykNR^Bu!hSwF=D>cJkv(hvM zz<1cKX}{?EL`VwNpBP~SVLfYCs4*7=Ihs z$+hH&fFz+q25=`JkU!;fx)ia1U}I@rCIiF3Dc`g%Gl2IO-=A*;28;sBH+j<+q|n3} zPjGIkX|D5dCn=gWD*j5+E8P2ziCWWn9jt4u+=dRfIo{j+sE$4MC@eHpAyYV1Zks zqUONgZ+A))HXGxbcHaFtVy{Zv50x4qot8L&h#ES26fbKJ;s_I+5juf$aBX~<6Jv%? 
z*u#&0l4f`OaU>~N`%#1yYfcJ^MvPvW8eyTpDqwm^#Q!mlY{LAOr!=9-wnT_eBHYEI z2k?JeEYuE%Jz2b+S`khpTx6EDF9ahAwbkbt*lwTR26z7gjE{_A2ZC$|l-$!kseZ=A zru^IH_~f&+Gd~R)Ftp$K(0(6>0V|AGu~(Irp?%L!K#4<{WEckw#l1l|DkIk(21Il~ z!~>#5fEz{X>_rxKN5GUYW)#^d5(xWgL?R6Rp^Wp7NeREf2Cia*&A?zr27R+5R!rYG zYpWX93y3MIC#9zQY;-RFCu-@%qvbmC6Y^K_7Rxi9;J-lv4K;wxpoJjwrWylBBU=b9 z&!oEwL}Pw5))E|L0x4&s!|60vG>_k9wHqzs(~t_ryvC&n$Q{>`DN5z@YmsZ6H!`yM zc=0lXAXtsfQA}V;TFp&ww?kx=gJKG4D;9^@EC2*3Vz-^YM9b9LW4G34h{lg-tc^HJ z(5-YzjtJYK4N_RZRaP<0(bXQY??4=1WXz6mMe~hpFsK`+hEYdl z=*(D$E!TwQ7bTyHFtQ566>#jevyam+_`c5{LM%R;nx|HDbT&O19m;oqL^RePUFcKuyrjf` zACuJC6wq?{YVl2=@slF6vk8H>vBzDVO>Vh1h6+Ru`p0MC^3)!Gr>D#Lnmyq&uooNcGVquVi<1ELG?rBDAjG;sc-ZGg5>p}FghkVMn8KNVZUq} z;)|g}Jq61QN1!Mmlp;Y@d(Ju|6YF;U#~7#L@@hkicCu|S@iX}JX+)mgrd<0atra#E z4g+6dz0DZ%Hoy{wTp^0G4g3+&wf~G5jH_ROk4l zu>O?j`sh6A6t00mWnTYi|3s)A zi6utnM`Zc7n zaPCA~y#f#I^$NV+_9(O(Y<%v0XZuRL*;d=p@AG?+B8hhyB;KVccm);9=Tnf~Bl4~l zFGA!cu&J36dDCc(m6Me=6CAJio}X{o5hqAB@bIruH5K?#gJcp%J|vvMTdvEs#B7!a zcz>sBw<@`wIOM-u}nU5Iz$Q1HY-ryVBKx!W56+kYzhJ;?{u?P-kk|! ze2R;V^;f7ev5*&#=$w-GWb{6&@RdtsV`~sEpNW^VP_psTju$-D_*I{DBURC^Hl1?v zb;u2}c1Y$?WpnnGN9-d%GBV*s8So*Dxuug`z9%O>8Fk95AIZzDSt1VmMMt1Fm*xRf zIM*$~$01I9%fY4?!6}(3(%e@KLNN8-7O|Hjr^lj+ny$MMmpPI&Ld2E&}r@}QU4c0$A6Yj6U;F;O1xvOotO$*LfvkNhs})0nh%XGJUvPk3X~;s= z&R`*UYb|X|FO3xTSFVMz@-$v(I6uQvsUMK8qu^k+Vt+pzzrz-a&+H1;2Mha$YL93? zOcy`{C1^mq1>WDX`mA&|>Zs;AROzNFB}z3Si6*2!qRzEcCm(f&_13W}oIq8ob8UKB zBJr{h;I2oQ*p86e892aeT>=^sEBucx426!N#pKmQ4ma1VXHeJHzhU6CHfMavjY{iX6^M{7xAsZcf?nig? z8BZXjDoLvqBNHIe$iVQyT5_(mf@sl5YK)m;jL|I9^II4J&{--=7Js1Kk8$NfNA8+=PQ`g8l0?cj~4WqP!&O#-K}x zug0@Pn8QJs8I>jOXn%ZmB>#$Ti8jIM&!^3Hi#CCA#GG!(ND=2l;1U??0MjF3tmFiK z4h2A7TmSd4NCXmRW%>Rw%LR1aE@eNGxsk4e4`~aS)ADEtRbP~AE z=m>s%M_Lkb*G@`;Q%gw&zAU}_;i!Y{H{Jw&WJ+=9(a3tQw6Wp{EG_KEqLR?%PSFM-{7-hKj7&KL z&ePq(bm*W1@d0r4h+9VPuo}4(jig-yZ=w}IHIYo73sh{j;<6SF)X5d>2PYMnZ1Axn+t+-?Cx-SDx#Qvt~$$b)p4eL*e zKh%m?lC^^=7y#ipZE#mdMNse@VzV*lU<#RYO4&f()>!Vzqj>YT%@5(MVSiP>vCFJX zOfsdcV64iH)D==z%cLvmZA!4dYBIJt-lGcoI$UmiW0%zo1rcw+o(Dd_$)NduDA$s$ zF1$|Y+}fm!RCwQ#ppf87T1T1vvywbHlIPzn$04;ag3_O(vVdL%>=}R^6}8#Sb2s%C zuP116N1*+^m?um*{-*hg8lPqXT>ZlBlFm)6+Pkdzq_d3=*RMp z9|0Y+vvvEPSQOSL`b=#$Jp&T;-X&%S1VHsgj6~TT-MX#D4(G+UzazF0qBH7`01&l1c+a7gN8hfb`zdIH2?@58V z4y?xv`g4q2QCv+p$Y%(C(_F5Y>(!c5!ZY%eiaf+v!~KbPEL6oUbMY6&QE|(qI;Y2_ zDlVEQe~@Ko_Ux}z?6uBNZRR)JO@qe{nVWCllID@LGH=D*zi??WTzr6$jzKp#h>vo> z>hGSw>Vl&m1MUy5xd-(OxSX)T&Om;8lAi#Ej_mU5fB@X;-t7 zg2!;&>xuEa?IdO!9xt3`1Y!q3)$u2yGW=?_jgQItfiv)`8JWUKw@tXF=*%4)T(E<2 z*ByMAWV@xzww`?*aRCQ*YOyoUb;Ug02K#EZ#MH=&^`Kn0>(Yr zhP|6Pz4Rk|ID#iqSgK%(}(Wl^M znIMcC^@#^0Z8ki~2RbL=IODzObyTy6UvYkciMx^I91!<{6e)Dl;R1d&fK~hnZH(Ch z@#-^1;Cy>@2#O$jhYzzpLg3RWOt{-UuZT7Ve}ZOJGz$fH9{yU<%w3o!YE=%NwP+Du z1pIOUZS&3qdgu@VE)H)Ng_BvR*0XN<#)C~G*9_#+`?v~+9MyCfgJX;&orZI^F)eU; z9+C}cWl3_Q_>5Z}!#)mkZW%ToFqd6sW_VCWMn`y6`$$MDPk4-+<7^Z+%IsETcv58yg=%ViHfA_Y64-Z^l!<@v+LC7<#H* zTMeW2WDv_4`^xf4_$q04U}yAe6|Pjol$x#Ni6?r7zJ~Z)LB(_wF#t}UKXNN35*JGV zqfx|bAbT9R<1C*Sro>ipTM6Bv@w3NJssfQi+Cn&ORzuWdKPCrxutEY<_x=dOeGOab zCR#z4$cII8Gpga^wKVwwC?@|UJl_j16yedL)9BL_W~;k=)5n| zyyDMTr5lXhG>(6_SB9}XzlF4tg9InMI4W>KYQCVl{Zx#u91 zdnwp2kN=U%Lt;OHXUhEtEsiAi1OEJPK83`-hmKC-PHAD-j6I4h{R>EJvm9F7*O1u7 zLSkQozK3HZ%P6-qSPY4sjjROb5L-S2_=P>~L+0Sab0`Z3{0Kfa1;81IfRr#_ieySg z)55}8Rg%xv{v!z-NM&s_5-ttFzF{{%`#<=dmWsSCI2!SZfrbJRm3(Bzf+m5 zrdV7wQ=l)*b416ofVb3(W4W5A^dfo;C>>3Y%u*@v-#Dns-$Km67BoiekLI4*z!@BH zTAcrm3JMpT(bCoW${a5?_Hhc}F2pQ>(lOasSMZhz#Xo)Uu%ra%*y2w%0@2vgag(8N zIQ@>B86EhJ+fitjRjx%W7N%cuL4L`#A-rJM$r;U!#KJCwxSkfs400GKhDP9`a9mDazdvk 
zOmZvmzNTauKBlv4C>y1vni3ek_=O*pVpN8x=$YI5h2SWZh(fS6a@3f>OL7-zGQB#$p^Aw^1&COzbz*J57MUfbw;l&7ocv$dg#gp@;SD~h z*D)k8X;lukSNpvS`d9lUgjL{*-fI8oXn!c$CXT(}XIEBa^B2m65V2y)n~k=KC=NH; zQai%3kN0pI(Q%r;^x0nm%@yJvv9B;D05QP=;=vZso`|OzK| z6Fq#qCgeqJ4Q&iOw5A%h|F2ut_ zV+6CBnidasU_*+oM5F7e1GG2$4?vqf_6@v(+%CbLSf3yJcjCd4o#PE0Gm~AYB ztjnS!G;aejZ$|dY`SP4_Y{Hz zJ9Z?Ef!HKQ|NUArXJ8BjGBiDd$V0ms_lLSBdbKGN`)iYdkZa_%&?B&KMioYYKn5%& z!WDz3iYYu8+4uy6G*h0y0rd7C&~#Y+z+o-=Ne)Jy*!9H)13r}O_it2!or1_K{+yN6`oN*$yx~Ijtdndl*cYuCE^p;;o?weTY#mzJV+y_tO zXos;WZh#ZF+QLE>ry#8epSNMZs};2JR{>J`H%j#%n?FOXh(Kwp9v`4d{TXi-hXrTp+5Cm=HXCe~Zrm4hZRt|Xss9yqvGxlk^_}ni zVgLC5F2Fm?$Q-o`Jli=JpK-q?Sz)H<=b|4dXr@O)5<9NpqJ6lB<;T70+o=dj7pFh^ zkwJD_aRdq@SvB~FoH6ZcLs6=`h<|?s3_TLd?|dB+w(DJ_zDCNTS&KvKV?TZ$69E12 zJNORiRID`IhH)Rk=^jPj0+F$ePkb3fCRBjvIIIsJU74{3u(Hw%Q8XC;Ew|(0->)!3 zk*6G1{D-5hgt3%N0>(glDUB3R0CWGIUc8VCbm?o7ItQ(b3*EQ!5-1`m5H2fm{%N}; zJvy4^C`2fWzhnWB{|n?R>=b}Ft@uwzQTaat@o#ZJE)!gxDqs-nVk=*M3*nK}py(CE zBml)l78Hojdgoz+;&&-1`U@zI1B$N||JM`<`yZhIF3!`xQZX4`y1=X(KNLrS{J61O zT!Dl8rnm4fzYWYY+XPAg%=gX#W)bogM!3B6b zS#GxRqBZc9#P72MMtE4>JA^3t2h<7(e&?S-NY;f@SRXkZ!inPM#%oY}VNdik6R<>m z`l-Q-u0-KQR2Ze8DT3<%IEUVHAG9)$@r>V)QzHLPyo1pmmmo@hVYyJ=SPj)%`0)Y+ zHF%m8{6?@s_ruTPsN~J-drLk71#n^wL|$YnZ&oX~EuX!Qc%3}t1mh3dVvE}Y^^42# zOAQ^N+O3)^wzyka={7Qhu?MuE>p(oCErl+adPN7UWbA_X$R%1c7+ znmv?thHbOXkdl$qsEpZ&!(otz+TGaVSMYx^cVn5xiuG)hf4;JJ3!w^WS`9DyT2UT% z?^NT_ySZ~ubC7iy!vdWxF3p85C3Ja2p$(gWVGpqmRyGsYi8U&3gOf{o(yfiBKV#_6 zF#03wCj!yhkiP?ww9l+F@I!t(aB7UbXJo?fXk@TGtYsD2lw#A@k7)wPV4Kv~i_UKRgD&<&&TEf%AmeXT~694ZcZ6wz39AAs9mJ z<5ks2#mcrQbsgG}rOu1jAkd&~bZGYS>bOz^4>mqwWC3l6ygE4?*G3<8>zf<^1^F~8 zdG+Tx^ZO~;sd7+1P;P{gg*t966X}sCD1HMVW+&J1Mf~*@X&gU^R6?Rz<_nO-&tH(Q z06U(6APXU|xNKTNnDs1#1-Hmo2--ntbQ#C}btG0GQKW}3AF8VB* z95a9BcOokx9(5xS zq!s1wEW^?O4_c4bNnPSUM&p$6FRQ8mVKhKkO)Gkp6aZen!UZv*_A&T5wd`X?Ms^1# zeI3Ysaw%#1+NCh5@feM_46z&ncwB@xha}8xhZM~Z)`!ZCp{wX_$P~q7A>yapa1gt( zRqMvp=Iv#(Wa@{@1l-HJ(BNoYosf~NbB|PhHz%vnQHP6D*7=X0EG;nd#Sj;7~ zq%CRWlu`z;BiGU$i~_|t?uAiEyb7^!;tSou2ui^jKG4YhHH%xneCr)4nv0v#&eE{O zxjgKax0arTQW71GIr#Mv(C^WI_V7J+D!B_#loCAAQKE19anN131yJ;iBf8om7UvjSR>4+iy^PYg<~J#J-+S)WCjXgqpdl71x&$RMbd zw`piBJ6y3Ib#bK8*%~TX&j*bcGy^9GDZlmuA^+he?;AK{ytpw%^A*{_r4N)_G_F)1 zT#}CMO|9MH>Y!_ut5~f-Nas5Jlhjt>PB@ryS{N4hz~UuM+}(H}(-j<894KDg7WE1y zKm5BFPQ>N%rr1w{*pFQIEWlQ3I{+aP){6e$3XG(e7xl0qFr}_h#N6n5N*XSHR@?M= zA4uWjD%cxTvz5o;42o2Y2xB{+Tyj{}AE!(Nh||CH?}Z|nju-dBU5j&Xjx5~rm<9i5lUI!? z2O<9l0W~p6B^m;FkFXr8zlhx2AOGKi8y+HAZt5%#uVFXktH2Ne>5PoI| zar&4bAa1bLje<*W2S(o$(=*?ip5FNu^V4CP3#|FUT+p=iOp`S~O?~DklIoA}+fDcd z!XsHg{q0#a31Loo2Pm(d^4b8Gm?4_JTQHFk3o>hhT*w^wpXO(^Si10+!__p5UyM!D zR5bU${z7Y7gk+6twtlCv{T^Fz3QK# ziAz{<%a-E8$;R59+I`D2f&T{ctoZjt7FJlipj*LkSk)}q;~BL7wrg-c~`@l znAVMYx4>&RMQu9dM!E#)D^lr;kREDfXVrE_SHa<9GZ8cqw4D`y&xpUL#otrn?@96Z zg!tQy`_Xz#E3hRPdd?SK1fR36HBc1>{C|aWfIJ960>?y2{{P3`o4`j|U5)>DvXBrG zCIKR0lVOuc6vCoLgOVW=oM0kh6I3jOEF>C|n9N8}gkVAoj8U|{Rx7q>t=iVMxPw{= ziY$r})GBqUsdaf`P@`3d7M=h1-1|%t0<`vR-(Ual=k1f+Ip=QY-h1x8o!hq&YgUfe zFgRreBkqSavb^=X%fwVxXPosvqzPj4=W;Ccpmxs!5V5#gEMyO9mAGz5Ow%stVj`c0 z!@P4Y4#j?6-^f;!T*%?xxVB#uvLA{)2c)rBgN3xR!wGJ=aUM)e z3%NTi$hye+IBKPAN6rwWH^?*)=Bww6Z`ODgv-09&hl!|7)_&to7n?v^F!40>A{2}@ z&dCzcS;5x?jM06RTVE~}={0G(=WsX7{1Phe;Hx-^;{A#Yc5$3fe4N;t!EAXulR$6! 
zKHsEZ;%jn-1~;dQ_*+?&@D0Vm{kDU?!Nf;x2h@3awYWGNd8**y~{4O<>Y0Z@ZMYmTg0J5avRwGWQNO9&!x|@%$I=t z5b5Z+EccmQmg^yx#r}wi6uuw2eew!4Gjjf@`admrkp1H_`c34(lv5!FPp_u|KQw|H#im$R-CymqOZJi zDJLDqNuSlynw*oepBFMC1|nS`JX_W+v6GI|7b&-(-qWWBu^S|NTx-f{?=;C?#p@Q# z{FY6wyw(j8i#G{bC#l7c6W!OdP3fxpDr{w3-B;oMlLC?Re@!tP7KqFPu8W&4y1v%W z@Mkwcsi2`2h<7NpNIJKSvGU|C$_($i%TRtVD;!c^Ke~;wGEv^4j1FA3Xkw84ge+b@ znadbw*eF8d+vLDMBf^wtg1gh7ZJ6&^5f(V$2^7@?4l@q- zM|lGdPhe5R;X7+#ra28gNZt&SlLvM0vdhirFZc{{o90vYWrbEBSve#lkB$c-*EFvD zo`|`b+MInDAtF2~IpczcoYOlVm8620Mku1O~q8EAJYcj-xr8bA$oRCbV+Q?KSi+{pK_Bd`j%MIv3f8jkE-J4mHMePdte`;xgVps*YuZ-h1s@$G*?p z%zo=N;4hiT@>ol*oGJX8?J2g59D&M+!-W#e z+MaX_y3TOzRCZis5=R+U#FaKLkxPw>nB-_FFs#AnV@&PqS`a3zA*Wci&4nv3T5YTT zk$hj&k?$Es47^nWHOX+vPx6uWOy_%sm}9~#9KuMNwO9g#AVGkNU}BEAgPvH zBfYG%jX{Q$Y7nnA+8}w>$p)EkB^qRr6>E?ZE8HMy*4K49%u4GsgVb1-Mgp~@&U(w> zYXxsIxTM!7~irCio@i;5C9PgVzfFMXfFa zYX!FqzCrM}41Sy7O$Og2_zr{LFZdRNZx;LkgKrUhli+$p%K8nX#Ny7n-qV{Ncjw(q z!y>J#wT}yU)jsZr#-zlUaJngOoiVw{n9Rpyn=$bilSP<3YD^}Gi4Kjhob`$v>I^SC z35Oozu&43r!sHxda#%N(JUy5mmugJjHzr6kh&#oYykbn!Fd1x2wi}ZSO!^p;-x?Dy zCeg;^PGeGn$w|f}U`#48i8Ut6jY$nAVa8;MHqi|bhF{*L`83NIN=t;HWem?XhSDTq z_@*&T7DJowX6!E9kCQRF90v0b_N;YSyEb-?%x8m#CJ)Hy$VKkPdX>2rsT<6XIe#nD zh>v9&(R*!zb%S^uPJpe?G5mj?`N7d&=lT+hUp($BYhHuaJEpGh*Ylib{1+jy0Fv{( zhquc#-2cIb&gu5_hJHPZSHBx{7O(!g-MQ=qI&qGWQ&gVuZ_9onG(FD=XWHLJ0+EC; z@tN#3&-%EHFHgkR9SAjjQ;B4Yff8JP>1`9HCjdi+CStIuid8PRK*44-)+&DCpSwSkqpFyC3E z$D0m?OTC=R^QN$F{ddJD=WTy~2jJ>`6rQ3~voXUbkI0*1Wtj$KHQG z>KrCAl{ow#cq3yy@4hpldM#u9i$TsT2ca?rNk?nmT=k(jBHMY&0Fqh;NOyXna|LeR zbz%_C15{A=1V&^BEWL>1i_Na9(b0D#w4PTtg4?JoC%M<>GKePR-v?i^dJ`-cnh4q7 zr!IYrpEJl}^WYTg3kF+j4z1cRFaL$h4NJ%qM#o_T;?yZCm~mT3R;K$RX08nnj7*jF zw$%rl!e~4Kk!Nyh(g>QB4pTM_{vh*^N|6ju!VY~c60dUuUop%pL;xXRzJANo114tz z7NF)>{v9$EZAfgt12^eAeaQ$R@QuhR;8Ika4~N+-;ZRrP zpmXjOjq`KE>-Wgzb53yvp25jXoR9@+tNf>m?~}#%0B-@SG)HY4h2OGWUs<1;R_i;> zx`(*h(H?(-XtPhqqJU3;PP#%fA~v%Z|za&5y|pN6)sJ=0nLdorf3J)L1ge0A;fo%OG2-6hEAY|l5lt99)KY{ufPC4~unb|5OdZWp5LSncr!5U0t!=oBY= zh0Z75?^WIBcJ_DXKtiQFv%kIN5Hkhg<(!s-U>w?uryNMJNdAaz>fdALKKxuN$)g=$ z@vc{PV5mEt3qV`p#wrA3NJzMd$Az<=7hYJ&dovV(*XbFu=f|2{%ew^FjZ&vi-)Q8| z&tTRyw^8Q6-rT^m)>9L7=97p4&=X1Qs@3UUe||eUujtlh;rOz~mNepsS&f*vYpI!a zFEXvYjBTsGuraspWcZ7<2hOXO1)AAK&g;YRRM);_<;!*LrAF>SU3!YNBYWLd@<+Rs0*E|cMEu$q)dBNR!Sx`EPFvb9F-jo--`@d>+!3beM z@u=?wIUE9Fhw%Kdbp3xFyvqykxKa-hgpl)oz)t)?yL|8 zTPh_(VUT%5n2MF7PXu|^SpFy}AF_R6sE zog%5>h1ORgLY}V?3M@x>Ql8~BEPV84t!Z`E_s0|(91Bl%)@#|W$Y;QEXLxGMUtzdE z-d%fSi9b_Hi4-DE8k?vIsnx-sVYKp;)w|?m0uoiN8(*irT$i<3>Q@viRe{5;(c)Fy zQT14xj$=<-Gtc?-5pTgE9B&CGwt4VQW6!-%y(Gn5B(C=CB&Erq9@ZZ~UAWj1M0of& zt3RxT9*=}Ho(*jzPzT~(;t&t=pEaGiwR)jjM|>bsp2YrMZNIjweQo;b{%D;SFhpwg ztPdEF!YrCq;g9F+IU^T1*IYuvbRhc8|oq9&}kB9)@p$tWDZfFLhxR67F|$ zyDqWOz9hUx9PKsLdH}C-@KQ5lj&BKu99GZTM z9yK_V1)}xCrBhpT##gweoW;-uMbYZOB~RL1-NEN>u$O>jNwPsk7P8qfYpF1Kqdq6W zX*?N$gcc>)`FNJ{lI=T}-xA+ge#=Bw%#mgO4N^pUwXUl@r;L64;4`9)ZCNHAUfqlF zEwcO%;%X@ZSsy00&dF!``_*3Un8c<9I}6&VMS94|6Set-4)ZUaEn~&Jkp4^{&F#qo zX=G0lNUJIXvB@>g`W^!5;N&cf^ht~85Nh4d$5`s7gR(AnGI}}$H&TRaWFuQj?)7`E zXlRe1yYv`xD1!_3QV2XPRcIPw zw@Ihnu)^CUZd$&l9egdZ^0dqhks0JR4JRdPK4XM+X<||LB0?Zron-KQ9tYG^JWxT~ zl4Lx(d~RYf!Z)7Z6yI=uPv>Yz?U7V&2yu7^3ehbe8ziFTJ%cz}-V}sEY958sS*90mY?F#8g@g+5bQS%!C*$3|F)KED6^esTwGp#&0U63 zX|x;*2E$Al>nB+6nBxBtUR${< zz!a1{*~E!#2OaOSyIiJCGc$p%AHnE&cilNLZ#54(>aKeqE$bt6*PUj+wZHZD^&vi3pMw&z*W7l`5ngfK zeX#Y0F}EJ-bUFo8@3}LnIL>`{SH~8$nM*Xi&k>I94293S{-n6xJaf1th4TU75e;*q6=q1IeN9F}H{)p5`VaGqq7NhSu{ag!6^Qo^6P zMs@G6=(e^W;XdPsL**j5+&Re0^|giA_+1MNsupSNstdy%qTZcF{Y6r^Yb)C1mY%;>4Au5oJWcDs)sSQJU`U??MUrh z!0U*OWEn2|?*5@@z0MQusvn 
zf-FY<^|{!HlQs3U1naqoQ~Om6C#|s0A%MqzDN*Uy?5)z#w%7X`r%9+#hl`dJIoP+j2%Rw~Sv=synFT=MmLKl2W^0yev zR{s0C#JvCwT?U6R;%=B7=h3}=!=o;0+{8Tyhqj|L+PywvWOi`JJp^6-Rj@DTzFzVUW7o>tutxmT6nRn_rHy3QG49j*bWU&gNZ%wcRqC9UeD^Xq z68F@bbdxRP=wZoiEHoi+N}IQUVW2mCr}O$);!nJuh$H4Ea?b0dM{oHH{E#Z|TqBz% zI#IG(t{t8zSrbGafn1jhlMMFrCd5f@GE9bMGEWj!ypG4|(60XKTS?)wCr)A9cSvDA znIwfemtOTmDI7yWx~DK*QYg>V>U33#3;iy#)KNB)J2PtB(J6|hwCQIqWcVY>K3~W| zz58K#)y}RpT55A;3Uv`6^>{8k=E*jB@Z|Osge>iEM>cXOPfX=bAUz&=Ud2c?luJC4 z-KKgMyyR`TB~G$7izTauxPz?aP}w;ys#^`o`9N&6CuL_aG5c)ci?3HW*G)X>->%xy z8XZg=oF*k&w28}>v8r{*^s4Off!4WH%ek1RP~v6Hu0gs%8UD^*Ec7;xTuXn+ z@K?IcuESqhL;QPVh-5?GQaZ?<1ldJi&a!)sk(X16(ecI*A}?nn1-qNP93ED~qAvq^ zIChftV>V0eEB3zyC1GX+5yQ=2EidPGV`^R5>F`^280oxac+_qDVAgMgkqd5=^6&>O zywrMUH$36`z06nR5PlQAD|4irL~QMgh8M3gT^LZGIUI%~5lAgB(TkwCj=TDGUw!M3 zvpykQ6W7+M45`MZ$CWVWt>yu2UWZN;{OBtaUXCF-r3FSJ>}IrGB*Nx~4}PmPJSXrr(qPj@u9_TXR@21e z_caxpljH)~KI;^lBvNJfSp$wH8G|-piF~drKAxW) ze%f1yvSg&RPUz5)kCdj##oYHL_ZArtST_%qjz?yRTjUmHi#R-+-eTo>a=K$ALThVS z9&oPDQXEqO5gC1NrvNtLR( z<$ID93t0AM9bHZHHSh7Crx zovfrYweKrSM7=#+xE>|@aS=4l!(^GAJpnlyQKC=j%P2sC`ia0I>nQ= z#Nl%Vw1GszS$`%CEG|P2X|%{3+A3tD)kqILCZ;l~55tXgTt>Eujswew9A)06qY&Uy z7?sWy@J0j^zZ$QTq92HEy_Y>8F8jCC(uwM}B=jbk^^k5@8Fuo<-J6WjGl=n2y7-Gn zsakIKtYGlUev#z0y(%FYjHC~HPbox)%Pl4UO-d);n>xwb!iA2lQuYz!i$RfqB6~1)Y*(Dx@<-bAzYD1O4$BH*^TPAbqZz1Y)R?6 z7i>uF{SF&alC|CUqg-^o>_;_6NU4jww*sJNk4=nCJNBb$yz5w1Y{*4i4BJtkA&O*V z9_6KyDF(poNy*GZ_N2;C9_!9Ul*I$r>IE%XC$$R$dvj-wwv#=n`*n5NWLGCC zqzjn-@Lo!-)ZG_YQ)b=m<*dNhS=@VJZHz^SjACS!IqQevj(wd*wyzp4yx7z5yVkWTN*8kjw8*G?m!_iE2WtS(MNl)`);m3WsXTh8XQoEj>5=nkdZ*_>jm&n|Z^x_N?YRcOb?uk^LWg(gt)uX)UVlc~rZ+oqGF_DH8?|?YomKy5XZ>taL+K&^ zWy_>NQpcUi7&^s)NC)6AJX>A@ARwR|LNYFz@Q zXy{bBe@6gtf#I=EiB2^k`zA5VFJyssgJ|wKw z`B%%GI#=0rJU&iBHB!+Ujj_ zS7S75zJz##`?sxLSZY!(T%EE?0w)_~Whyqfk6o*H9&@8IvF^>T)w9TVBX?VV^$jnw z+gnlx&6bqej)HrYPs`?NRzstT>lY4))85$O}`VY!aGhs6tx_9oUVIgnvO zzeZ?CA@lSESbLs=gxSS%)|cR1?_|kz-rI`sJ+}Qh>Km7dAeWAO zWA*wQvz^r#OdK|bgi1Z3=}ZmeayES*STaV8ci545MasELwz7`cTUm&nH2YLNAR0_; zK2-?J-JtiebPdoc2yJHlp;8<@+0~J5mon1Q^^Vp-5x*`?PVEc2FiE=vr+Rz0Dk z-FmtP+2(qyiJKs$2aU~r%-L*I~9$L=8k)~nvt@7ynb9L?6zP!5j5?|)lGN^Jt zJ!$KFkh=C|&NT`6pe`Tm)e;4U0QqQbhg@SCEU}1fk$2o_8sb~t2cd^)IUojDIg8zO zX=nO*?5VCMj2$YxL}>Gt$8aH*6KzYjscg$HoGrIwxkb8cLZiE0FIs7kjaII$Z;_VR zXr*=9BCT(O_s_;;AtnuOm(hrbI4}kja#le)N@gKsoLSOks3|+f*l?bdEh%``@)FHC zQTt|$pOKJmEOml)vn2IbdXXb|2z*PA?bQ#g=zXcjpR@O+ye$W?b_evnlvm_I5(A-k zn%gvvUFb2MBk$I+{UID=i;C(ozvWIL52AOfpc~q$sugEsm>cnk=hd~3@J-k%b&3od zu~kMNApLalPTy`aX&ywQTW1RC)@l4YS~GS*2<{CW7O9NLXcWneta5$H`u^mopg+Ct+<_D`hkq0aI0@k@y*P0=U@Nse*c!W) zgaun8w^Hd?S-bQUp(8%m4MtKf3r3|x&|WZF30mjs^6E^7WFA&-x}BNRf}M!xmFaO0 z-$Xo1*(f5Og46uy+wwu;`KHA48(tO>@y^IupPUpV$Irc@K%zUkOLPYvC%W=l{Vzx9 z2!BITW5ZeFA;yn$PuPy}UeIG=Y)2MeCd)cgqIIuvWv%S+*vfUZn(^sz?Hh>tsonxE z(rEnXLuRZuZ9n9RC+2ir5%KK4EwCv~>Y!w8ucR!<>X|G)9cPWfeUZ-80&da$SvJEE zaB>5ocC7GC`at)vbt@*uD*vR089l9cC_NjDejrOT6GKwTSB=nOpkE&Gbn9=6hkJS z>$an!p<|zsMCOs)$_dHdmj}FcRr-K;u^PT7B1tnP9&8hG$WEF>Oa@3Q5LeX@`O_Qq z@MC0MFvHmpz&5Ag@btjoHd)N5|BO^fD5-3gw1s5qA8^*~!TeArFHNuc6+Zl<@8MRqhsnl>Y!+j-TT^nvn*+PH zH%B58LyO7bc`}5<3oR0UFaBGGQtdqHA2`=gGuRwzX{A{-RI_tuf`H%BWj=X1QzEKr;Q@I~%B4*UBv_h9X9;?5!Bt<(@c+Tv;Ot zUY^2uh7l+W1tM2gnu-*R(q&L8yZ^r#rt=n|xsME`Y-ypntgR(lFD{i$1M6OZX%Ng> z_sJxZDM??ep~J!&2x_l!v*|%Su-6KYFXxTdDCM;aW~6~X$#ayw8GmLoCb_>sqI5v^ zxb!06BJL}4HnvF?U9ZQESh>i_!DMMNmn&rm_j|4b>ML#RuevB`{ZkHT=`ju7Z=K%l z`XW5_z{6I{(MN)*wXzs5(v(dsiJZVNL^K+4luxix%RGIr)ihm(V3r zuhBoE!5eMGY}Q17RVg7i`Y7?q@ZkZiJ>+-kBa&KX3nAZXJNk)n^aIfbK9Ni8*Lwp! 
za#Id^1K(!v|3)4@s?06mItK4$JT0ux6`9mck^z_A&4$Gu0v{kN-BZt=RN}0^3m2fV zpmKrLa7RRT`)SS37$%{leCypF3_tu1?JmQ&wb$b;=j^47Ew2*~1&sM~zEsxBInv$* zeZ@Ivmwl;xLuk>zgwYQ6g600fI_ds{JGG_XqR|NnOcd#uP0l)o8l)#~Gh&CWnMuIKAzv6?FnPG?DX*fSDeV6lkA1xlo zzdJ+8NpE!4$(bipZUQe^NhSaWx0up^<*XaI8}{vvhAa#pMTnlWS>^Zng?SI%6%0Nd zCb4XY?ZN9ZFZ~8j=7#fJvA-GnTxrgDz^`8qYxFuP8b~>3D9UDm7@p4}!x(S)YeYEP zne}_dMMHEG)0+mwnpY|%i0LR?*k`S|z%Y}8%6qLEG(-t0Ja^N^bc(T+tae#_N;vK$ z&Sn~CEkDMQIsepgZ#P38D$w_KUGBhFJgs5fk7J2ivaE!=>sN#i@eidaC5oA=>&p0u zog#*SY>3pm2gh3@rwz%IPG4FM5ylhP@7~Jbltb^0tiscAcD|v|@%D&*IYHWw7}*Fb z&t4&TNc(>|3Qujl0&&c984U=Myg%sE6$GW zi#sE3cmSOP@lEm8{k8e3-^t`U3KQ$l5@B#Glw_^AW1OPLZPp*Kq=^kC?jAr}zuQ`k zeqDI$@2$p=#cz#8iM9}dq)&F^u}h?6!e)`D;2Xku|AzCMy@&sjo!(gOw3g#76nN{o zJn_oS@IAh0>+~m0A>uR*1@d(%hq@K>oh0uRC3sWbp|nLwf}gcsq(sR>w%y`RJ58xu z#{v0u-Q*SP(K50sCk91U>-mnrXV&`%OcN~ojCq)a$&0tvK1}6MoR2$3ZzJVZ#z;48 zhf~2s5z*-0CL65{^Qg_c*{KgE{uM)c@rLp}-ug*#C~4LYK?B$vw@+~~S&^4Do=~;0 z_$BL)&}UJe7X-7@-|+^1pXJQmh4HpO&}9;pSnq8nCMyTEzG+12qBK6k6t&4H1AwsFq5f@?&)K3dia2M zP|$n$9j_d`Wg%*mwX;alB>`D)g6h)o%l-^1*-~Z^S0DF;xV)V8lGV^b{4jaQV;} zRcn06o=9MlR-47HN0dHA&;5kMZwzW2r5Y82CW(Y(Xtaz6nA=E|?Qi|EP zu4mAG7UZf#YcfHEgoNy7vy)L4i`{csm(Fdr-kByP`m$zs-8KD`bIn6gK)T5OoEgq*~@GlWlsC(!!)uTkr6s%x6~x#56;I~Ifut0 zk}cGxmW@mbJt)Com@a~*)!K3sc|TjfSpLj)NPrqi%`e(?9xi_x*DQ?b znOKILYv?*xVU|qK5|Tfx>+K$2j&;v%45SQQFUK+3tiG89ZiYi+y}U}R1^Vx{T!O8>4NP}Tz$^$-40!EE%0b;hG=&HW>exQiak$| zq6`l;#T86^o8w6g*T%vSyn93Ma>NbM2Al@r2IyDcBK$b9Zy)U>`Pty88qnaVtMzX^ zzXXX)5khs*VN#3ptY_vkH-w-IF(@52b-yD!XpVcCq4_WKySZWULwql?>TTbtU43Jd zYF*x)uK3v+1y$=(n_M?f@OOV8+G@F@TVN7ZBawM{_js68#9e-o^x=;raDMBUlw7+= z`JyN8u|8n?`xx7;y5D=DiisC|WgI-D>on7C(kTBK5q z&Rezz`0E}!6aEFqqi=#vJ)V_h+mAl(481eJf?PPlY&UT&+lSsjzvr!S^E%xf6G6k( zy_C+QI3qb#cP(-&C(`{?)?GUZd9&UEMzC6XC95c7t~Hev>6NnnVWd~aSmAb@j>=cF zu2j-J44tVlQW|o*Sm_vcB*(0uoZiL0!zz%QVcl}DGu~2K|6Si&m-Ko{-D<=8ZTOfC zn{4<;8@AanVwmwa(1vH)aFz{AY`D^fKepj!8}6{-t2TVkhKFp}Lrxv=oovHY8*Y?0 zZuqXZVWDkaW5cyJyv2r_Z1|83ci8YX8-8TN9wQ9hkv2@TVU7)pZFsc}*V=HC4Y$~^ z*@kU4j2>y|CfV>58&0y}c{aSzhJ`k)wBZUH-fY87HvEkZpRnN$8#dYST^oLG!*6XE zKg#B-4O4A6-G+rWyxN8vZ1_tXK4HTq8-8TN&uysee2KN;U>lCL;dwTkWy3-nuC(EA zY^XDtpBy`XLi!WI_s$SO(@OR88hGF$14mwCV0V8te>CPh&OE+-SN9KI@#-a_m##Dy z5u68j|Dzl<0tESYKS5;9^RL$YSrK(yLs0vl3iufC=3RM|@N~L-nI(|(yEWj1c z5e+p*O;e|mE%HE zf0d@tNlWZYwM{<$O4Zf2y9&%>;6{kPxGTiIK-(ADVY&#-ujz}ui*PG+s1lz_{)D3O zQHo7D`Rjr+l1dZnrI=Q0y|}!BxS6>72v<@srJ@>&W!g=JD#lFcNf`;HEMJEl;?wDx zPr}Pm+!PUKmr7RSFfD`HspyKKSB_q|A#oBDDG9>QeAKa=D%Ww85+uG%nK8eeK0P{4 zxloXJjn%m;^J zESVBoXL#c~!aIKX5^6{1yMz(qdnWOogI@AS^1#>_P7BW@o@4(<%105|%ZJX$VY2BjQ_;?&9C3F@p4dZGAyxBOg2 zSjP|X_<3shFo9Mh6#wrYp42RJ|J3+4BCb#_O1$II^ua#ixnw5n3SD|Bli zt!5Ezq_kQ^nnI`?g`i2#_-^G%VkH!%eK2i~v|mzoj@Ji-XmysS@mV@b zi07sS7fyA}yH0+jcFDi*US5Q{DWg(uq@R}_|L71*d{evR-*=~ ze`kANOukiU-bpJXeQ_mv!KD|F-c5Q+!!hZ7Oz&X)NNGHpM>3|8&vnF%!(>-UzQ(^A zeNUrRi)_6a)hHEj>sj>H-)5F#9binO^_OSsSvD}At!EuVf1AEC?papWU#hKV2SI<$ z?Z$qft*^B8T=O!Yt!IBjf13{*_b1!>5?jxGBON(g_I}xI}Mk)_MfzR#$-7$mp>V|=hEVe`8eA| z5tfYR**meJ;2uh-w3RaMlreSayO1=>ShKtN4BbXnbhw_48yWX@cj<<^QrDyplW#h> zj15Dd=%t+ueQwQH8S|M>Mq*WjRbuO>;Z|}-axs)YVl^Foc^6ZY9>o}6;;$IZOPem^ zeu<}~UE&prkAdarB|M3piHFe(H%z#paD;9t(SO6{3KkN_egjl`l1(lk`AFG@fQEAGkMQh zy7U~GxtXt!?=)8~pG%PCeA*&6cU)oNIHSp<*tkv6QJ$iLqNR(AItt2#6;~alpHbke z5bN1RmA;UL+rPwLO=PG0%QW%n{&Jo1?#ilC$?x3!s)CMuE?QjW&#&rW?DYI{ttDJV zSPJ?=TR~Ikp~~^UTtQ3dsA)xu6@Jw0e4oFn5DUI(Ri!otUrPK{)g3zcRa97B)Wu+q z-{^8H%C)Q6ejDcdgpT+ZKMHpWHwt^ruh5SpEIcBzM^tprm|n4Qy`Aw1efsuGJgI-u zfPsSs4>@_LYuNA+BS)PwI{DNwDW{!2c3kTC2@@xsapqZRXP=Wk`P}oSWVokhda|aS z@6DcmK~C<4GxBE6nmvb&s`(dRa_NH0zPE5u{^EkdqT(ebrI%k(wzRyW^2(}epZ}_5 
zHOsHQX2rEDSxEdp{<&xukN!($9Ao~DHb1`3%eM00`m0yR{vGXpwd(wHsPFdImBMwZ z^Dm4wnD6oz^6&aTp8JygWAnA-|8)s)tnT!7G=D$E#zlAc-z~MBuHscEZZLl<#kal=hF|L~Uoy7fo5-Tvb{Hs1M@pKiMA?w{TB^Ly{R{};df)dRo&&2Kk9_|U_T zJo?z(vu+Pb__PGmurB@Z@RaE+AGGNS&nIWfg+11EjQZ%)q#`v7e z$T^Ey+(b2j8K8&Vg&gfKnbEY&Xv$b4kuSO2UseXnrOO_Q7ARhY_fS2;;v=%MvLfX> zBbE7=ICQ1u&5w#!XQigj%VUS#<;@cM3aG$5Kk(dw^9ZSD7K zSJx6i`1fnSe_d_A$Vft1vt|t$otj#kSGyM1$)nTq=GEe#K=Sg|;?b2{yS5fzieO4g zN}{8qYAP$2RaPEDUv^wBM9IF)NZk8j>ck0S3(M?@StozylrAl*@cZ02;+vN5%P(_f zRaI3~xk}4jIr;wbf)eHdRm|FqeQ8-yq03j{s;Dd~cNNtX75IJmi_7#x00(o+MI-jb zzAo0P+E+-j3jLL3r3JcRg*@8yv4{@JJVG_!hA)pO0&b}@>feVMvp419xXvl)6o?^Eh+NB#!{cFprWwIRZ$%BVInk^ z*F@wX1R1$!c5oFRP}K3S@H1MN0~t@h9P66d7bdGfk{(&C@ppsEv@t|KlSg2x0s@2HyZgwlzLgzQCOuwx|S9#t+1C8hO1Gu z)zxaaa@m**yt^6eb+bLQe^LGAyferCF#Dm!mp^prsT-Zj*VLymUelED!dawEa4Ge?P16w*reY@aNF8u`+!;f6IMpCuZ`Hpr zj`+kX=fs$*9?cO=VRT;6-=mMgzau6>#Y|%LvZ_a=qnAVVBBhRgF{S`pk8xPeO4S603&zdZ{6W zF>1)1o@xm0hfEmI+`lQYF+ry>N~ba8|Krm*Cyk1J2V#7Ve$lF5A!<$(@#!J)sZ8hx z6thW~p%3{@pi6O~I{Z4}Um^5uK5E|13j3RBkoEjgbdFLnla;#EPQ!q) z@V;;d{C`#raXhVhH^x@>$}?r4nF?X}OrDcR{h-xvR(z!+!l5GUQt#kyd6eT0oyS67 ztHfxP=tIHdM0lJyA)zthShJ3@k>XaW6m^vyr=CK8YA0`6u=^P0fWE+l{xOtW%I>5% z)qhsswuI)+x+HE^XL1hPqtq=ny#!s3dq~;WWkT}uqAbQpsE2Gfe(;T`mqUK~Mo=%J zRNo2C4nIexDRt`koqp26t56+&qz-f~{}U3M`!*#shVuAoF9%1nNy7=!`S^752NmCJ z;DFP6sX>J^)u1_fYS5$^YLH{PGBw=fPX-AYFs`RM+4rbAx$qHn@}!5=$&Sq`-p;?u zNO+~w9@UX{;ZM~}$|Pz6;m9{&Xtau-#N58Bvp!23AU(|AtJeqSDpk)(&k!xUoytqF z+pAF8GdXE=F)FuNLL@KyM5sRGU!R+M`PLE z=k%xkoumdk`Y7WET2kJjL)o`WXUw7yFYc^TY9A^@NAuVhOFBs_@ub~s3zZuws}opL zzY4XwSwdCnD@I>8qJOYiYwbGPn>rdt9gS7}Zl;b({ht#<{ih9}{wE~#PzlgZm>|7Q zXIy5m@m7tx;!id$-EQ>4AAa#?@*s}55kL5-<5!i~X!?d|U9OB9@`tu}0PXPr>imES z(vC|zdu$u@y*1PsRHzMdjEqquCk;>|KOfkZ)O=D?zs5e535(+MOxZR4Pl$J4F>xKt zWOZ*S?Izl~Z5VJeZQaeintL`yHAYrOEDFyHOO;_O%aWuK2O|fzTw7+8|TZtDs^9Jm(Bi^&Atx%YSg4F!$&!a!;eSX#pdtdqW9ufPI99<=nacjedai+zr9tTD&1o&>KHRf z9kRz3gBV*3qC8Tc^K`pV6_#q!c`xbQc-(Y0v1nn+ZfE~&+91joeaZm(6uW#ihswl5 zFDms4D$y>(ratw?E#;Q*2UK;o!&0C2zob+x8 zC?~WlZGD>Kn+zvn@=O`B>rfSC;rL^nHW}+UBEwYVBz6xH=_jF;_)zc4xJ9vfj-C$H zb1}T=BfO4**T4xT1Vg{GUg`e$I^4$gP_fk8*jZ*wHef=8ayizi0hLL*9;6Cyb(=`M zX-|`&N!in5#sQ;fJAIR=8xvK3N2&^q0V*lDj-jz?s4qbcEsUp(I@Qq5v1Z&#weVYa8xQYk~3vjUQE$(oT}6Td5FjU&KnNi#!`r@(`-Iwn?4V@I2Qir zT({xXf6jFqKK%dvT=!5%*JIQT4%R0#%{ogCGuL@4C1Q4wEPUnV`%31imFT*zYOFQBLXND%mu@UuEjmBl~zZOPD(~Wq(x_Q|!>8?dZsP`K7*T6;-pD+LsmC zl$PSMyr_yvbY?|)bwycGrmw0jizzuHij$PPy1H~pxlLhCMP~%m2NHrLCX;ojP!MWh z7^b-uh5oXlX)Lf^n7@=nsr$lb%DP*Z_1R8BrQx~$GGFP`<-Vdh74u39i!w{t&;Ktw zMIG+kw6j8cwy)63s+6q!MT=fmclGik^CZzrOx%TqRr-|rdEq$~`Gq!@ppY*7?2=4F zsY)GG9(#qa$R<8ZWtLS`hjbTY&AKovC$t`#<(TcO;y;^|&GzQZbLVC2h0>Ed({89> z|AL&H5C&D0mb28OUJvsYE$_=)Ey(Cy0Sm*IO^5oNktK<=(30xnr&EQ=u*4pl*o#7Hhh^c?oD>8Ns;piS=Ryerp{+5bSeKz{ZexCB9@kP`hTz8m|9E^ zsOjg4dsQyDJ2OwI^TfxDtX#ok$tz-6TBvIuI~QtPcur<+ekJ9SGDOOr4WC84q!KE% zLgH`Aq+yr(TBwSpls4rPd(nl8sam4jm#of(S3|dYp8AvcJf4LbAn`l8?o;Pnk)cXG zr=p{#t6!KFA+M-Pug#YiP+rJK3h9r9%AC@IE9{u=(!4mTC4Vd zzqpu8Q=HMF;jYZDD(c{Pk@)JunTztPN@byPo_a%vKC8$Sj(p>;a7M8ZRBEQA#cn~R zbwDbFDLr;eMP~lz`zJXd<9zuh-2Kk!x2Gue;$^Ovi(i#X;pWR&WbQ3>*B^DNAUXV!w5S%HllC$rTs5lh+^!i*u z6fhPQZNr{6lymoDCub)@c0yPD3Us1`t~3Q=mVoMGD={C267w@qqNfAYdX}KIp6*8b znPr>LwxP7~LjPivge}|dLT4$;g`!hXZ2z#6YvM2I5cjnxiSue3UI&!4)S<+_9wl^c zLb2VVeq!tI0!sYuK}mS`p~T(eC`l7bAlm+Qpx7tD6yaBhXQ29Xgpd>HyICL8Hg@;_ zkGlUy{r^wX?;fA-`tITXU)2Zsx~6@@&E}rRzY2Dx{jbO4PxzUzTqpeg^>O)kP(1PB zL=&46ACCKX#P)wC%o7#?dz5&>BH;R;iO;{8>i_a0(RP*@|2x7Wq}x~bFo0Ne-SX>( z*=)Vn-lM(i=f+_5Pn(Yjma(!{!~1c+{bz6d%w?>P`Ca|3G0L#vqui#{W8J@U;E&B;DK&nji5X&-f|*@h z<%Jo34NY}jnWw`_R8hOgSN 
z$%cDvxZ8$1Y}jbSr);>zhMR4;$%ePs@Om4D=-1o&6*lzQu*8OoY?x=m3>&7|aFPv^ zZ5VGuWy78fNV|>++Lhr$n++`+Hrw!k4V!GZ+lE_gc)tyAvtg|bOKdpbh8Z?YvtgYNEU{sp4Kr+* zY{PgPM%eJkwAzjtpXtlJlEf1&h+`1P*M4;kOeS1XgVf8i%{Hu=Wd zDwKasc0TT&ZSv!vg{$oERh#Z!o9;gfrR+&uyO+WFu_m5g8%qBppSmDYrndk3k5SnR zs0j9(_W~y}$sOF!p(X)O>C4^`=E=Z>e)LU)9jYI26>0@|E$|-H2HXg|m5FsK_-(*? z78jDh1r~8`q6EAIScMYz8-TwR99c@6felRJZvo!`{4=TvJc7#^=b{A90A6I{mB5E> z{1M=Xr~|mMfWuB!>eYb`H4^xWjlT*ka4Gd3ZZ-m+K?%(rK*um-M)f5uU=~Wuy}%Y5 zKL`vP!P%GrxB+fNN&E%Q8fkb{36$qE21927@II6Wd^7N6R37-Nz+Nmg%m+|9s7<)SBD}@g5X;d?KBk&vw)ED5Bfrn9& zkL|#($0;=l{0Q)>RKuSd;2%*w%$tGB84BG7z5;jyO6L*qjfqM%VlHP${=nkRZg6C6 zDdc7u9s*lY;{G6T##zQZ54hUK+kh{g1OM^c1ROnCaR|ksQi0c;%M}pt6~JGkgwAH* z(DUFq<}ToQDB;gq;Qc6ZBXGBk3+z3`gez~edr&2~&l8BM1g`{MiSmKh0#~DI!0Un0 z8I)&mfj6RrpZ5cAa1$2h8-TBhqFf0)jCu<5c3|RE6CW?|Y7b{IF~0@43ngXgRp3`B ziSrTQgIVO?N#r9iZJMDeZ~;o(3%v1s#?QFf08HSpm6#*fOwF_L`M|~5qy;yHz-v%K ze+BRnl*CiuK^qr1e7Z5;4Ezjr4}M)2D0OWvc?n(%d=@2ib^r%nXz;eC;Cg6dc)Xaxq_rl{#OkU$D3* zW%U7I_$4O25y0iBTHLPy&b*ZJ4?YKY&H|+#0G|x3zl`{U3%na8<>Ve<)b|V>Id3%z zCFx2A{?InR1^5~2E!wfvg-WeNHAANs7?)4Fz+J$Y#l$sU%$kS7kZCqgNwT6zsH7KF87Pw`lslNh$P;1(*wZN6D zDfiH?1^TWt?W(}hbxJM5JQesGR2leY;KX`^PXcbW@uz^EHKu;=Z&Xwu>W&bgVq#*M%Rl;qK7;QSxLQ_KY}`Y+m5aDnfmgeMm8 z@mmeP1=x-fzXG4R9Uj7m9l%R|OnSi=0H3&n^nyPHJZB^A68L1`m`&t0_-Vk~P|_a> zy!$R=eh)D6ZWESEAZjh{n}PTJjBvplf&P2&4_*U&8YS^;1Sb5Pehc${z+a%GUw#01 z&1UXVV7>yF@Sw@Ne!#=1X3X1x^S8n;>axJIo}hmRPXh)}LjQVTD@xLSQ1GYVGj1Y& zNBw=8HWs`dxCbR|!d_tCZ3gcLyZ|M1a)5WDB%Ygq=QNslP6pok0_noo+A2@}A8-XvO?gwuI22sKX^#-)xgtwRrJb;q2Cvf~*^rx6l z0zUgTeVDibp8ZGD7EJ~&Lmk8%8I|hCC|xFjazCYpI17~fA%Y8(I}@i7Pl0mhLd*rq z9SJcPC})VpT%g<^5L}>~{TE!IobeZ2;EOgcXZGbBzL*OPZ#H;5aDt5s%(ZcWB{nW_ zwT%n>g^fQ5+-u_k6aQr5DKHf!VF|pz#`A!3mQ>6I*4w!H&-X9mK!m0guEK%IG&{Y4 zl|Y8?YAq0;KjSV+&s#QO9){@_p!lPT)r!3Zj0EDSwg-T2sg`Xr#Ubxnfc^{}hrW@? 
zyM?v3u?m?xFj#aPb8%NAmi8BNFWR*yFu&~t=J%h#e9H;Ucb~xgzzNK) zH`9l}y!T83RNF0#PcRSZlWwICj_%Yq{)9dpriSd3@4Zx73a0J{MV}6 zZo5t0ci(+#>(;HRv9VFT{`%|c+i$;BGL02i+A*3o(Z#A8_ttW+Zr8G9`?Pjn@Y5r; zu}79ITd_hMd>Y()WM6RaVXSwdk4Ez;e$g*mCVFcha<{b})1Zpx=hNVkVr{={MO*Yi zJO_8$`epl~+r@uzG4$nICGPj`D=ywg_*$>&7Z>wLRuDIb;LmO8$hu2Q2Ws28Dg@Zd#x@epLioLk& z>QCq#Pp!f{SgRbUa8wk^i5eP|avqOzq5fVupEPNbLJ*jmJ$tsg`s%B7+(Ko1!-fs& zm%sd_dgPHubQyo`wb#`9@4w$s&MW13z-HRe&5uV(Nx!D@n&4x_+tjLUpKdF@qC%}A zi;B1LK>4=Tg_MLVR(YG{zltwxb{Sa*CC zawG0VISrqLM;V$=G(>;n8#QW_N=ZpU-a@K6_uO-p+l{>F>}<`unKNgqi!QoIU3~Gy z>axo&Q!5LnsU=I6sLL_W!;bdj+H0>>cloENUtN8!dU$2Js#}t!uD>Ev z-BRIJcT{DlCs&`NeqEcbe!P0Idi=IZwe!XcRMXF{P{~gR)oITJ)%cx3HDyOoo$-87 zO@1k;7Q7f#nXj^J_C`?6d?%=?QFGr7s!Kl%ssa|}7at6&)vH&lb?ertn{K*E{pd$O zQn%lJyV|&MquR7-lluA3f3EX)^XAR!(MKQEZP`;#J*8fGcC)(fv!HskEvTM*?m4w* z&mQ&Si!ZAE`}eC?Uwu`*@x~kK?YG}npT2)Uz4Ccb{pnAC(&eJ1rA2-5=|}3%UjW3!q%{H%q8-BW~_$)XWCes!v6vM{}KOx#(xX`KgIu{F8;@z!Wx&GvVJLJRpz3b zZ(vXE7tEiYr4qiJpn?a+tKi!+Rq(wlRq&&mRIu$q6+E=RBmU$1;eR;(Q}KTu{&VpU zt+rD9FHcax_2X6W&Y3Fs+bdOY`%Tb%PzB%L-|3&hM_vT}d*Ht({$ub@`_slCCg|#; zg6EA_!38r_@aij7@JBbP;A0P};H&#P{j1){$>`5`XPDZ6EX>VFC*DWNZ$q|sFyRsv z9ABw|GrzBbS8i0nn;ufZ2Y0LB{&%|gcj5m`{O90*G5%}te*^yS!T-bfe;WV0@K2aG zy@mhx@!!_r|1_jeTmXeqDEt5lKZn8&DEtWuZ3#j3`S_svVrEbsx-zH^-xO3|Js4Df z-5>1mKM4P)<9`bNXW_p9|5xFEE&gv$2&#L>2h}4pgX)qOds<4$ zd3GGd$@JXmp>Nutr2Y%X;y@^j96ro7f=Ffy{pr)Qr%Ycsr0;+v>~WB6J2+(_{-?Nw zK<4y?gZlOzVC;1qM-F!p$?OjL3lsbG?R&wwnm~$9r|FMR z&xOUgS<`cMB6J)>{3rekJ>xSZ3e$6Q7Y>{@t#9AHB%;H9;h8-aW{k@shzqeldD=9c z%E2A>na)UOTJO}X>7HD;JZ-2><&bH1@tAUs+dU!N8JRj|VeZ1*?5tc9OPUb#WoJ&B znmI1Q5p$t!A-u}=&|KN}3(p=G-?N9qk>DkU62;uyTsYM|BfWQ2js!a9Bby;fjk5{U*b~C1+o%6mP zR2MOJkTF9I@YY*zRd?KRhr095JJnrx-KFli=N>&)c=+Lm^?2}!C!SC;R``A6ed-Rz z8=Dy`?AWnGz4X#cdTj9CyYHxX-+foT_uhM|wY60pJa|xj{@JH`Z1ClmU#f4u`9|Gz zIH(?Etg!#DUE0}k!|3SFqNAf;ZYZFmTTVxJBOTqnDk1oY8XtUG%?$2VR|a2EHwE8P z4+h_-|L*K()HpiCyd?b7*={%u|7YSq1OLaA4BFVPl5I_wIc%D7(E)d3GqY6rHo5agNKhv9ycUrTmlYy$HkpIc-W}qalK-aPqqzQ zuH-S}qN8Jz@pEdQ-f;ty`i~jcD>^!+XJlmTsZQs>h#mvS4CxgeBL?H*hbBfjqK4yk z+_)hEM zLL3MEmwdt>q08*%JAK02c$5@>*H3@?(|PnePk#F8ryqRs$tNGu_q_Y@#~;5BeCLZV zzW9`R>3eUz_11&CcI^sq?k8pPaukk&uF{PN2`*}i@Is;N__PK1u+(_mD8R02xk^8dAW?!i%3X&w%%yINDbtF=|T zLe)+s8(l;#G=fNgkc-4IDkGV71|!Ri1dMV^2m}JGkOYXLf&sjYh$2Z^32M@fs1QZ2 zUM34O5=AiT4vCksBCd*ZnFtK;{+^RQv1vjG!KvCmJXI&Bzs~oa^WM*UIo%Cke-iGz zhQ|EwU(|o9et~ioeOp>u4(qix&Q8PML-gJu9lVM;`uFeO6Ziy|gMZt$ZKfQl2|lZ> zt#xoKhh(39_LX{YKKmYvfb?es6mfLsjym|A6<>%+8mz9-e z%C82tOVlsB;DQVKDz`DBPoF-}u6ufVdN1bH>(hdN|Ni|ZUuMaL7hc#;G%VY@cdsd@ z+eYUdJ9gOn@4xTh6n`A-@TRG$$v*t>Lw5~MzRTxvuO)K_zWL^x7RlWa&DnhT@L~J* z+i&;p+O_NDPd@piQEPq2Gs5#B>03X+P{y;T3%~p+(^_pE_sE&Y@}Yc_!2{))oV)-dIw2i) z4@UF@-Qk(-+qXL$;Gy*2;Dd7Yq;T7su8-*bqx@ew+($TcR)2dOI&{di z4LSKwOib*F9Mj=}@@BSv{dyD6O+K^JVR!&flvi^d*z4-*3|(nxXmGE=0ULn4p(pSN z*w%h!mh*vG%3Eev{kPc_ZZ``4m^Oa-~%s!o!9Vx&z?Q5^BS5%Z{S51zy-LWKQdaW-vdqGt)HoW z$83;N@cmUY=vyZoUN^f`IP~0L7XOM_nSPM?=+UE3bOw0c+bm(D6RUllgb8lLhNQ;h=o7 z zcJoJOHwlLuB>z7Xy}@B{z1e_2wBnE;T~MCfdTnyIV@EsFdH}y{@e<|u`-u-_=oz-9 zJv@;2NS=7PVub$upM8O6u``iv_@~lQaQLIy^}^v=;jmgi$=iwcjqX`?5@dRO_uY4< zedf3h4!(Ol+T)4GB@&M#M~>LrFE6*JXWd}SCa2hMC#5aCkqg z&ytxWug};d=`%K|r}O}Q?k#yjr;n-5)(#F1e(`&uau@Nk>!sj=J!JGaFe3NJbu=Dc zzL0ffsqyzOTGjL<3rrhqPZlIQ96;dLpPStp#v!xOY^Y$$5Dsnn3=Uy^z8sq*oBHpv zN&hAsVryG%tEQO~g@c1XK0dz9{%enCpFe>g=r8+$=Lh=0wbv8im)*0?kKS(2%}lbD z(+69na9A!JXxXx#BZWhjZ2T`jHUkH*&*0$o8Jl$F??m_4%q{^3(Le5GvthDRYHzg% zzwEt}f9YBo{t!Cn@$k671NMVRo_Jgsfgd~h(wsC4-ZjXc6%J1ehl;7K@E3e-HtGxE z&}?>#a6q52NjJzQd3^>4ug~D%^%-cnuD; zb;Sd0&8&eI{I5aw%nadBIM@#E+iUMhox$NY;h-9u-4dbC*d+8hO}cq|A7Ms_JkL{ 
zqp!dXY{2eu!S-YCiEFT%>{aY-w2gBU?PcNcQqe$LJ!_Cv3x{ZZend8DNp6}goN$fZ zJ8p>0yK{&Y-=1y@rj4*Y@6{VPMEV#`pJkJ1?ZGd5@8nZ?Db~ z4$_4eghME-&)B4jY00*HO0q4@PqBydQf<-1G+Q7X?hy`ig~M#&K#TSddcKCQ47*B==I3Zg++x+$mwzagsZ59seg@e;);ebu@`dluX z^u&}Ddo(}Q9uf{^!r^}5aJS@WPENW56q}S5;bXiYy;nQI{sX_4|3QNW^#lgs)Txj* z9N@DP4g0MeORBdNnW3yl1&1K$HO>0C^;&dkY@LdYsCRT zosS`Zd#L(y*{fq_+oNQk%O3Dt`|lE+W4IwNFK-*LqL1i6QBje@AyU4gaf#*$*VqJf z;Z*It*Mn_72KxL!Sf8Vw>(KuJet4gekpUm)SXiGIOBZPE0qfsWuHroT z`oGk*gTHt0-dF2rPo92S@C*6Tjt`ZUl{ReHFw@y0d*FcwOf@BY_~C~gJjD0V8ywgR z*e8%HIx!45U{8@l_Cjd%`hs+OrwaHF*|w(&zxOeu3-}o5Gd5}2jGqJkDdR8Y{_ayL zbPn<(Szn^Kzn{**R7;0V|1l<&p(fK_(59917umdYR0terD4|*ee#5wqO z@B)26r=32F5A4rgpRq}i`uzKKEARs!_q6V4957(O!#dOB@*u1o)@9QfMbnuiQ%=h+ zx#SX)Z|z_K4*ZSC4i3J1JQ$gej!gNvv8{htKH6TrJ5~F0tB=9{{M_szw(g-j?ZfSB z?a;x4JH_u#=O1*I(n?E9_v?(3&7C{<;D`|;Y~sX;j_!&poh*R^dO_X?*uWzi7jh8n z!^k~861qY@Vw2)Y-or;?p8%J3nn>{79{6>-`5)$z?v^V@*?8AocO3&xof~x>o%T3f zdg-O6vulnI>>0>BzK_QPc!3|82Tx)HM)Uz4K_5I%7?DAI6TkNmdlNrPUQR3jiEbhP zlBEir7kd{yRYlxp&6?%-01nty<(4fgE6cqP55U29aPV<2F(p1Wau1x0$Pl=B`Qm+i zDtvC)k_VN0x?1N97ZF2r1%Bk7jx3RH1}9)6=d81LPJf3C8DgoasixB@jt~5wXFLv! zUamY(yq=&R!0t!nh?oJn!ViRZKEEOODYyN-dS{xmpTc{gN&xYqVJZ5&VZ#Q81N0$p zDE)9aWM^mF(4j+}Y^f&i;D^@G99)1KJ;46(J~0CFgRi|wdVmg~6U>VZ7VO6~Pign= z-BZQ)O68rJ1vE7u>N=fi&VTSSi2vj13OtRCjSe31*5q4os@Um+_-&FmCj;zJ(3$(` zT!RC46q;iH*Q{A%!C=tsfuT^yJx7i}bZVAu`?dDSS%Q75-kUF=se5#`e$xYZ@qe)Y z(#JBc(Kkk5f$!J_$*J=tR3~w^Rl4r>H*5{^jm!Z*dVpTS6XXOQkk8`(zVVuQ;Su~` zKNr4F3vTQtb;8az2fvq7#z+|k2j77WypSPuVgCI2CYxpK(~%9>eCHYL47P}d{16k- zfgQY7tXM&gv0NXMKV8_t(ZTQif6==X9K2ohxOf~Gp*OZkd$)@dkSX@sNcsXR*T4^N zybnQL2f0UX8xhHm_ILtb*U+hJaXhdWC>a4 zef9ug_w^CQNXyRvb_YLe`1n_QPZ@H_9t0lfAaufhVC&eAB6$LB*REY_%a$#3@(jIw z2XABt-NA3-8ku1}_5#_=Dt(ngSLqJyv>w0{xi130@8IBjM?7H;or!kyAY(iOZorFd zZIbN-7Z1Dq)0JJ6_g1LC|LgKU(k~bPYX2?;7wFIE@nH0LFhW!83%WpuH@pWQu$|~B zJi``IOP~e?F3??Tbj8km5ApG@9Q@GAw zUy*mAPigrxm6=EkdO%N~|CF6*%m07_dnf)2_76E=KVt8M#>AY^9zNg;cs_8A z&BxYa#}rezx`uH6p^P zV{tlr6Y!m?{gZCs48mWtvj%4$IHz&O%}TVnY??m(Xv{fwEM@xiCVf6?)rxt z&kE1iEj;SeFIrFS3`akopFaZb5~kPn=J42bCS!3JPHP>zg~zkP<2m8+yzm$s9#g_& zad@=Wu^VGA!4s#Rs@}zOT4(D~{98Kmd{SRgoSvhuEVvy~k8E61QHjn6YPxnCYjsyF zn4mrXhMb(7U&!B2SI$38?wIQ5}j2h8e)dnanWvDj(l6|$9k8V(HjPQ;$T_^EbKq)AIo()ZEDWso>2YIAMMXi%M;jPjU zwYo4*V%f))cgfS|@5Zw?(>)CM9MA;cI#@Ks)%vKkC$z5j~?>t;|KZrBKsefIJS5l1H82 zcRImY8sssmP8ZAis;6WtuP{6k4%SA_8EMqpsF6@Fq)xT=(fm`^4EG*3o6@M7r(obb z4?023mge!tub4S=W?pe|@pybtx`%_j3ACV*U#HeYje}YlbtdW?)XAtZQQM$KMty14 znAZN=x#J8Ns5K?2?&@pb%on*nHh_+ALyv?5Sm6Jb`6))dYmHzar%7&{8VmABos0Sk zwMJLt3a>x=_Mfqo(+Mw+gGC3mHvZ$AsSc5?&s`4377*{_pMy8BKm*2i0^?lWka`uh zTWW^Xo>zqHh3EwJ5o(1cs!@3wuvnyxrgp2=CjaP`KHHlo`#&CC#O@Fm5Zgf$Y!CD} z(EPq_dnnV@3CK?)kJRwVO?#c7&P1(`I$3cT1{TX~Y^~#u)Y|{^XWnVEL9zIk$RK)% z&(8=Pbm&0`4(J3QI%+$&hFr~*T3xANpk5l)POyKhzhJ?F>?famayYdi{Bhub=ET*+ z)Z|Tp16p8@_#ch>E$0BdPP8YF+!vO$_J&w|Y~?HRlyAv}4vQ8oa&`~9%l?P`M;4$5 zvdG%NLPICM`uJV@^5fmoi70uLP9Tr8_7?W?PdrYZgPbljCccs`x_ArS1SZd)NZ!DU zHbvaR^rv8=CJ#78M9cy-fVZCc-{VE5mD zzl-mH3A>0M00%UHKhS1>^9S};#RPkKUYa`#;N`J$=Fe=+Bl-5lr*cldm$l=_ker;{ zSNYzV;GsI^q|u{C|4yIZwh0p^xID1VNV?pGkA0vy^nm`vcgOAi~s+3TsxP+O9pWjgom@b>W+aL}O#a76P4_}I6w9mJ)Y{%UpnyN+5c zF{u2Hz0?w^OXzc^M*bBTCQqL1=#D*LA7pMX1FVHz@Vcsf;;8B}cS@EEdVS#E;05(H?aOW*`~m3B`gD9Ud^A65uR1Jx-le%#s9y1< z)+Q7=p10H@Dg!2|39&jAaxz;0tRfpgQQO^dYfKb~rF z{om=o>bKeY9Nlo%W={AF4Emk`)AtJ4O*h@-U;_p^_n-sZhuq*NVJ|qBs9bo3<|S6> zOmp@Z`=fL8cDNZ*m*Yy)Ri!5x`{U(g!7vB}E4tUS%N-TJH}TjT71dvJIkSf4pCecsXK zit!7{IU$c{u(n$teEt55EP*d`(y@8y9J&pj$Qb8$ID4Vb1vvZAMZ8&?77PB-bdRJz zJfLHHv3>Aa-wA>IeW`rdeVT*(Nmm=6H95bF9sN3$eLE>!M`QJw-EM84*NrU^E!4ZI 
z$8@4|pMNG+kI+-}p6|taO}CPEzJsOYgwO59#hk3^tQK>(+a!-eR;*a@qG~jUWTU>) zxsV4}u3UMX-BAtTYt1p~*=L`1@9Unk1A)Nx%4ct^si|@N^m`b5mQCLsxft~{oqs0J zdPH+HN&XI?hs4InYfv%B&GPFw*O;Q%>T$(^YU}(Q+>1=(!+C$k>lC)^HpNZEOym&W zky~?>&ULDJ|2T&F0c)Xa*d;ph%Cod=g(t+o>snnh_g?tVL5*v%P3RG@;yWPQ^{+l< zOD47YhGW9<8}TFYE3pExN`hjg#{?I(R$_8sNB&v&nWF2RkF#Gf*yDnkIf%uHy~srn z_Yrp{D()mljZX#K*ex0{3^CLTk4!e^h-_cT#FEn?k3h@@{ooUO5PlGC{XN$@e~b8o z*n`*yUzd2J`ic9DIf%nE1PA=nch9&wH#Q79G`(J7YfA>ZcoN?ozn!>&*lBjob#9HD z8_iZJuGQzft+=?@oe`m~1}~8pPk(o_{?u<-?fm}PihaTjWM6`Q+83PNq9(`r0iN|T z_k2;Z?QPoW=t&H9_c*uy0WUwu9=0C4126U2bw^WdHN4!jwbItiz0RHIUOwYS+xAKo z%fIBG*JoFpuZI0XZm`?Bd+4yH?93`_)cBv zp1%v<8Rx!b?VOmT?1F-v+|d(f#?8o`m{%}3e%h40fdym6=HzA<^v)eOX3C_3Nq0=` zJ!Vqw!0dwD%L}iJkIT)@8+S)ef&Z<`!SPpKo)Dk>4?l^EOPV@mT7mmj%udeMxAi$a zS&%bkntot&rkf{DEon;5O-I1bjN zE839O<4IR|dM0P4Wu~R4Uw_keSy|y1KVJWE^w3xQ#E{gktE{hVsBEfisf-DX4de&z ztzKHas=BtiuDYSRxw@tL+?wt+y=oF`GHSAF#@5WJxwmF%&8nK(n!1{X8ot=EUUS5i zCzO|0t*TmGRa>>Ws;;WOs-dc>s<|qzIzdmS>e);^Jxb5#Ylafdu~@T|Yo67bX|v|4 z*KAES%{BXKT54i~=LXLYb`Qn{dj%7MiNVxhMlds&6&w{D8_W;R2$lqw2Fru1f~$kI z!Og+CV12M5*c5CI?hCdAV?yVK&JT4D#f5r>5<-cg)KEq!Gn5q?6&f4L56uXbgzgP3 z4lNCphgOAFhiXHcLv^A0P(!FG)Ep8bsrv444O!(g%9obcmN%4luSlp!tVpfMsK~6y zsu)!{*3qiDa-XBuxq[^-]+) +-(?P\d+[^-]*) +(-(?P\d+[^-]*))? +-(?P\w+\d+(\.\w+\d+)*) +-(?P\w+) +-(?P\w+(\.\w+)*) +\.whl$ +''', re.IGNORECASE | re.VERBOSE) + +NAME_VERSION_RE = re.compile(r''' +(?P[^-]+) +-(?P\d+[^-]*) +(-(?P\d+[^-]*))?$ +''', re.IGNORECASE | re.VERBOSE) + +SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') +SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') +SHEBANG_PYTHON = b'#!python' +SHEBANG_PYTHONW = b'#!pythonw' + +if os.sep == '/': + to_posix = lambda o: o +else: + to_posix = lambda o: o.replace(os.sep, '/') + + +class Mounter(object): + def __init__(self): + self.impure_wheels = {} + self.libs = {} + + def add(self, pathname, extensions): + self.impure_wheels[pathname] = extensions + self.libs.update(extensions) + + def remove(self, pathname): + extensions = self.impure_wheels.pop(pathname) + for k, v in extensions: + if k in self.libs: + del self.libs[k] + + def find_module(self, fullname, path=None): + if fullname in self.libs: + result = self + else: + result = None + return result + + def load_module(self, fullname): + if fullname in sys.modules: + result = sys.modules[fullname] + else: + if fullname not in self.libs: + raise ImportError('unable to find extension for %s' % fullname) + result = imp.load_dynamic(fullname, self.libs[fullname]) + result.__loader__ = self + parts = fullname.rsplit('.', 1) + if len(parts) > 1: + result.__package__ = parts[0] + return result + +_hook = Mounter() + + +class Wheel(object): + """ + Class to build and install from Wheel files (PEP 427). + """ + + wheel_version = (1, 1) + hash_kind = 'sha256' + + def __init__(self, filename=None, sign=False, verify=False): + """ + Initialise an instance using a (valid) filename. 
+
+
+class Wheel(object):
+    """
+    Class to build and install from Wheel files (PEP 427).
+    """
+
+    wheel_version = (1, 1)
+    hash_kind = 'sha256'
+
+    def __init__(self, filename=None, sign=False, verify=False):
+        """
+        Initialise an instance using a (valid) filename.
+        """
+        self.sign = sign
+        self.should_verify = verify
+        self.buildver = ''
+        self.pyver = [PYVER]
+        self.abi = ['none']
+        self.arch = ['any']
+        self.dirname = os.getcwd()
+        if filename is None:
+            self.name = 'dummy'
+            self.version = '0.1'
+            self._filename = self.filename
+        else:
+            m = NAME_VERSION_RE.match(filename)
+            if m:
+                info = m.groupdict('')
+                self.name = info['nm']
+                # Reinstate the local version separator
+                self.version = info['vn'].replace('_', '-')
+                self.buildver = info['bn']
+                self._filename = self.filename
+            else:
+                dirname, filename = os.path.split(filename)
+                m = FILENAME_RE.match(filename)
+                if not m:
+                    raise DistlibException('Invalid name or '
+                                           'filename: %r' % filename)
+                if dirname:
+                    self.dirname = os.path.abspath(dirname)
+                self._filename = filename
+                info = m.groupdict('')
+                self.name = info['nm']
+                self.version = info['vn']
+                self.buildver = info['bn']
+                self.pyver = info['py'].split('.')
+                self.abi = info['bi'].split('.')
+                self.arch = info['ar'].split('.')
+
+    @property
+    def filename(self):
+        """
+        Build and return a filename from the various components.
+        """
+        if self.buildver:
+            buildver = '-' + self.buildver
+        else:
+            buildver = ''
+        pyver = '.'.join(self.pyver)
+        abi = '.'.join(self.abi)
+        arch = '.'.join(self.arch)
+        # replace - with _ as a local version separator
+        version = self.version.replace('-', '_')
+        return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
+                                         pyver, abi, arch)
+
+    @property
+    def exists(self):
+        path = os.path.join(self.dirname, self.filename)
+        return os.path.isfile(path)
+
+    @property
+    def tags(self):
+        for pyver in self.pyver:
+            for abi in self.abi:
+                for arch in self.arch:
+                    yield pyver, abi, arch
+
+    @cached_property
+    def metadata(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        wrapper = codecs.getreader('utf-8')
+        with ZipFile(pathname, 'r') as zf:
+            wheel_metadata = self.get_wheel_metadata(zf)
+            wv = wheel_metadata['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if file_version < (1, 1):
+                fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA']
+            else:
+                fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+            result = None
+            for fn in fns:
+                try:
+                    metadata_filename = posixpath.join(info_dir, fn)
+                    with zf.open(metadata_filename) as bf:
+                        wf = wrapper(bf)
+                        result = Metadata(fileobj=wf)
+                        if result:
+                            break
+                except KeyError:
+                    pass
+            if not result:
+                raise ValueError('Invalid wheel, because metadata is '
+                                 'missing: looked in %s' % ', '.join(fns))
+        return result
+
+    def get_wheel_metadata(self, zf):
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        metadata_filename = posixpath.join(info_dir, 'WHEEL')
+        with zf.open(metadata_filename) as bf:
+            wf = codecs.getreader('utf-8')(bf)
+            message = message_from_file(wf)
+        return dict(message)
+
+    @cached_property
+    def info(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        with ZipFile(pathname, 'r') as zf:
+            result = self.get_wheel_metadata(zf)
+        return result
+
+    def process_shebang(self, data):
+        m = SHEBANG_RE.match(data)
+        if m:
+            end = m.end()
+            shebang, data_after_shebang = data[:end], data[end:]
+            # Preserve any arguments after the interpreter
+            if b'pythonw' in shebang.lower():
+                shebang_python = SHEBANG_PYTHONW
+            else:
+                shebang_python = SHEBANG_PYTHON
+            m = SHEBANG_DETAIL_RE.match(shebang)
+            if m:
+                args = b' ' + m.groups()[-1]
+            else:
+                args = b''
+            shebang = shebang_python + args
+            data = shebang + data_after_shebang
+        else:
+            cr = data.find(b'\r')
+            lf = data.find(b'\n')
+            if cr < 0 or cr > lf:
+                term = b'\n'
+            else:
+                if data[cr:cr + 2] == b'\r\n':
+                    term = b'\r\n'
+                else:
+                    term = b'\r'
+            data = SHEBANG_PYTHON + term + data
+        return data
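A standalone sketch, not part of the patch, of the rewrite process_shebang performs above: the concrete interpreter path in a script's shebang is replaced with the generic '#!python' marker while any interpreter arguments are preserved. The sample script bytes are illustrative.

    import re

    # Same pattern and marker as in the vendored wheel.py above.
    SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
    SHEBANG_PYTHON = b'#!python'

    script = b'#!/usr/bin/python -O\nprint("hi")\n'
    shebang, body = script.split(b'\n', 1)
    m = SHEBANG_DETAIL_RE.match(shebang)
    args = (b' ' + m.groups()[-1]) if m else b''   # keep trailing '-O'
    assert SHEBANG_PYTHON + args + b'\n' + body == b'#!python -O\nprint("hi")\n'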
+ args + data = shebang + data_after_shebang + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + term = b'\r\n' + else: + term = b'\r' + data = SHEBANG_PYTHON + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, base): + records = list(records) # make a copy for sorting + p = to_posix(os.path.relpath(record_path, base)) + records.append((p, '', '')) + records.sort() + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + + def write_records(self, info, libdir, archive_paths): + records = [] + distinfo, info_dir = info + hasher = getattr(hashlib, self.hash_kind) + for ap, p in archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + self.write_record(records, p, libdir) + ap = to_posix(os.path.join(info_dir, 'RECORD')) + archive_paths.append((ap, p)) + + def build_zip(self, pathname, archive_paths): + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + + def build(self, paths, tags=None, wheel_version=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the name of the wheel. + """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. 
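+        # (Editorial note, not upstream distlib: at this point archive_paths
+        # holds (archive_name, filesystem_path) pairs such as
+        # ('foo-1.0.data/scripts/foo', '/build/scripts/foo'); write_records()
+        # later hashes each file into RECORD and build_zip() writes them all
+        # into the wheel.)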
+        path = libdir
+        distinfo = None
+        for root, dirs, files in os.walk(path):
+            if root == path:
+                # At the top level only, save distinfo for later
+                # and skip it for now
+                for i, dn in enumerate(dirs):
+                    dn = fsdecode(dn)
+                    if dn.endswith('.dist-info'):
+                        distinfo = os.path.join(root, dn)
+                        del dirs[i]
+                        break
+                assert distinfo, '.dist-info directory expected, not found'
+
+            for fn in files:
+                # comment out next suite to leave .pyc files in
+                if fsdecode(fn).endswith(('.pyc', '.pyo')):
+                    continue
+                p = os.path.join(root, fn)
+                rp = to_posix(os.path.relpath(p, path))
+                archive_paths.append((rp, p))
+
+        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
+        files = os.listdir(distinfo)
+        for fn in files:
+            if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
+                p = fsdecode(os.path.join(distinfo, fn))
+                ap = to_posix(os.path.join(info_dir, fn))
+                archive_paths.append((ap, p))
+
+        wheel_metadata = [
+            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
+            'Generator: distlib %s' % __version__,
+            'Root-Is-Purelib: %s' % is_pure,
+        ]
+        for pyver, abi, arch in self.tags:
+            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
+        p = os.path.join(distinfo, 'WHEEL')
+        with open(p, 'w') as f:
+            f.write('\n'.join(wheel_metadata))
+        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
+        archive_paths.append((ap, p))
+
+        # Now, at last, RECORD.
+        # Paths in here are archive paths - nothing else makes sense.
+        self.write_records((distinfo, info_dir), libdir, archive_paths)
+        # Now, ready to build the zip file
+        pathname = os.path.join(self.dirname, self.filename)
+        self.build_zip(pathname, archive_paths)
+        return pathname
+
+    def install(self, paths, maker, **kwargs):
+        """
+        Install a wheel to the specified paths. If kwarg ``warner`` is
+        specified, it should be a callable, which will be called with two
+        tuples indicating the wheel version of this software and the wheel
+        version in the file, if there is a discrepancy in the versions.
+        This can be used to issue any warnings or raise any exceptions.
+        If kwarg ``lib_only`` is True, only the purelib/platlib files are
+        installed, and the headers, scripts, data and dist-info metadata are
+        not written.
+
+        The return value is a :class:`InstalledDistribution` instance unless
+        ``options.lib_only`` is True, in which case the return value is ``None``.
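+
+        An illustrative call (editorial sketch; the names here are assumed
+        for illustration rather than taken from this patch):
+
+            from distlib.scripts import ScriptMaker
+            maker = ScriptMaker(source_dir=None, target_dir=None)
+            dist = Wheel('foo-1.0-py2.py3-none-any.whl').install(paths, maker)
+
+        where ``paths`` maps keys such as 'purelib', 'platlib', 'scripts',
+        'headers', 'data' and 'prefix' to target directories.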
+ """ + + dry_run = maker.dry_run + warner = kwargs.get('warner') + lib_only = kwargs.get('lib_only', False) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') + record_name = posixpath.join(info_dir, 'RECORD') + + wrapper = codecs.getreader('utf-8') + + with ZipFile(pathname, 'r') as zf: + with zf.open(wheel_metadata_name) as bwf: + wf = wrapper(bwf) + message = message_from_file(wf) + wv = message['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + if (file_version != self.wheel_version) and warner: + warner(self.wheel_version, file_version) + + if message['Root-Is-Purelib'] == 'true': + libdir = paths['purelib'] + else: + libdir = paths['platlib'] + + records = {} + with zf.open(record_name) as bf: + with CSVReader(stream=bf) as reader: + for row in reader: + p = row[0] + records[p] = row + + data_pfx = posixpath.join(data_dir, '') + info_pfx = posixpath.join(info_dir, '') + script_pfx = posixpath.join(data_dir, 'scripts', '') + + # make a new instance rather than a copy of maker's, + # as we mutate it + fileop = FileOperator(dry_run=dry_run) + fileop.record = True # so we can rollback if needed + + bc = not sys.dont_write_bytecode # Double negatives. Lovely! + + outfiles = [] # for RECORD writing + + # for script copying/shebang processing + workdir = tempfile.mkdtemp() + # set target dir later + # we default add_launchers to False, as the + # Python Launcher should be used instead + maker.source_dir = workdir + maker.target_dir = None + try: + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + # The signature file won't be in RECORD, + # and we don't currently don't do anything with it + if u_arcname.endswith('/RECORD.jws'): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + if lib_only and u_arcname.startswith((info_pfx, data_pfx)): + logger.debug('lib_only: skipping %s', u_arcname) + continue + is_script = (u_arcname.startswith(script_pfx) + and not u_arcname.endswith('.exe')) + + if u_arcname.startswith(data_pfx): + _, where, rp = u_arcname.split('/', 2) + outfile = os.path.join(paths[where], convert_path(rp)) + else: + # meant for site-packages. 
+ if u_arcname in (wheel_metadata_name, record_name): + continue + outfile = os.path.join(libdir, convert_path(u_arcname)) + if not is_script: + with zf.open(arcname) as bf: + fileop.copy_stream(bf, outfile) + outfiles.append(outfile) + # Double check the digest of the written file + if not dry_run and row[1]: + with open(outfile, 'rb') as bf: + data = bf.read() + _, newdigest = self.get_hash(data, kind) + if newdigest != digest: + raise DistlibException('digest mismatch ' + 'on write for ' + '%s' % outfile) + if bc and outfile.endswith('.py'): + try: + pyc = fileop.byte_compile(outfile) + outfiles.append(pyc) + except Exception: + # Don't give up if byte-compilation fails, + # but log it and perhaps warn the user + logger.warning('Byte-compilation failed', + exc_info=True) + else: + fn = os.path.basename(convert_path(arcname)) + workname = os.path.join(workdir, fn) + with zf.open(arcname) as bf: + fileop.copy_stream(bf, workname) + + dn, fn = os.path.split(outfile) + maker.target_dir = dn + filenames = maker.make(fn) + fileop.set_executable_mode(filenames) + outfiles.extend(filenames) + + if lib_only: + logger.debug('lib_only: returning None') + dist = None + else: + # Generate scripts + + # Try to get pydist.json so we can see if there are + # any commands to generate. If this fails (e.g. because + # of a legacy wheel), log a warning but don't give up. + commands = None + file_version = self.info['Wheel-Version'] + if file_version == '1.0': + # Use legacy info + ep = posixpath.join(info_dir, 'entry_points.txt') + try: + with zf.open(ep) as bwf: + epdata = read_exports(bwf) + commands = {} + for key in ('console', 'gui'): + k = '%s_scripts' % key + if k in epdata: + commands['wrap_%s' % key] = d = {} + for v in epdata[k].values(): + s = '%s:%s' % (v.prefix, v.suffix) + if v.flags: + s += ' %s' % v.flags + d[v.name] = s + except Exception: + logger.warning('Unable to read legacy script ' + 'metadata, so cannot generate ' + 'scripts') + else: + try: + with zf.open(metadata_name) as bwf: + wf = wrapper(bwf) + commands = json.load(wf).get('extensions') + if commands: + commands = commands.get('python.commands') + except Exception: + logger.warning('Unable to read JSON metadata, so ' + 'cannot generate scripts') + if commands: + console_scripts = commands.get('wrap_console', {}) + gui_scripts = commands.get('wrap_gui', {}) + if console_scripts or gui_scripts: + script_dir = paths.get('scripts', '') + if not os.path.isdir(script_dir): + raise ValueError('Valid script path not ' + 'specified') + maker.target_dir = script_dir + for k, v in console_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script) + fileop.set_executable_mode(filenames) + + if gui_scripts: + options = {'gui': True } + for k, v in gui_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script, options) + fileop.set_executable_mode(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + if p: + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception: # pragma: no cover + logger.exception('installation failed.') + fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + global cache + if cache is None: + # Use native string to avoid issues on 2.x: 
see Python #20140. + base = os.path.join(get_cache_base(), str('dylib-cache'), + sys.version[:3]) + cache = Cache(base) + return cache + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache = self._get_dylib_cache() + prefix = cache.prefix_to_dir(pathname) + cache_base = os.path.join(cache.base, prefix) + if not os.path.isdir(cache_base): + os.makedirs(cache_base) + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def is_compatible(self): + """ + Determine if a wheel is compatible with the running system. + """ + return is_compatible(self) + + def is_mountable(self): + """ + Determine if a wheel is asserted as mountable by its metadata. + """ + return True # for now - metadata details TBD + + def mount(self, append=False): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if not self.is_compatible(): + msg = 'Wheel %s not compatible with this Python.' % pathname + raise DistlibException(msg) + if not self.is_mountable(): + msg = 'Wheel %s is marked as not mountable.' % pathname + raise DistlibException(msg) + if pathname in sys.path: + logger.debug('%s already in path', pathname) + else: + if append: + sys.path.append(pathname) + else: + sys.path.insert(0, pathname) + extensions = self._get_extensions() + if extensions: + if _hook not in sys.meta_path: + sys.meta_path.append(_hook) + _hook.add(pathname, extensions) + + def unmount(self): + pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) + if pathname not in sys.path: + logger.debug('%s not in path', pathname) + else: + sys.path.remove(pathname) + if pathname in _hook.impure_wheels: + _hook.remove(pathname) + if not _hook.impure_wheels: + if _hook in sys.meta_path: + sys.meta_path.remove(_hook) + + def verify(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') + record_name = posixpath.join(info_dir, 'RECORD') + + wrapper = codecs.getreader('utf-8') + + with ZipFile(pathname, 'r') as zf: + with zf.open(wheel_metadata_name) as bwf: + wf = wrapper(bwf) + message = message_from_file(wf) + wv = message['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + # TODO version verification + + records = {} + with zf.open(record_name) as bf: + with CSVReader(stream=bf) as reader: + for row in reader: + p = row[0] + records[p] = row + + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if '..' 
in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + + # The signature file won't be in RECORD, + # and we don't currently don't do anything with it + if u_arcname.endswith('/RECORD.jws'): + continue + row = records[u_arcname] + if row[2] and str(zinfo.file_size) != row[2]: + raise DistlibException('size mismatch for ' + '%s' % u_arcname) + if row[1]: + kind, value = row[1].split('=', 1) + with zf.open(arcname) as bf: + data = bf.read() + _, digest = self.get_hash(data, kind) + if digest != value: + raise DistlibException('digest mismatch for ' + '%s' % arcname) + + def update(self, modifier, dest_dir=None, **kwargs): + """ + Update the contents of a wheel in a generic way. The modifier should + be a callable which expects a dictionary argument: its keys are + archive-entry paths, and its values are absolute filesystem paths + where the contents the corresponding archive entries can be found. The + modifier is free to change the contents of the files pointed to, add + new entries and remove entries, before returning. This method will + extract the entire contents of the wheel to a temporary location, call + the modifier, and then use the passed (and possibly updated) + dictionary to write a new wheel. If ``dest_dir`` is specified, the new + wheel is written there -- otherwise, the original wheel is overwritten. + + The modifier should return True if it updated the wheel, else False. + This method returns the same value the modifier returns. + """ + + def get_version(path_map, info_dir): + version = path = None + key = '%s/%s' % (info_dir, METADATA_FILENAME) + if key not in path_map: + key = '%s/PKG-INFO' % info_dir + if key in path_map: + path = path_map[key] + version = Metadata(path=path).version + return version, path + + def update_version(version, path): + updated = None + try: + v = NormalizedVersion(version) + i = version.find('-') + if i < 0: + updated = '%s+1' % version + else: + parts = [int(s) for s in version[i + 1:].split('.')] + parts[-1] += 1 + updated = '%s+%s' % (version[:i], + '.'.join(str(i) for i in parts)) + except UnsupportedVersionError: + logger.debug('Cannot update non-compliant (PEP-440) ' + 'version %r', version) + if updated: + md = Metadata(path=path) + md.version = updated + legacy = not path.endswith(METADATA_FILENAME) + md.write(path=path, legacy=legacy) + logger.debug('Version updated from %r to %r', version, + updated) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + record_name = posixpath.join(info_dir, 'RECORD') + with tempdir() as workdir: + with ZipFile(pathname, 'r') as zf: + path_map = {} + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if u_arcname == record_name: + continue + if '..' in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + zf.extract(zinfo, workdir) + path = os.path.join(workdir, convert_path(u_arcname)) + path_map[u_arcname] = path + + # Remember the version. + original_version, _ = get_version(path_map, info_dir) + # Files extracted. Call the modifier. + modified = modifier(path_map, **kwargs) + if modified: + # Something changed - need to build a new wheel. + current_version, path = get_version(path_map, info_dir) + if current_version and (current_version == original_version): + # Add or update local version to signify changes. 
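+                    # (Editorial note, not upstream distlib: e.g. a version of
+                    # '0.1' is rewritten to '0.1+1' here, so the rebuilt wheel
+                    # is distinguishable from the original.)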
+ update_version(current_version, path) + # Decide where the new wheel goes. + if dest_dir is None: + fd, newpath = tempfile.mkstemp(suffix='.whl', + prefix='wheel-update-', + dir=workdir) + os.close(fd) + else: + if not os.path.isdir(dest_dir): + raise DistlibException('Not a directory: %r' % dest_dir) + newpath = os.path.join(dest_dir, self.filename) + archive_paths = list(path_map.items()) + distinfo = os.path.join(workdir, info_dir) + info = distinfo, info_dir + self.write_records(info, workdir, archive_paths) + self.build_zip(newpath, archive_paths) + if dest_dir is None: + shutil.copyfile(newpath, pathname) + return modified + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. + """ + versions = [VER_SUFFIX] + major = VER_SUFFIX[0] + for minor in range(sys.version_info[1] - 1, - 1, -1): + versions.append(''.join([major, str(minor)])) + + abis = [] + for suffix, _, _ in imp.get_suffixes(): + if suffix.startswith('.abi'): + abis.append(suffix.split('.', 2)[1]) + abis.sort() + if ABI != 'none': + abis.insert(0, ABI) + abis.append('none') + result = [] + + arches = [ARCH] + if sys.platform == 'darwin': + m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) + if m: + name, major, minor, arch = m.groups() + minor = int(minor) + matches = [arch] + if arch in ('i386', 'ppc'): + matches.append('fat') + if arch in ('i386', 'ppc', 'x86_64'): + matches.append('fat3') + if arch in ('ppc64', 'x86_64'): + matches.append('fat64') + if arch in ('i386', 'x86_64'): + matches.append('intel') + if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): + matches.append('universal') + while minor >= 0: + for match in matches: + s = '%s_%s_%s_%s' % (name, major, minor, match) + if s != ARCH: # already there + arches.append(s) + minor -= 1 + + # Most specific - our Python version, ABI and arch + for abi in abis: + for arch in arches: + result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + + # where no ABI / arch dependency, but IMP_PREFIX dependency + for i, version in enumerate(versions): + result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) + if i == 0: + result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) + + # no IMP_PREFIX, ABI or arch dependency + for i, version in enumerate(versions): + result.append((''.join(('py', version)), 'none', 'any')) + if i == 0: + result.append((''.join(('py', version[0])), 'none', 'any')) + return set(result) + + +COMPATIBLE_TAGS = compatible_tags() + +del compatible_tags + + +def is_compatible(wheel, tags=None): + if not isinstance(wheel, Wheel): + wheel = Wheel(wheel) # assume it's a filename + result = False + if tags is None: + tags = COMPATIBLE_TAGS + for ver, abi, arch in tags: + if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: + result = True + break + return result diff --git a/pipenv/vendor/packaging/LICENSE b/pipenv/vendor/packaging/LICENSE new file mode 100644 index 0000000000..6f62d44e4e --- /dev/null +++ b/pipenv/vendor/packaging/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. 
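[Editor's aside, not part of the patch: the distlib wheel API vendored above
can be exercised roughly as follows. This is an illustrative sketch only; the
plain `distlib` import path is an assumption, not something this patch adds.]

    from distlib.wheel import Wheel, is_compatible

    w = Wheel('requests-2.18.4-py2.py3-none-any.whl')
    print(w.name, w.version)  # requests 2.18.4
    print(list(w.tags))       # [('py2', 'none', 'any'), ('py3', 'none', 'any')]
    print(is_compatible(w))   # True when a compatible tag matches this interpreter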
diff --git a/pipenv/vendor/packaging/LICENSE.APACHE b/pipenv/vendor/packaging/LICENSE.APACHE new file mode 100644 index 0000000000..4947287f7b --- /dev/null +++ b/pipenv/vendor/packaging/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/pipenv/vendor/packaging/LICENSE.BSD b/pipenv/vendor/packaging/LICENSE.BSD new file mode 100644 index 0000000000..42ce7b75c9 --- /dev/null +++ b/pipenv/vendor/packaging/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pipenv/vendor/packaging/__about__.py b/pipenv/vendor/packaging/__about__.py new file mode 100644 index 0000000000..4255c5b553 --- /dev/null +++ b/pipenv/vendor/packaging/__about__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "17.1" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/pipenv/vendor/packaging/__init__.py b/pipenv/vendor/packaging/__init__.py new file mode 100644 index 0000000000..5ee6220203 --- /dev/null +++ b/pipenv/vendor/packaging/__init__.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/pipenv/vendor/packaging/_compat.py b/pipenv/vendor/packaging/_compat.py new file mode 100644 index 0000000000..210bb80b7e --- /dev/null +++ b/pipenv/vendor/packaging/_compat.py @@ -0,0 +1,30 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = str, +else: + string_types = basestring, + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
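+    #
+    # Illustrative use (editorial note, not part of upstream packaging):
+    #
+    #     class Base(with_metaclass(abc.ABCMeta, object)):
+    #         pass
+    #
+    # behaves like ``class Base(object, metaclass=abc.ABCMeta)`` on Python 3,
+    # while remaining valid Python 2 syntax.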
+ class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/pipenv/vendor/packaging/_structures.py b/pipenv/vendor/packaging/_structures.py new file mode 100644 index 0000000000..e9fc4a0496 --- /dev/null +++ b/pipenv/vendor/packaging/_structures.py @@ -0,0 +1,70 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + + +class Infinity(object): + + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity(object): + + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +NegativeInfinity = NegativeInfinity() diff --git a/pipenv/vendor/packaging/markers.py b/pipenv/vendor/packaging/markers.py new file mode 100644 index 0000000000..5fdf510ca6 --- /dev/null +++ b/pipenv/vendor/packaging/markers.py @@ -0,0 +1,301 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import operator +import os +import platform +import sys + +from pyparsing import ParseException, ParseResults, stringStart, stringEnd +from pyparsing import ZeroOrMore, Group, Forward, QuotedString +from pyparsing import Literal as L # noqa + +from ._compat import string_types +from .specifiers import Specifier, InvalidSpecifier + + +__all__ = [ + "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", + "Marker", "default_environment", +] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. 
+ """ + + +class Node(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + + def serialize(self): + raise NotImplementedError + + +class Variable(Node): + + def serialize(self): + return str(self) + + +class Value(Node): + + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + + def serialize(self): + return str(self) + + +VARIABLE = ( + L("implementation_version") | + L("platform_python_implementation") | + L("implementation_name") | + L("python_full_version") | + L("platform_release") | + L("platform_version") | + L("platform_machine") | + L("platform_system") | + L("python_version") | + L("sys_platform") | + L("os_name") | + L("os.name") | # PEP-345 + L("sys.platform") | # PEP-345 + L("platform.version") | # PEP-345 + L("platform.machine") | # PEP-345 + L("platform.python_implementation") | # PEP-345 + L("python_implementation") | # undocumented setuptools legacy + L("extra") +) +ALIASES = { + 'os.name': 'os_name', + 'sys.platform': 'sys_platform', + 'platform.version': 'platform_version', + 'platform.machine': 'platform_machine', + 'platform.python_implementation': 'platform_python_implementation', + 'python_implementation': 'platform_python_implementation' +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | + L("==") | + L(">=") | + L("<=") | + L("!=") | + L("~=") | + L(">") | + L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results): + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker(marker, first=True): + assert isinstance(marker, (list, tuple, string_types)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+    if (isinstance(marker, list) and len(marker) == 1 and
+            isinstance(marker[0], (list, tuple))):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs, op, rhs):
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs)
+
+    oper = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(
+            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+        )
+
+    return oper(lhs, rhs)
+
+
+_undefined = object()
+
+
+def _get_env(environment, name):
+    value = environment.get(name, _undefined)
+
+    if value is _undefined:
+        raise UndefinedEnvironmentName(
+            "{0!r} does not exist in evaluation environment.".format(name)
+        )
+
+    return value
+
+
+def _evaluate_markers(markers, environment):
+    groups = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, string_types))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                lhs_value = _get_env(environment, lhs.value)
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                rhs_value = _get_env(environment, rhs.value)
+
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
+
+def format_full_version(info):
+    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
+    kind = info.releaselevel
+    if kind != 'final':
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment():
+    if hasattr(sys, 'implementation'):
+        iver = format_full_version(sys.implementation.version)
+        implementation_name = sys.implementation.name
+    else:
+        iver = '0'
+        implementation_name = ''
+
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": platform.python_version()[:3],
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker(object):
+
+    def __init__(self, marker):
+        try:
+            self._markers = _coerce_parse_result(MARKER.parseString(marker))
+        except ParseException as e:
+            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+                marker, marker[e.loc:e.loc + 8])
+            raise InvalidMarker(err_str)
+
+    def __str__(self):
+        return _format_marker(self._markers)
+
+    def __repr__(self):
+        return "<Marker({0!r})>".format(str(self))
+
+    def evaluate(self, environment=None):
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
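+
+        Illustrative example (editorial, not from upstream packaging):
+
+            >>> Marker('python_version >= "2.7"').evaluate()
+            True
+
+        (the result naturally depends on the interpreter evaluating it).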
+ """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/pipenv/vendor/packaging/requirements.py b/pipenv/vendor/packaging/requirements.py new file mode 100644 index 0000000000..f87c57cc80 --- /dev/null +++ b/pipenv/vendor/packaging/requirements.py @@ -0,0 +1,130 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import string +import re + +from pyparsing import stringStart, stringEnd, originalTextFor, ParseException +from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine +from pyparsing import Literal as L # noqa +from six.moves.urllib import parse as urlparse + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r'[^ ]+')("url") +URL = (AT + URI) + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), + joinString=",", adjacent=False)("_raw_spec") +_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start:t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = \ + NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement(object): + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+
+    def __init__(self, requirement_string):
+        try:
+            req = REQUIREMENT.parseString(requirement_string)
+        except ParseException as e:
+            raise InvalidRequirement(
+                "Invalid requirement, parse error at \"{0!r}\"".format(
+                    requirement_string[e.loc:e.loc + 8]))
+
+        self.name = req.name
+        if req.url:
+            parsed_url = urlparse.urlparse(req.url)
+            if not (parsed_url.scheme and parsed_url.netloc) or (
+                    not parsed_url.scheme and not parsed_url.netloc):
+                raise InvalidRequirement("Invalid URL given")
+            self.url = req.url
+        else:
+            self.url = None
+        self.extras = set(req.extras.asList() if req.extras else [])
+        self.specifier = SpecifierSet(req.specifier)
+        self.marker = req.marker if req.marker else None
+
+    def __str__(self):
+        parts = [self.name]
+
+        if self.extras:
+            parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append("@ {0}".format(self.url))
+
+        if self.marker:
+            parts.append("; {0}".format(self.marker))
+
+        return "".join(parts)
+
+    def __repr__(self):
+        return "<Requirement({0!r})>".format(str(self))
diff --git a/pipenv/vendor/packaging/specifiers.py b/pipenv/vendor/packaging/specifiers.py
new file mode 100644
index 0000000000..9b6353f052
--- /dev/null
+++ b/pipenv/vendor/packaging/specifiers.py
@@ -0,0 +1,774 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
+class InvalidSpecifier(ValueError):
+    """
+    An invalid specifier was found, users should refer to PEP 440.
+    """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
+
+    @abc.abstractmethod
+    def __str__(self):
+        """
+        Returns the str representation of this Specifier like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self):
+        """
+        Returns a hash value for this Specifier like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier like
+        objects are equal.
+        """
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier like
+        objects are not equal.
+        """
+
+    @abc.abstractproperty
+    def prereleases(self):
+        """
+        Returns whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value):
+        """
+        Sets whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item, prereleases=None):
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(self, iterable, prereleases=None):
+        """
+        Takes an iterable of items and filters them so that only items which
+        are contained within this specifier are allowed in it.
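+
+        For example (editorial illustration, not upstream), a specifier of
+        ``>=1.0`` filters ``["0.9", "1.0", "1.1", "2.0a1"]`` down to
+        ``["1.0", "1.1"]``: the pre-release is withheld unless pre-releases
+        are enabled or nothing else matches.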
+ """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format( + self.__class__.__name__, + str(self), + pre, + ) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. + if (parsed_version.is_prerelease and not + (prereleases or self.prereleases)): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. 
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(==|!=|<=|>=|<|>))
+        \s*
+        (?P<version>
+            [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                      # string can be just about anything, we match everything
+                      # except for whitespace, a semi-colon for marker support,
+                      # a closing paren since versions can be enclosed in
+                      # them, and a comma since it's a version separator.
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+    }
+
+    def _coerce_version(self, version):
+        if not isinstance(version, LegacyVersion):
+            version = LegacyVersion(str(version))
+        return version
+
+    def _compare_equal(self, prospective, spec):
+        return prospective == self._coerce_version(spec)
+
+    def _compare_not_equal(self, prospective, spec):
+        return prospective != self._coerce_version(spec)
+
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= self._coerce_version(spec)
+
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= self._coerce_version(spec)
+
+    def _compare_less_than(self, prospective, spec):
+        return prospective < self._coerce_version(spec)
+
+    def _compare_greater_than(self, prospective, spec):
+        return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(fn):
+    @functools.wraps(fn)
+    def wrapped(self, prospective, spec):
+        if not isinstance(prospective, Version):
+            return False
+        return fn(self, prospective, spec)
+    return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s]*    # We just match everything, except for whitespace
+                          # since we are only testing for strict identity.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)  # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+
+                # You cannot use a wild card and a dev or local version
+                # together so group them with a | and make them optional.
+                (?:
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)?  # local
+                    |
+                    \.\*  # Wild card syntax of .*
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)  # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)  # We have special cases for these operators so
+                               # we want to make sure they don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+            )
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    @_require_version_compare
+    def _compare_compatible(self, prospective, spec):
+        # Compatible releases have an equivalent combination of >= and ==. That
+        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore post and dev releases and we want to treat the pre-release as
+        # it's own separate segment.
+        prefix = ".".join(
+            list(
+                itertools.takewhile(
+                    lambda x: (not x.startswith("post") and not
+                               x.startswith("dev")),
+                    _version_split(spec),
+                )
+            )[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return (self._get_operator(">=")(prospective, spec) and
+                self._get_operator("==")(prospective, prefix))
+
+    @_require_version_compare
+    def _compare_equal(self, prospective, spec):
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            prospective = Version(prospective.public)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            prospective = _version_split(str(prospective))
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            prospective = prospective[:len(spec)]
+
+            # Pad out our two sides with zeros so that they both equal the same
+            # length.
+            spec, prospective = _pad_version(spec, prospective)
+        else:
+            # Convert our spec string into a Version
+            spec = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec.local:
+                prospective = Version(prospective.public)
+
+        return prospective == spec
+
+    @_require_version_compare
+    def _compare_not_equal(self, prospective, spec):
+        return not self._compare_equal(prospective, spec)
+
+    @_require_version_compare
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= Version(spec)
+
+    @_require_version_compare
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= Version(spec)
+
+    @_require_version_compare
+    def _compare_less_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is less than the spec
+        # version.
If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is techincally greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
+            if parse(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+    result = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _pad_version(left, right):
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]):])
+    right_split.append(right[len(right_split[0]):])
+
+    # Insert our padding
+    left_split.insert(
+        1,
+        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
+    )
+    right_split.insert(
+        1,
+        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
+    )
+
+    return (
+        list(itertools.chain(*left_split)),
+        list(itertools.chain(*right_split)),
+    )
+
+
+class SpecifierSet(BaseSpecifier):
+
+    def __init__(self, specifiers="", prereleases=None):
+        # Split on , to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed = set()
+        for specifier in specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+    def __str__(self):
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self):
+        return hash(self._specs)
+
+    def __and__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+ ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all( + s.contains(item, prereleases=prereleases) + for s in self._specs + ) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. 
+ else: + filtered = [] + found_prereleases = [] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/pipenv/vendor/packaging/utils.py b/pipenv/vendor/packaging/utils.py new file mode 100644 index 0000000000..4b94a82fbb --- /dev/null +++ b/pipenv/vendor/packaging/utils.py @@ -0,0 +1,63 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import re + +from .version import InvalidVersion, Version + + +_canonicalize_regex = re.compile(r"[-_.]+") + + +def canonicalize_name(name): + # This is taken from PEP 503. + return _canonicalize_regex.sub("-", name).lower() + + +def canonicalize_version(version): + """ + This is very similar to Version.__str__, but has one subtle differences + with the way it handles the release segment. + """ + + try: + version = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + + parts = [] + + # Epoch + if version.epoch != 0: + parts.append("{0}!".format(version.epoch)) + + # Release segment + # NB: This strips trailing '.0's to normalize + parts.append( + re.sub( + r'(\.0)+$', + '', + ".".join(str(x) for x in version.release) + ) + ) + + # Pre-release + if version.pre is not None: + parts.append("".join(str(x) for x in version.pre)) + + # Post-release + if version.post is not None: + parts.append(".post{0}".format(version.post)) + + # Development release + if version.dev is not None: + parts.append(".dev{0}".format(version.dev)) + + # Local version segment + if version.local is not None: + parts.append("+{0}".format(version.local)) + + return "".join(parts) diff --git a/pipenv/vendor/packaging/version.py b/pipenv/vendor/packaging/version.py new file mode 100644 index 0000000000..6ed5cbbdc3 --- /dev/null +++ b/pipenv/vendor/packaging/version.py @@ -0,0 +1,441 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import collections +import itertools +import re + +from ._structures import Infinity + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" +] + + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. 
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def epoch(self):
+        return -1
+
+    @property
+    def release(self):
+        return None
+
+    @property
+    def pre(self):
+        return None
+
+    @property
+    def post(self):
+        return None
+
+    @property
+    def dev(self):
+        return None
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+    @property
+    def is_devrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
+    # epoch greater than or equal to 0. This will effectively put the
+    # LegacyVersion, which uses the de facto standard originally implemented
+    # by setuptools, before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools,
+    # prior to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append("{0}!".format(self.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(".post{0}".format(self.post))
+
+        # Development release
+        if self.dev is not None:
+            parts.append(".dev{0}".format(self.dev))
+
+        # Local version segment
+        if self.local is not None:
+            parts.append("+{0}".format(self.local))
+
+        return "".join(parts)
+
+    @property
+    def epoch(self):
+        return self._version.epoch
+
+    @property
+    def release(self):
+        return self._version.release
+
+    @property
+    def pre(self):
+        return self._version.pre
+
+    @property
+    def post(self):
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self):
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self):
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append("{0}!".format(self.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self):
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self):
+        return self.post is not None
+
+    @property
+    def is_devrelease(self):
+        return self.dev is not None
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
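
For reference, the normalizations `_parse_letter_version` performs (a sketch
against the module-private helper, assuming it is importable as above):

    from packaging.version import _parse_letter_version

    assert _parse_letter_version('ALPHA', None) == ('a', 0)   # implicit 0
    assert _parse_letter_version('rev', '2') == ('post', 2)   # rev/r -> post
    assert _parse_letter_version(None, '1') == ('post', 1)    # "1.0-1" style
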
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, make it a tuple, and use that as our
+    # sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
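
Taken together, `_cmpkey` gives `parse()` the PEP 440 total ordering. A
minimal sketch (assuming this vendored copy, or any packaging release that
still ships `LegacyVersion`, is importable as `packaging.version`):

    from packaging.version import LegacyVersion, Version, parse

    versions = ["1.0.post1", "1.0", "1.0rc1", "1.0.dev0", "1.0+local.1"]
    # dev < pre < final < local < post, per the Infinity tricks above
    print(sorted(versions, key=parse))
    # ['1.0.dev0', '1.0rc1', '1.0', '1.0+local.1', '1.0.post1']

    # Non-PEP 440 strings fall back to LegacyVersion, whose hardcoded
    # epoch of -1 sorts it before every valid Version.
    assert isinstance(parse("not-a-version"), LegacyVersion)
    assert parse("not-a-version") < Version("0.0.1")
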
diff --git a/pipenv/vendor/pythonfinder/LICENSE.txt b/pipenv/vendor/pythonfinder/LICENSE.txt
new file mode 100644
index 0000000000..4715db36be
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2018 Dan Ryan
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pipenv/vendor/pythonfinder/__init__.py b/pipenv/vendor/pythonfinder/__init__.py
new file mode 100644
index 0000000000..eead319863
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/__init__.py
@@ -0,0 +1 @@
+__version__ = '0.0.5'
diff --git a/pipenv/vendor/pythonfinder/__main__.py b/pipenv/vendor/pythonfinder/__main__.py
new file mode 100644
index 0000000000..7a331e5017
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/__main__.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+import os
+import sys
+
+PYTHONFINDER_MAIN = os.path.dirname(os.path.abspath(__file__))
+PYTHONFINDER_PACKAGE = os.path.dirname(PYTHONFINDER_MAIN)
+
+from pythonfinder.cli import cli
+
+if __name__ == '__main__':
+    sys.exit(cli())
diff --git a/pipenv/vendor/pythonfinder/_vendor/__init__.py b/pipenv/vendor/pythonfinder/_vendor/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pipenv/vendor/pythonfinder/_vendor/pep514tools/__init__.py b/pipenv/vendor/pythonfinder/_vendor/pep514tools/__init__.py
new file mode 100644
index 0000000000..e7e7440873
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/_vendor/pep514tools/__init__.py
@@ -0,0 +1,11 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Steve Dower
+# All rights reserved.
+#
+# Distributed under the terms of the MIT License
+#-------------------------------------------------------------------------
+
+__author__ = 'Steve Dower '
+__version__ = '0.1.0'
+
+from pythonfinder._vendor.pep514tools.environment import findall, find, findone
diff --git a/pipenv/vendor/pythonfinder/_vendor/pep514tools/__main__.py b/pipenv/vendor/pythonfinder/_vendor/pep514tools/__main__.py
new file mode 100644
index 0000000000..f554c16593
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/_vendor/pep514tools/__main__.py
@@ -0,0 +1,7 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Steve Dower
+# All rights reserved.
+#
+# Distributed under the terms of the MIT License
+#-------------------------------------------------------------------------
+
diff --git a/pipenv/vendor/pythonfinder/_vendor/pep514tools/_registry.py b/pipenv/vendor/pythonfinder/_vendor/pep514tools/_registry.py
new file mode 100644
index 0000000000..2611f60127
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/_vendor/pep514tools/_registry.py
@@ -0,0 +1,198 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Steve Dower
+# All rights reserved.
+#
+# Distributed under the terms of the MIT License
+#-------------------------------------------------------------------------
+
+__all__ = ['open_source', 'REGISTRY_SOURCE_LM', 'REGISTRY_SOURCE_LM_WOW6432', 'REGISTRY_SOURCE_CU']
+
+from itertools import count
+import re
+try:
+    import winreg
+except ImportError:
+    import _winreg as winreg
+
+REGISTRY_SOURCE_LM = 1
+REGISTRY_SOURCE_LM_WOW6432 = 2
+REGISTRY_SOURCE_CU = 3
+
+_REG_KEY_INFO = {
+    REGISTRY_SOURCE_LM: (winreg.HKEY_LOCAL_MACHINE, r'Software\Python', winreg.KEY_WOW64_64KEY),
+    REGISTRY_SOURCE_LM_WOW6432: (winreg.HKEY_LOCAL_MACHINE, r'Software\Python', winreg.KEY_WOW64_32KEY),
+    REGISTRY_SOURCE_CU: (winreg.HKEY_CURRENT_USER, r'Software\Python', 0),
+}
+
+def get_value_from_tuple(value, vtype):
+    if vtype == winreg.REG_SZ:
+        if '\0' in value:
+            return value[:value.index('\0')]
+        return value
+    return None
+
+def join(x, y):
+    return x + '\\' + y
+
+_VALID_ATTR = re.compile('^[a-z_]+$')
+_VALID_KEY = re.compile('^[A-Za-z]+$')
+_KEY_TO_ATTR = re.compile('([A-Z]+[a-z]+)')
+
+class PythonWrappedDict(object):
+    @staticmethod
+    def _attr_to_key(attr):
+        if not attr:
+            return ''
+        if not _VALID_ATTR.match(attr):
+            return attr
+        return ''.join(c.capitalize() for c in attr.split('_'))
+
+    @staticmethod
+    def _key_to_attr(key):
+        if not key:
+            return ''
+        if not _VALID_KEY.match(key):
+            return key
+        return '_'.join(k for k in _KEY_TO_ATTR.split(key) if k).lower()
+
+    def __init__(self, d):
+        self._d = d
+
+    def __getattr__(self, attr):
+        if attr.startswith('_'):
+            return object.__getattribute__(self, attr)
+
+        if attr == 'value':
+            attr = ''
+
+        key = self._attr_to_key(attr)
+        try:
+            return self._d[key]
+        except KeyError:
+            pass
+        except Exception:
+            raise AttributeError(attr)
+        raise AttributeError(attr)
+
+    def __setattr__(self, attr, value):
+        if attr.startswith('_'):
+            return object.__setattr__(self, attr, value)
+
+        if attr == 'value':
+            attr = ''
+        self._d[self._attr_to_key(attr)] = value
+
+    def __dir__(self):
+        k2a = self._key_to_attr
+        return list(map(k2a, self._d))
+
+    def _setdefault(self, key, value):
+        self._d.setdefault(key, value)
+
+    def _items(self):
+        return self._d.items()
+
+    def __repr__(self):
+        k2a = self._key_to_attr
+        return 'info(' + ', '.join('{}={!r}'.format(k2a(k), v) for k, v in self._d.items()) + ')'
+
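
`PythonWrappedDict` maps snake_case attribute access onto the CamelCase value
names PEP 514 stores in the registry. A quick sketch (importable only where
`winreg` exists, i.e. on Windows):

    from pythonfinder._vendor.pep514tools._registry import PythonWrappedDict

    d = PythonWrappedDict({'DisplayName': 'Python 3.6',
                           'SysArchitecture': '64bit'})
    assert d.display_name == 'Python 3.6'     # DisplayName -> display_name
    d.support_url = 'http://www.python.org/'  # support_url -> SupportUrl
    assert d._d['SupportUrl'] == 'http://www.python.org/'
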
+class RegistryAccessor(object):
+    def __init__(self, root, subkey, flags):
+        self._root = root
+        self.subkey = subkey
+        _, _, self.name = subkey.rpartition('\\')
+        self._flags = flags
+
+    def __iter__(self):
+        subkey_names = []
+        try:
+            with winreg.OpenKeyEx(self._root, self.subkey, 0, winreg.KEY_READ | self._flags) as key:
+                for i in count():
+                    subkey_names.append(winreg.EnumKey(key, i))
+        except OSError:
+            pass
+        return iter(self[k] for k in subkey_names)
+
+    def __getitem__(self, key):
+        return RegistryAccessor(self._root, join(self.subkey, key), self._flags)
+
+    def get_value(self, value_name):
+        try:
+            with winreg.OpenKeyEx(self._root, self.subkey, 0, winreg.KEY_READ | self._flags) as key:
+                return get_value_from_tuple(*winreg.QueryValueEx(key, value_name))
+        except OSError:
+            return None
+
+    def get_all_values(self):
+        schema = {}
+        for subkey in self:
+            schema[subkey.name] = subkey.get_all_values()
+
+        key = winreg.OpenKeyEx(self._root, self.subkey, 0, winreg.KEY_READ | self._flags)
+        try:
+            with key:
+                for i in count():
+                    vname, value, vtype = winreg.EnumValue(key, i)
+                    value = get_value_from_tuple(value, vtype)
+                    if value:
+                        schema[vname or ''] = value
+        except OSError:
+            pass
+
+        return PythonWrappedDict(schema)
+
+    def set_value(self, value_name, value):
+        with winreg.CreateKeyEx(self._root, self.subkey, 0, winreg.KEY_WRITE | self._flags) as key:
+            if value is None:
+                winreg.DeleteValue(key, value_name)
+            elif isinstance(value, str):
+                winreg.SetValueEx(key, value_name, 0, winreg.REG_SZ, value)
+            else:
+                raise TypeError('cannot write {} to registry'.format(type(value)))
+
+    def _set_all_values(self, rootkey, name, info, errors):
+        with winreg.CreateKeyEx(rootkey, name, 0, winreg.KEY_WRITE | self._flags) as key:
+            for k, v in info:
+                if isinstance(v, PythonWrappedDict):
+                    self._set_all_values(key, k, v._items(), errors)
+                elif isinstance(v, dict):
+                    self._set_all_values(key, k, v.items(), errors)
+                elif v is None:
+                    winreg.DeleteValue(key, k)
+                elif isinstance(v, str):
+                    winreg.SetValueEx(key, k, 0, winreg.REG_SZ, v)
+                else:
+                    errors.append('cannot write {} to registry'.format(type(v)))
+
+    def set_all_values(self, info):
+        errors = []
+        if isinstance(info, PythonWrappedDict):
+            items = info._items()
+        elif isinstance(info, dict):
+            items = info.items()
+        else:
+            raise TypeError('info must be a dictionary')
+
+        self._set_all_values(self._root, self.subkey, items, errors)
+        if len(errors) == 1:
+            raise ValueError(errors[0])
+        elif errors:
+            raise ValueError(errors)
+
+    def delete(self):
+        for k in self:
+            k.delete()
+        try:
+            key = winreg.OpenKeyEx(self._root, None, 0, winreg.KEY_READ | self._flags)
+        except OSError:
+            return
+        with key:
+            winreg.DeleteKeyEx(key, self.subkey)
+
+
+def open_source(registry_source):
+    info = _REG_KEY_INFO.get(registry_source)
+    if not info:
+        raise ValueError("unsupported registry source")
+    root, subkey, flags = info
+    return RegistryAccessor(root, subkey, flags)
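
A usage sketch for the accessor (Windows only; keys that do not exist simply
iterate as empty):

    from pythonfinder._vendor.pep514tools._registry import (
        REGISTRY_SOURCE_CU, open_source,
    )

    # Walk HKEY_CURRENT_USER\Software\Python: companies, then tags.
    for company in open_source(REGISTRY_SOURCE_CU):
        for tag in company:
            print(company.name, tag.name, tag.get_all_values())
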
diff --git a/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py b/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py
new file mode 100644
index 0000000000..2c09ccbcab
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/_vendor/pep514tools/environment.py
@@ -0,0 +1,123 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Steve Dower
+# All rights reserved.
+#
+# Distributed under the terms of the MIT License
+#-------------------------------------------------------------------------
+
+__all__ = ['Environment', 'findall', 'find', 'findone']
+
+from itertools import count
+from pythonfinder._vendor.pep514tools._registry import open_source, REGISTRY_SOURCE_LM, REGISTRY_SOURCE_LM_WOW6432, REGISTRY_SOURCE_CU
+import re
+import sys
+
+# These tags are treated specially when the Company is 'PythonCore'
+_PYTHONCORE_COMPATIBILITY_TAGS = {
+    '2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7',
+    '3.0', '3.1', '3.2', '3.3', '3.4'
+}
+
+_IS_64BIT_OS = None
+def _is_64bit_os():
+    global _IS_64BIT_OS
+    if _IS_64BIT_OS is None:
+        if sys.maxsize > 2**32:
+            import platform
+            _IS_64BIT_OS = (platform.machine() == 'AMD64')
+        else:
+            _IS_64BIT_OS = False
+    return _IS_64BIT_OS
+
+class Environment(object):
+    def __init__(self, source, company, tag, guessed_arch=None):
+        self._source = source
+        self.company = company
+        self.tag = tag
+        self._guessed_arch = guessed_arch
+        self._orig_info = company, tag
+        self.info = {}
+
+    def load(self):
+        if not self._source:
+            raise ValueError('Environment not initialized with a source')
+        self.info = info = self._source[self.company][self.tag].get_all_values()
+        if self.company == 'PythonCore':
+            info._setdefault('DisplayName', 'Python ' + self.tag)
+            info._setdefault('SupportUrl', 'http://www.python.org/')
+            info._setdefault('Version', self.tag[:3])
+            info._setdefault('SysVersion', self.tag[:3])
+            if self._guessed_arch:
+                info._setdefault('SysArchitecture', self._guessed_arch)
+
+    def save(self, copy=False):
+        if not self._source:
+            raise ValueError('Environment not initialized with a source')
+        if (self.company, self.tag) != self._orig_info:
+            if not copy:
+                self._source[self._orig_info[0]][self._orig_info[1]].delete()
+            self._orig_info = self.company, self.tag
+
+        src = self._source[self.company][self.tag]
+        src.set_all_values(self.info)
+
+        self.info = src.get_all_values()
+
+    def delete(self):
+        if (self.company, self.tag) != self._orig_info:
+            raise ValueError("cannot delete Environment when company/tag have been modified")
+
+        if not self._source:
+            raise ValueError('Environment not initialized with a source')
+        self._source.delete()
+
+    def __repr__(self):
+        return '<Environment {}\\{}>'.format(self.company, self.tag)
+
+def _get_sources(include_per_machine=True, include_per_user=True):
+    if _is_64bit_os():
+        if include_per_user:
+            yield open_source(REGISTRY_SOURCE_CU), None
+        if include_per_machine:
+            yield open_source(REGISTRY_SOURCE_LM), '64bit'
+            yield open_source(REGISTRY_SOURCE_LM_WOW6432), '32bit'
+    else:
+        if include_per_user:
+            yield open_source(REGISTRY_SOURCE_CU), '32bit'
+        if include_per_machine:
+            yield open_source(REGISTRY_SOURCE_LM), '32bit'
+
+def findall(include_per_machine=True, include_per_user=True):
+    for src, arch in _get_sources(include_per_machine=include_per_machine, include_per_user=include_per_user):
+        for company in src:
+            for tag in company:
+                try:
+                    env = Environment(src, company.name, tag.name, arch)
+                    env.load()
+                except OSError:
+                    pass
+                else:
+                    yield env
+
+def find(company_or_tag, tag=None, include_per_machine=True, include_per_user=True, maxcount=None):
+    if not tag:
+        env = Environment(None, 'PythonCore', company_or_tag)
+    else:
+        env = Environment(None, company_or_tag, tag)
+
+    results = []
+    for src, arch in _get_sources(include_per_machine=include_per_machine, include_per_user=include_per_user):
+        try:
+            env._source = src
+            env._guessed_arch = arch
+            env.load()
+        except OSError:
+            pass
+        else:
+            results.append(env)
+    return results
+
+def findone(company_or_tag, tag=None, include_per_machine=True, include_per_user=True):
+    found = find(company_or_tag, tag, include_per_machine, include_per_user, maxcount=1)
+    if found:
+        return found[0]
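
Usage is a two-liner; a sketch (Windows only, and results depend entirely on
what is registered on the machine):

    from pythonfinder._vendor.pep514tools.environment import find, findall, findone

    for env in findall():                 # every visible PEP 514 registration
        print(env.company, env.tag, env.info)

    py36 = findone('3.6')                 # first PythonCore 3.6 hit, or None
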
diff --git a/pipenv/vendor/pythonfinder/cli.py b/pipenv/vendor/pythonfinder/cli.py
new file mode 100644
index 0000000000..5ad13d257c
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/cli.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# -*- coding=utf-8 -*-
+
+import click
+import crayons
+import sys
+from . import __version__
+from .pythonfinder import PythonFinder
+
+
+# @click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
+@click.command()
+@click.option('--find', default=False, nargs=1, help="Find a specific python version.")
+@click.option('--findall', is_flag=True, default=False, help="Find all python versions.")
+# @click.version_option(prog_name=crayons.normal('pyfinder', bold=True), version=__version__)
+@click.pass_context
+def cli(
+    ctx, find=False, findall=False
+):
+    if not find and not findall:
+        click.echo('Please provide a command', color='red')
+        sys.exit(1)
+    if find:
+        if any([find.startswith('{0}'.format(n)) for n in range(10)]):
+            found = PythonFinder.from_version(find.strip())
+        else:
+            found = PythonFinder.from_line(find.strip())
+        if found:
+            click.echo('Found Python Version: {0}'.format(found), color='white')
+            sys.exit(0)
+    else:
+        #TODO: implement this
+        click.echo('This is not yet implemented')
+        sys.exit(0)
+    sys.exit()
+
+
+if __name__ == '__main__':
+    cli()
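
The command can be smoke-tested without a console script via click's test
runner (a sketch; output depends on the locally installed interpreters):

    from click.testing import CliRunner
    from pythonfinder.cli import cli

    result = CliRunner().invoke(cli, ['--find', '3.6'])
    print(result.output)
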
diff --git a/pipenv/vendor/pythonfinder/pythonfinder.py b/pipenv/vendor/pythonfinder/pythonfinder.py
new file mode 100644
index 0000000000..8620e2e683
--- /dev/null
+++ b/pipenv/vendor/pythonfinder/pythonfinder.py
@@ -0,0 +1,233 @@
+# -*- coding=utf-8 -*-
+import os
+import sys
+import delegator
+import platform
+from packaging.version import parse as parse_version
+from collections import defaultdict
+try:
+    from pathlib import Path
+except ImportError:
+    from pathlib2 import Path
+
+
+PYENV_INSTALLED = (bool(os.environ.get('PYENV_SHELL')) or bool(os.environ.get('PYENV_ROOT')))
+PYENV_ROOT = os.environ.get('PYENV_ROOT', os.path.expanduser('~/.pyenv'))
+IS_64BIT_OS = None
+SYSTEM_ARCH = platform.architecture()[0]
+
+if sys.maxsize > 2**32:
+    IS_64BIT_OS = (platform.machine() == 'AMD64')
+else:
+    IS_64BIT_OS = False
+
+
+def shellquote(s):
+    """Prepares a string for the shell (on Windows too!)
+
+    Only for use on grouped arguments (passed as a string to Popen)
+    """
+    if s is None:
+        return None
+    # Additional escaping for windows paths
+    if os.name == 'nt':
+        s = "{}".format(s.replace("\\", "\\\\"))
+
+    return '"' + s.replace("'", "'\\''") + '"'
+
+
+class PathFinder(object):
+    WHICH = {}
+
+    def __init__(self, path=None):
+        self.path = path if path else os.environ.get('PATH')
+        self._populate_which_dict()
+
+    @classmethod
+    def which(cls, cmd):
+        if not cls.WHICH:
+            cls._populate_which_dict()
+        return cls.WHICH.get(cmd, cmd)
+
+    @classmethod
+    def _populate_which_dict(cls):
+        for path in os.environ.get('PATH', '').split(os.pathsep):
+            path = os.path.expandvars(path)
+            files = os.listdir(path)
+            for fn in files:
+                full_path = os.sep.join([path, fn])
+                if os.access(full_path, os.X_OK) and not os.path.isdir(full_path):
+                    if not cls.WHICH.get(fn):
+                        cls.WHICH[fn] = full_path
+                    base_exec = os.path.splitext(fn)[0]
+                    if not cls.WHICH.get(base_exec):
+                        cls.WHICH[base_exec] = full_path
+
+
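
`PathFinder` is a small `$PATH` index; a usage sketch (note it assumes every
`PATH` entry exists, since `os.listdir` is called unguarded above):

    from pythonfinder.pythonfinder import PathFinder

    # Resolve a command name to an absolute path, falling back to the
    # name itself when nothing on $PATH matches.
    print(PathFinder.which('python'))
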
+class PythonFinder(PathFinder):
+    """Find pythons given a specific version, path, or nothing."""
+
+    PYENV_VERSIONS = {}
+    PYTHON_VERSIONS = {}
+    PYTHON_PATHS = {}
+    MAX_PYTHON = {}
+    PYTHON_ARCHS = defaultdict(dict)
+    WHICH_PYTHON = {}
+    RUNTIMES = ['python', 'pypy', 'ipy', 'jython', 'pyston']
+
+    def __init__(self, path=None, version=None, full_version=None):
+        self.version = version
+        self.full_version = full_version
+        super(PythonFinder, self).__init__(path=path)
+
+    @classmethod
+    def from_line(cls, python):
+        if os.path.isabs(python) and os.access(python, os.X_OK):
+            return python
+        if python.startswith('py'):
+            windows_finder = python.split()
+            if len(windows_finder) > 1 and windows_finder[0] == 'py' and windows_finder[1].startswith('-'):
+                version = windows_finder[1].strip('-').split()[0]
+                return cls.from_version(version)
+            return cls.WHICH_PYTHON.get(python) or cls.which(python)
+
+    @classmethod
+    def from_version(cls, version, architecture=None):
+        guess = cls.PYTHON_VERSIONS.get(cls.MAX_PYTHON.get(version, version))
+        if guess:
+            return guess
+        if os.name == 'nt':
+            path = cls.from_windows_finder(version, architecture)
+        else:
+            parsed_version = parse_version(version)
+            full_version = parsed_version.base_version
+            if PYENV_INSTALLED:
+                path = cls.from_pyenv(full_version)
+            else:
+                path = cls._crawl_path_for_version(full_version)
+        if path and not isinstance(path, Path):
+            path = Path(path)
+        return path
+
+    @classmethod
+    def from_windows_finder(cls, version=None, arch=None):
+        if not cls.PYTHON_VERSIONS:
+            cls._populate_windows_python_versions()
+        if arch:
+            return cls.PYTHON_ARCHS[version][arch]
+        return cls.PYTHON_VERSIONS[version]
+
+    @classmethod
+    def _populate_windows_python_versions(cls):
+        from pythonfinder._vendor.pep514tools import environment
+        versions = environment.findall()
+        path = None
+        for version_object in versions:
+            path = Path(version_object.info.install_path.__getattr__('')).joinpath('python.exe')
+            version = version_object.info.sys_version
+            full_version = version_object.info.version
+            architecture = getattr(version_object, 'sys_architecture', SYSTEM_ARCH)
+            for v in [version, full_version, architecture]:
+                if not cls.PYTHON_VERSIONS.get(v):
+                    cls.PYTHON_VERSIONS[v] = '{0}'.format(path)
+            cls.register_python(path, full_version, architecture)
+
+    @classmethod
+    def _populate_python_versions(cls):
+        import fnmatch
+        match_rules = ['*python', '*python?', '*python?.?', '*python?.?m']
+        runtime_execs = []
+        exts = list(filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)))
+        for path in os.environ.get('PATH', '').split(os.pathsep):
+            path = os.path.expandvars(path)
+            from glob import glob
+            pythons = glob(os.sep.join([path, 'python*']))
+            execs = [match for rule in match_rules for match in fnmatch.filter(pythons, rule)]
+            for executable in execs:
+                exec_name = os.path.basename(executable)
+                if os.access(executable, os.X_OK):
+                    runtime_execs.append(executable)
+                if not cls.WHICH_PYTHON.get(exec_name):
+                    cls.WHICH_PYTHON[exec_name] = executable
+                for e in exts:
+                    pext = executable + e
+                    if os.access(pext, os.X_OK):
+                        runtime_execs.append(pext)
+        for python in runtime_execs:
+            version_cmd = '{0} -c "import sys; print(sys.version.split()[0])"'.format(shellquote(python))
+            version = delegator.run(version_cmd).out.strip()
+            cls.register_python(python, version)
+
+    @classmethod
+    def _crawl_path_for_version(cls, version):
+        if not cls.PYTHON_VERSIONS:
+            cls._populate_python_versions()
+        return cls.PYTHON_VERSIONS.get(version)
+
+    @classmethod
+    def from_pyenv(cls, version):
+        if not cls.PYENV_VERSIONS:
+            cls.populate_pyenv_runtimes()
+        return cls.PYENV_VERSIONS[version]
+
+    @classmethod
+    def register_python(cls, path, full_version, pre=False, pyenv=False, arch=None):
+        if not arch:
+            import platform
+            arch, _ = platform.architecture()
+        parsed_version = parse_version(full_version)
+        if isinstance(parsed_version._version, str):
+            if arch == SYSTEM_ARCH or SYSTEM_ARCH.startswith(str(arch)):
+                cls.PYTHON_VERSIONS[parsed_version._version] = path
+            cls.MAX_PYTHON[parsed_version._version] = parsed_version._version
+            cls.PYTHON_PATHS[path] = parsed_version._version
+            cls.PYTHON_ARCHS[parsed_version._version][arch] = path
+            return
+        pre = pre or parsed_version.is_prerelease
+        major_minor = '.'.join(['{0}'.format(v) for v in parsed_version._version.release[:2]])
+        major = '{0}'.format(parsed_version._version.release[0])
+        cls.PYTHON_PATHS[path] = full_version
+        if not pre and parsed_version > parse_version(cls.MAX_PYTHON.get(major_minor, '0.0.0')):
+            if major_minor != full_version:
+                if parsed_version > parse_version(cls.MAX_PYTHON.get(full_version, '0.0.0')):
+                    cls.MAX_PYTHON[full_version] = parsed_version.base_version
+            cls.MAX_PYTHON[major_minor] = parsed_version.base_version
+            cls.PYTHON_VERSIONS[major_minor] = path
+            if arch == SYSTEM_ARCH or SYSTEM_ARCH.startswith(str(arch)):
+                if parsed_version > parse_version(cls.MAX_PYTHON.get(major, '0.0.0')):
+                    cls.MAX_PYTHON[major] = parsed_version.base_version
+                    cls.PYTHON_VERSIONS[major] = path
+        if not pyenv:
+            for v in [full_version, major_minor, major]:
+                if not cls.PYTHON_VERSIONS.get(v) or cls.MAX_PYTHON.get(v) == full_version:
+                    if cls.MAX_PYTHON.get(v) == full_version and not (arch == SYSTEM_ARCH or SYSTEM_ARCH.startswith(str(arch))):
+                        pass
+                    else:
+                        cls.PYTHON_VERSIONS[v] = path
+                if not cls.PYTHON_ARCHS.get(v, {}).get(arch):
+                    cls.PYTHON_ARCHS[v][arch] = path
+        else:
+            for v in [full_version, major_minor, major]:
+                if (not cls.PYENV_VERSIONS.get(v) and (v == major and not pre) or v != major) or cls.MAX_PYTHON.get(v) == full_version:
+                    cls.PYENV_VERSIONS[v] = path
+            if not cls.PYTHON_VERSIONS.get(full_version):
+                cls.PYTHON_VERSIONS[full_version] = path
+            if not cls.PYTHON_ARCHS.get(v, {}).get(arch):
+                cls.PYTHON_ARCHS[v][arch] = path
+
+    @classmethod
+    def populate_pyenv_runtimes(cls):
+        from glob import glob
+        search_path = os.sep.join(['{0}'.format(PYENV_ROOT), 'versions', '*'])
+        runtimes = ['pypy', 'ipy', 'jython', 'pyston']
+        for pyenv_path in glob(search_path):
+            parsed_version = parse_version(os.path.basename(pyenv_path))
+            if parsed_version.is_prerelease and cls.PYENV_VERSIONS.get(parsed_version.base_version):
+                continue
+            bin_path = os.sep.join([pyenv_path, 'bin'])
+            runtime = os.sep.join([bin_path, 'python'])
+            if not os.path.exists(runtime):
+                exes = [os.sep.join([bin_path, exe]) for exe in runtimes if os.path.exists(os.sep.join([bin_path, exe]))]
+                if exes:
+                    runtime = exes[0]
+            cls.register_python(runtime, parsed_version.base_version, pre=parsed_version.is_prerelease, pyenv=True)
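
A hedged sketch of the lookup entry points defined above; what they return
depends entirely on the machine (Windows registry, pyenv, or a `$PATH` crawl):

    from pythonfinder.pythonfinder import PythonFinder

    print(PythonFinder.from_version('3.6'))   # pathlib.Path to an interpreter, or None
    print(PythonFinder.from_line('python3'))  # resolve a command-style name
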
diff --git a/pipenv/vendor/requirementslib/LICENSE b/pipenv/vendor/requirementslib/LICENSE
new file mode 100644
index 0000000000..8c731e2798
--- /dev/null
+++ b/pipenv/vendor/requirementslib/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright 2018 Dan Ryan.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py
new file mode 100644
index 0000000000..cff986d48a
--- /dev/null
+++ b/pipenv/vendor/requirementslib/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding=utf-8 -*-
+__version__ = "0.0.1"
+
+from .requirements import Requirement
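
A minimal sketch of the vendored top-level API, assuming `Requirement`
exposes the same `from_line` constructor that `PipenvRequirement.from_line`
provides earlier in this patch series:

    from requirementslib import Requirement

    # hypothetical example line; markers are split off just as in
    # _split_markers below
    r = Requirement.from_line('requests>=2.18; python_version >= "2.7"')
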
diff --git a/pipenv/vendor/requirementslib/_compat.py b/pipenv/vendor/requirementslib/_compat.py
new file mode 100644
index 0000000000..e6de02bc1a
--- /dev/null
+++ b/pipenv/vendor/requirementslib/_compat.py
@@ -0,0 +1,39 @@
+# -*- coding=utf-8 -*-
+import importlib
+
+
+def do_import(module_path, subimport=None, old_path=None):
+    internal = "pip._internal.{0}".format(module_path)
+    old_path = old_path or module_path
+    pip9 = "pip.{0}".format(old_path)
+    try:
+        _tmp = importlib.import_module(internal)
+    except ImportError:
+        _tmp = importlib.import_module(pip9)
+    if subimport:
+        return getattr(_tmp, subimport, _tmp)
+    return _tmp
+
+
+InstallRequirement = do_import("req.req_install", "InstallRequirement")
+parse_requirements = do_import("req.req_file", "parse_requirements")
+RequirementSet = do_import("req.req_set", "RequirementSet")
+user_cache_dir = do_import("utils.appdirs", "user_cache_dir")
+FAVORITE_HASH = do_import("utils.hashes", "FAVORITE_HASH")
+is_file_url = do_import("download", "is_file_url")
+url_to_path = do_import("download", "url_to_path")
+path_to_url = do_import("download", "path_to_url")
+is_archive_file = do_import("download", "is_archive_file")
+_strip_extras = do_import("req.req_install", "_strip_extras")
+PackageFinder = do_import("index", "PackageFinder")
+FormatControl = do_import("index", "FormatControl")
+Link = do_import("index", "Link")
+Wheel = do_import("wheel", "Wheel")
+Command = do_import("basecommand", "Command")
+cmdoptions = do_import("cmdoptions")
+get_installed_distributions = do_import(
+    "utils.misc", "get_installed_distributions", old_path="utils"
+)
+is_installable_file = do_import("utils.misc", "is_installable_file", old_path="utils")
+is_installable_dir = do_import("utils.misc", "is_installable_dir", old_path="utils")
+PyPI = do_import("models.index", "PyPI")
diff --git a/pipenv/requirements.py b/pipenv/vendor/requirementslib/requirements.py
similarity index 63%
rename from pipenv/requirements.py
rename to pipenv/vendor/requirementslib/requirements.py
index 616b7af4af..683e66fdab 100644
--- a/pipenv/requirements.py
+++ b/pipenv/vendor/requirementslib/requirements.py
@@ -6,28 +6,37 @@
 import os
 import requirements
 import six
-from .vendor.attr import attrs, attrib, Factory, validators
-from .vendor import attr
-from pip9.index import Link
-from pip9.download import path_to_url
-from pip9.req.req_install import _strip_extras
-from pip9._vendor.distlib.markers import Evaluator
-from pip9._vendor.packaging.markers import Marker, InvalidMarker
-from pip9._vendor.packaging.specifiers import SpecifierSet, InvalidSpecifier
-from .utils import SCHEME_LIST, VCS_LIST, is_installable_file, is_vcs, multi_split, get_converted_relative_path, is_star, is_valid_url, pep423_name
-from .vendor.first import first
+from attr import attrs, attrib, Factory, validators
+import attr
+from ._compat import Link, path_to_url, _strip_extras
+from distlib.markers import Evaluator
+from packaging.markers import Marker, InvalidMarker
+from packaging.specifiers import SpecifierSet, InvalidSpecifier
+from .utils import (
+    SCHEME_LIST,
+    VCS_LIST,
+    is_installable_file,
+    is_vcs,
+    is_valid_url,
+    pep423_name,
+    get_converted_relative_path,
+    multi_split,
+    is_star,
+)
+from first import first
 
 try:
     from pathlib import Path
 except ImportError:
     from pathlib2 import Path
-HASH_STRING = ' --hash={0}'
+
+HASH_STRING = " --hash={0}"
 
 
 def _strip_ssh_from_git_uri(uri):
     """Return git+ssh:// formatted URI to git+git@ format"""
     if isinstance(uri, six.string_types):
-        uri = uri.replace('git+ssh://', 'git+')
+        uri = uri.replace("git+ssh://", "git+")
     return uri
 
 
@@ -35,17 +44,17 @@ def _clean_git_uri(uri):
     """Cleans VCS uris from pip9 format"""
     if isinstance(uri, six.string_types):
         # Add scheme for parsing purposes, this is also what pip does
-        if uri.startswith('git+') and '://' not in uri:
-            uri = uri.replace('git+', 'git+ssh://')
+        if uri.startswith("git+") and "://" not in uri:
+            uri = uri.replace("git+", "git+ssh://")
     return uri
 
 
 def _split_markers(line):
     """Split markers from a dependency"""
     if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
-        marker_sep = ';'
+        marker_sep = ";"
     else:
-        marker_sep = '; '
+        marker_sep = "; "
     markers = None
     if marker_sep in line:
         line, markers = line.split(marker_sep, 1)
@@ -55,39 +64,37 @@ def _split_markers(line):
 
 def _split_vcs_method(uri):
     """Split a vcs+uri formatted uri into (vcs, uri)"""
-    vcs_start = '{0}+'
-    vcs = first(
-        [vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))]
-    )
+    vcs_start = "{0}+"
+    vcs = first([vcs for vcs in VCS_LIST if uri.startswith(vcs_start.format(vcs))])
     if vcs:
-        vcs, uri = uri.split('+', 1)
+        vcs, uri = uri.split("+", 1)
     return vcs, uri
 
 
 def _validate_vcs(instance, attr_, value):
     if value not in VCS_LIST:
-        raise ValueError('Invalid vcs {0!r}'.format(value))
+        raise ValueError("Invalid vcs {0!r}".format(value))
 
 
 def _validate_path(instance, attr_, value):
     if not os.path.exists(value):
-        raise ValueError('Invalid path {0!r}', format(value))
+        raise ValueError("Invalid path {0!r}", format(value))
 
 
 def _validate_markers(instance, attr_, value):
     try:
-        Marker('{0}{1}'.format(attr_.name, value))
+        Marker("{0}{1}".format(attr_.name, value))
     except InvalidMarker:
-        raise ValueError('Invalid Marker {0}{1}'.format(attr_, value))
+        raise ValueError("Invalid Marker {0}{1}".format(attr_, value))
 
 
 def _validate_specifiers(instance, attr_, value):
-    if value == '':
+    if value == "":
         return True
     try:
         SpecifierSet(value)
     except InvalidSpecifier:
-        raise ValueError('Invalid Specifiers {0}'.format(value))
+        raise ValueError("Invalid Specifiers {0}".format(value))
 
 
 def _filter_none(k, v):
@@ -103,14 +110,13 @@ def _optional_instance_of(cls):
 @attrs
 class Source(object):
     # : URL to PyPI instance
-    url = attrib(default='')
+    url = attrib(default="")
     # : If False, skip SSL checks
     verify_ssl = attrib(
-        default=True,
-        validator=validators.optional(validators.instance_of(bool)),
+        default=True, validator=validators.optional(validators.instance_of(bool))
     )
     # : human name to refer to this source (can be referenced in packages or dev-packages)
-    name = attrib(default='')
+    name = attrib(default="")
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -142,9 +148,7 @@ def attr_fields(cls):
 @attrs
 class PipenvMarkers(BaseRequirement):
     """System-level requirements - see PEP508 for more detail"""
-    os_name = attrib(
-        default=None, validator=validators.optional(_validate_markers)
-    )
+    os_name = attrib(default=None, validator=validators.optional(_validate_markers))
     sys_platform = attrib(
         default=None, validator=validators.optional(_validate_markers)
     )
@@ -178,13 +182,16 @@ class PipenvMarkers(BaseRequirement):
 
     @property
     def line_part(self):
-        return ' and '.join(
-            ['{0} {1}'.format(k, v) for k, v in attr.asdict(self, filter=_filter_none).items()]
+        return " and ".join(
+            [
+                "{0} {1}".format(k, v)
+                for k, v in attr.asdict(self, filter=_filter_none).items()
+            ]
         )
 
     @property
     def pipfile_part(self):
-        return {'markers': self.as_line}
+        return {"markers": self.as_line}
 
     @classmethod
     def make_marker(cls, marker_string):
@@ -200,17 +207,17 @@ def make_marker(cls, marker_string):
 
     @classmethod
     def from_line(cls, line):
-        if ';' in line:
-            line = line.rsplit(';', 1)[1].strip()
+        if ";" in line:
+            line = line.rsplit(";", 1)[1].strip()
         marker_dict = cls.make_marker(line)
         return cls(**marker_dict)
 
     @classmethod
     def from_pipfile(cls, name, pipfile):
         found_keys = [k for k in pipfile.keys() if k in cls.attr_fields()]
-        marker_strings = ['{0} {1}'.format(k, pipfile[k]) for k in found_keys]
-        if pipfile.get('markers'):
-            marker_strings.append(pipfile.get('markers'))
+        marker_strings = ["{0} {1}".format(k, pipfile[k]) for k in found_keys]
+        if pipfile.get("markers"):
+            marker_strings.append(pipfile.get("markers"))
         markers = {}
         for marker in marker_strings:
             marker_dict = cls.make_marker(marker)
@@ -227,7 +234,7 @@ class NamedRequirement(BaseRequirement):
 
     @req.default
     def get_requirement(self):
-        return first(requirements.parse('{0}{1}'.format(self.name, self.version)))
+        return first(requirements.parse("{0}{1}".format(self.name, self.version)))
 
     @classmethod
     def from_line(cls, line):
@@ -240,24 +247,24 @@ def from_line(cls, line):
     @classmethod
     def from_pipfile(cls, name, pipfile):
         creation_args = {}
-        if hasattr(pipfile, 'keys'):
+        if hasattr(pipfile, "keys"):
             creation_args = {k: v for k, v in pipfile.items() if k in cls.attr_fields()}
-        creation_args['name'] = name
+        creation_args["name"] = name
         version = _get_version(pipfile)
-        creation_args['version'] = version
-        creation_args['req'] = first(requirements.parse('{0}{1}'.format(name, version)))
+        creation_args["version"] = version
+        creation_args["req"] = first(requirements.parse("{0}{1}".format(name, version)))
         return cls(**creation_args)
 
     @property
     def line_part(self):
-        return '{self.name}'.format(self=self)
+        return "{self.name}".format(self=self)
 
     @property
     def pipfile_part(self):
         pipfile_dict = attr.asdict(self, filter=_filter_none)
-        if 'version' not in pipfile_dict:
-            pipfile_dict['version'] = '*'
-        name = pipfile_dict.pop('name')
+        if "version" not in pipfile_dict:
+            pipfile_dict["version"] = "*"
+        name = pipfile_dict.pop("name")
         return {name: pipfile_dict}
 
 
@@ -278,52 +285,57 @@ class FileRequirement(BaseRequirement):
     @uri.default
     def get_uri(self):
         if self.path and not self.uri:
-            self._uri_scheme = 'path'
+            self._uri_scheme = "path"
             self.uri = path_to_url(os.path.abspath(self.path))
 
     @name.default
     def get_name(self):
         loc = self.path or self.uri
         if loc:
-            self._uri_scheme = 'path' if self.path else 'uri'
-        hashed_loc = hashlib.sha256(loc.encode('utf-8')).hexdigest()
+            self._uri_scheme = "path" if self.path else "uri"
+        hashed_loc = hashlib.sha256(loc.encode("utf-8")).hexdigest()
         hash_fragment = hashed_loc[-7:]
         self._has_hashed_name = True
         return hash_fragment
 
     @link.default
     def get_link(self):
-        target = '{0}#egg={1}'.format(self.uri, self.name)
+        target = "{0}#egg={1}".format(self.uri, self.name)
         return Link(target)
 
     @req.default
     def get_requirement(self):
-        base = '{0}'.format(self.link)
+        base = "{0}".format(self.link)
         req = first(requirements.parse(base))
         if self.editable:
             req.editable = True
-        if self.link and self.link.scheme.startswith('file'):
+        if self.link and self.link.scheme.startswith("file"):
             if self.path:
                 req.path = self.path
                 req.local_file = True
-                self._uri_scheme = 'file'
+                self._uri_scheme = "file"
                 req.uri = None
         req.link = self.link
         return req
 
     @property
     def is_remote_artifact(self):
-        return any(self.link.scheme.startswith(scheme) for scheme in ('http', 'https', 'ftp', 'ftps', 'uri')) and not self.req.local_file and (self.link.is_artifact or self.link.is_wheel) and not self.req.editable
+        return any(
+            self.link.scheme.startswith(scheme)
+            for scheme in ("http", "https", "ftp", "ftps", "uri")
+        ) and (
+            self.link.is_artifact or self.link.is_wheel
+        ) and not self.req.editable
 
     @classmethod
     def from_line(cls, line):
         link = None
         path = None
-        editable = line.startswith('-e ')
-        line = line.split(' ', 1)[1] if editable else line
+        editable = line.startswith("-e ")
+        line = line.split(" ", 1)[1] if editable else line
         if not any([is_installable_file(line), is_valid_url(line)]):
             raise ValueError(
-                'Supplied requirement is not installable: {0!r}'.format(line)
+                "Supplied requirement is not installable: {0!r}".format(line)
             )
 
         if is_valid_url(line):
@@ -331,59 +343,60 @@ def from_line(cls, line):
         else:
             _path = Path(line)
             link = Link(_path.absolute().as_uri())
-            if _path.is_absolute() or _path.as_posix() == '.':
+            if _path.is_absolute() or _path.as_posix() == ".":
                 path = _path.as_posix()
             else:
                 path = get_converted_relative_path(line)
         arg_dict = {
-            'path': path,
-            'uri': link.url_without_fragment,
-            'link': link,
-            'editable': editable
+            "path": path,
+            "uri": link.url_without_fragment,
+            "link": link,
+            "editable": editable,
         }
         if link.egg_fragment:
-            arg_dict['name'] = link.egg_fragment
+            arg_dict["name"] = link.egg_fragment
         created = cls(**arg_dict)
         return created
 
     @classmethod
     def from_pipfile(cls, name, pipfile):
-        uri_key = first((k for k in ['uri', 'file'] if k in pipfile))
-        uri = pipfile.get(uri_key, pipfile.get('path'))
+        uri_key = first((k for k in ["uri", "file"] if k in pipfile))
+        uri = pipfile.get(uri_key, pipfile.get("path"))
         if not uri_key:
             abs_path = os.path.abspath(uri)
             uri = path_to_url(abs_path) if os.path.exists(abs_path) else None
         link = Link(uri) if uri else None
         arg_dict = {
-            'name': name,
-            'path': pipfile.get('path'),
-            'uri': link.url_without_fragment,
-            'editable': pipfile.get('editable'),
-            'link': link
+            "name": name,
+            "path": pipfile.get("path"),
+            "uri": link.url_without_fragment,
+            "editable": pipfile.get("editable"),
+            "link": link,
         }
         return cls(**arg_dict)
 
     @property
     def line_part(self):
         seed = self.path or self.link.url or self.uri
-        editable = '-e ' if self.editable else ''
-        return '{0}{1}'.format(editable, seed)
+        editable = "-e " if self.editable else ""
+        return "{0}{1}".format(editable, seed)
 
     @property
     def pipfile_part(self):
         pipfile_dict = {k: v for k, v in attr.asdict(self, filter=_filter_none).items()}
-        name = pipfile_dict.pop('name')
+        name = pipfile_dict.pop("name")
         req = self.req
-        if self._has_hashed_name and self.is_remote_artifact:
-            dict_key = 'file'
+        # For local paths and remote installable artifacts (zipfiles, etc)
+        if self.is_remote_artifact:
+            dict_key = "file"
             # Look for uri first because file is a uri format and this is designed
             # to make sure we add file keys to the pipfile as a replacement of uri
-            target_keys = [k for k in pipfile_dict.keys() if k in ['uri', 'path']]
+            target_keys = [k for k in pipfile_dict.keys() if k in ["uri", "path"]]
             pipfile_dict[dict_key] = pipfile_dict.pop(first(target_keys))
             if len(target_keys) > 1:
                 _ = pipfile_dict.pop(target_keys[1])
         else:
-            collisions = [key for key in ['path', 'uri', 'file'] if key in pipfile_dict]
+            collisions = [key for key in ["path", "uri", "file"] if key in pipfile_dict]
             if len(collisions) > 1:
                 for k in collisions[1:]:
                     _ = pipfile_dict.pop(k)
@@ -402,42 +415,54 @@ class VCSRequirement(FileRequirement):
     name = attrib()
     link = attrib()
     req = attrib()
-    _INCLUDE_FIELDS = ('editable', 'uri', 'path', 'vcs', 'ref', 'subdirectory', 'name', 'link', 'req')
+    _INCLUDE_FIELDS = (
+        "editable", "uri", "path", "vcs", "ref", "subdirectory", "name", "link", "req"
+    )
 
     @link.default
     def get_link(self):
-        return build_vcs_link(self.vcs, _clean_git_uri(self.uri), name=self.name, ref=self.ref, subdirectory=self.subdirectory)
+        return build_vcs_link(
+            self.vcs,
+            _clean_git_uri(self.uri),
+            name=self.name,
+            ref=self.ref,
+            subdirectory=self.subdirectory,
+        )
 
     @name.default
     def get_name(self):
-        return self.link.egg_fragment or self.req.name if self.req else ''
+        return (self.link.egg_fragment or self.req.name) if self.req else ""
 
     @property
     def vcs_uri(self):
         uri = self.uri
-        if not any(uri.startswith('{0}+'.format(vcs)) for vcs in VCS_LIST):
-            uri = '{0}+{1}'.format(self.vcs, uri)
+        if not any(uri.startswith("{0}+".format(vcs)) for vcs in VCS_LIST):
+            uri = "{0}+{1}".format(self.vcs, uri)
         return uri
 
     @req.default
     def get_requirement(self):
-        prefix = '-e ' if self.editable else ''
-        line = '{0}{1}'.format(prefix, self.link.url)
+        prefix = "-e " if self.editable else ""
+        line = "{0}{1}".format(prefix, self.link.url)
         req = first(requirements.parse(line))
-        if self.path and self.link and self.link.scheme.startswith('file'):
+        if self.path and self.link and self.link.scheme.startswith("file"):
             req.local_file = True
             req.path = self.path
         if self.editable:
             req.editable = True
         req.link = self.link
-        if self.uri != self.link.url and 'git+ssh://' in self.link.url and 'git+git@' in self.uri:
+        if (
+            self.uri != self.link.url
+            and "git+ssh://" in self.link.url
+            and "git+git@" in self.uri
+        ):
             req.line = _strip_ssh_from_git_uri(req.line)
             req.uri = _strip_ssh_from_git_uri(req.uri)
         if not req.name:
             raise ValueError(
-                'pipenv requires an #egg fragment for version controlled '
-                'dependencies. Please install remote dependency '
-                'in the form {0}#egg=.'.format(req.uri)
+                "pipenv requires an #egg fragment for version controlled "
+                "dependencies. Please install remote dependency "
+                "in the form {0}#egg=.".format(req.uri)
             )
         if self.vcs and not req.vcs:
             req.vcs = self.vcs
@@ -450,30 +475,32 @@ def from_pipfile(cls, name, pipfile):
         creation_args = {}
         pipfile_keys = [
             k
-            for k in (
-                'ref', 'vcs', 'subdirectory', 'path', 'editable', 'file', 'uri'
-            ) +
-            VCS_LIST
+            for k in ("ref", "vcs", "subdirectory", "path", "editable", "file", "uri")
+            + VCS_LIST
             if k in pipfile
         ]
         for key in pipfile_keys:
             if key in VCS_LIST:
-                creation_args['vcs'] = key
-                composed_uri = _clean_git_uri('{0}+{1}'.format(key, pipfile.get(key))).lstrip('{0}+'.format(key))
+                creation_args["vcs"] = key
+                composed_uri = _clean_git_uri(
+                    "{0}+{1}".format(key, pipfile.get(key))
+                ).lstrip(
+                    "{0}+".format(key)
+                )
                 is_url = is_valid_url(pipfile.get(key)) or is_valid_url(composed_uri)
-                target_key = 'uri' if is_url else 'path'
+                target_key = "uri" if is_url else "path"
                 creation_args[target_key] = pipfile.get(key)
             else:
                 creation_args[key] = pipfile.get(key)
-        creation_args['name'] = name
+        creation_args["name"] = name
         return cls(**creation_args)
 
     @classmethod
     def from_line(cls, line, editable=None):
         path = None
-        if line.startswith('-e '):
+        if line.startswith("-e "):
             editable = True
-            line = line.split(' ', 1)[1]
+            line = line.split(" ", 1)[1]
         vcs_line = _clean_git_uri(line)
         vcs_method, vcs_location = _split_vcs_method(vcs_line)
         if not is_valid_url(vcs_location) and os.path.exists(vcs_location):
@@ -482,12 +509,12 @@ def from_line(cls, line, editable=None):
         link = Link(vcs_line)
         name = link.egg_fragment
         uri = link.url_without_fragment
-        if 'git+git@' in line:
+        if "git+git@" in line:
             uri = _strip_ssh_from_git_uri(uri)
         subdirectory = link.subdirectory_fragment
         ref = None
-        if '@' in link.show_url:
-            uri, ref = uri.rsplit('@', 1)
+        if "@" in link.show_url:
+            uri, ref = uri.rsplit("@", 1)
         return cls(
             name=name,
             ref=ref,
@@ -504,17 +531,17 @@ def line_part(self):
         """requirements.txt compatible line part sans-extras"""
         if self.req:
             return self.req.line
-        base = '{0}'.format(self.link)
+        base = "{0}".format(self.link)
         if self.editable:
-            base = '-e {0}'.format(base)
+            base = "-e {0}".format(base)
         return base
 
     @staticmethod
     def _choose_vcs_source(pipfile):
-        src_keys = [k for k in pipfile.keys() if k in ['path', 'uri', 'file']]
+        src_keys = [k for k in pipfile.keys() if k in ["path", "uri", "file"]]
         if src_keys:
             chosen_key = first(src_keys)
-            vcs_type = pipfile.pop('vcs')
+            vcs_type = pipfile.pop("vcs")
             _, pipfile_url = _split_vcs_method(pipfile.get(chosen_key))
             pipfile[vcs_type] = pipfile_url
             for removed in src_keys:
@@ -524,28 +551,24 @@ def _choose_vcs_source(pipfile):
     @property
     def pipfile_part(self):
         pipfile_dict = attr.asdict(self, filter=_filter_none).copy()
-        if 'vcs' in pipfile_dict:
+        if "vcs" in pipfile_dict:
             pipfile_dict = self._choose_vcs_source(pipfile_dict)
-        name = pipfile_dict.pop('name')
+        name = pipfile_dict.pop("name")
         return {name: pipfile_dict}
 
 
 @attrs
-class PipenvRequirement(object):
+class Requirement(object):
     name = attrib()
     vcs = attrib(default=None, validator=validators.optional(_validate_vcs))
-    req = attrib(
-        default=None, validator=_optional_instance_of(BaseRequirement)
-    )
+    req = attrib(default=None, validator=_optional_instance_of(BaseRequirement))
     markers = attrib(default=None)
-    specifiers = attrib(
-        validator=validators.optional(_validate_specifiers)
-    )
+    specifiers = attrib(validator=validators.optional(_validate_specifiers))
     index = attrib(default=None)
     editable = attrib(default=None)
     hashes = attrib(default=Factory(list), converter=list)
     extras = attrib(default=Factory(list))
-    _INCLUDE_FIELDS = ('name', 'markers', 'index', 'editable', 'hashes', 'extras')
+    _INCLUDE_FIELDS = ("name", "markers", "index", "editable", "hashes", "extras")
 
     @name.default
     def get_name(self):
@@ -558,23 +581,23 @@ def requirement(self):
     @property
     def hashes_as_pip(self):
         if self.hashes:
-            return ''.join([HASH_STRING.format(h) for h in self.hashes])
+            return "".join([HASH_STRING.format(h) for h in self.hashes])
 
-        return ''
+        return ""
 
     @property
     def markers_as_pip(self):
         if self.markers:
-            return '; {0}'.format(self.markers)
+            return "; {0}".format(self.markers)
 
-        return ''
+        return ""
 
     @property
     def extras_as_pip(self):
         if self.extras:
-            return '[{0}]'.format(','.join(self.extras))
+            return "[{0}]".format(",".join(self.extras))
 
-        return ''
+        return ""
 
     @specifiers.default
     def get_specifiers(self):
@@ -584,15 +607,15 @@ def get_specifiers(self):
 
     @property
     def is_vcs(self):
-        return isinstance(self, VCSRequirement)
+        return isinstance(self.req, VCSRequirement)
 
     @property
     def is_file_or_url(self):
-        return isinstance(self, FileRequirement)
+        return isinstance(self.req, FileRequirement)
 
     @property
     def is_named(self):
-        return isinstance(self, NamedRequirement)
+        return isinstance(self.req, NamedRequirement)
 
     @property
     def normalized_name(self):
@@ -603,106 +626,120 @@ def normalized_name(self):
     @classmethod
     def from_line(cls, line):
         hashes = None
-        if '--hash=' in line:
-            hashes = line.split(' --hash=')
+        if "--hash=" in line:
+            hashes = line.split(" --hash=")
             line, hashes = hashes[0], hashes[1:]
-        editable = line.startswith('-e ')
-        stripped_line = line.split(' ', 1)[1] if editable else line
+        editable = line.startswith("-e ")
+        stripped_line = line.split(" ", 1)[1] if editable else line
         line, markers = _split_markers(line)
         line, extras = _strip_extras(line)
         vcs = None
         # Installable local files and installable non-vcs urls are handled
         # as files, generally speaking
-        if is_installable_file(stripped_line) or (is_valid_url(stripped_line) and not is_vcs(stripped_line)):
+        if (
+            is_installable_file(stripped_line)
+            or (is_valid_url(stripped_line) and not is_vcs(stripped_line))
+        ):
             r = FileRequirement.from_line(line)
         elif is_vcs(stripped_line):
             r = VCSRequirement.from_line(line)
             vcs = r.vcs
         else:
-            name = multi_split(stripped_line, '!=<>~')[0]
+            name = multi_split(stripped_line, "!=<>~")[0]
             if not extras:
                 name, extras = _strip_extras(name)
             r = NamedRequirement.from_line(stripped_line)
         if extras:
             extras = first(
-                requirements.parse('fakepkg{0}'.format(_extras_to_string(extras)))
+                requirements.parse("fakepkg{0}".format(_extras_to_string(extras)))
             ).extras
             r.req.extras = extras
         if markers:
             r.req.markers = markers
         args = {
-            'name': r.name,
-            'vcs': vcs,
-            'req': r,
-            'markers': markers,
-            'editable': editable,
+            "name": r.name,
+            "vcs": vcs,
+            "req": r,
+            "markers": markers,
+            "editable": editable,
         }
         if extras:
-            args['extras'] = extras
+            args["extras"] = extras
         if hashes:
-            args['hashes'] = hashes
+            args["hashes"] = hashes
         return cls(**args)
 
     @classmethod
     def from_pipfile(cls, name, indexes, pipfile):
         _pipfile = {}
-        if hasattr(pipfile, 'keys'):
+        if hasattr(pipfile, "keys"):
             _pipfile = dict(pipfile).copy()
-        _pipfile['version'] = _get_version(pipfile)
+        _pipfile["version"] = _get_version(pipfile)
         vcs = first([vcs for vcs in VCS_LIST if vcs in _pipfile])
         if vcs:
-            _pipfile['vcs'] = vcs
+            _pipfile["vcs"] = vcs
             r = VCSRequirement.from_pipfile(name, pipfile)
-        elif any(key in _pipfile for key in ['path', 'file', 'uri']):
+        elif any(key in _pipfile for key in ["path", "file", "uri"]):
             r = FileRequirement.from_pipfile(name, pipfile)
         else:
             r = NamedRequirement.from_pipfile(name, pipfile)
         args = {
-            'name': r.name,
-            'vcs': vcs,
-            'req': r,
-            'markers': PipenvMarkers.from_pipfile(name, _pipfile).line_part,
-            'extras': _pipfile.get('extras'),
-            'editable': _pipfile.get('editable', False),
-            'index': _pipfile.get('index')
+            "name": r.name,
+            "vcs": vcs,
+            "req": r,
+            "markers": PipenvMarkers.from_pipfile(name, _pipfile).line_part,
+            "extras": _pipfile.get("extras"),
+            "editable": _pipfile.get("editable", False),
+            "index": _pipfile.get("index"),
         }
-        if any(key in _pipfile for key in ['hash', 'hashes']):
-            args['hashes'] = _pipfile.get('hashes', [pipfile.get('hash')])
+        if any(key in _pipfile for key in ["hash", "hashes"]):
+            args["hashes"] = _pipfile.get("hashes", [pipfile.get("hash")])
         return cls(**args)
 
     def as_line(self, include_index=False, project=None):
-        line = '{0}{1}{2}{3}{4}'.format(
+        line = "{0}{1}{2}{3}{4}".format(
             self.req.line_part,
             self.extras_as_pip,
-            self.specifiers if self.specifiers else '',
+            self.specifiers if self.specifiers else "",
             self.markers_as_pip,
             self.hashes_as_pip,
         )
         if include_index and not (self.requirement.local_file or self.vcs):
             from .utils import prepare_pip_source_args
+
             if self.index:
                 pip_src_args = [project.get_source(self.index)]
             else:
                 pip_src_args = project.sources
-            index_string = ' '.join(prepare_pip_source_args(pip_src_args))
-            line = '{0} {1}'.format(line, index_string)
+            index_string = " ".join(prepare_pip_source_args(pip_src_args))
+            line = "{0} {1}".format(line, index_string)
         return line
 
     def as_pipfile(self, include_index=False):
-        good_keys = ('hashes', 'extras', 'markers', 'editable', 'version', 'index') + VCS_LIST
-        req_dict = {k: v for k, v in attr.asdict(self, recurse=False, filter=_filter_none).items() if k in good_keys}
+        good_keys = (
+            "hashes", "extras", "markers", "editable", "version", "index"
+        ) + VCS_LIST
+        req_dict = {
+            k: v
+            for k, v in attr.asdict(self, recurse=False, filter=_filter_none).items()
+            if k in good_keys
+        }
         name = self.name
-        base_dict = {k: v for k, v in self.req.pipfile_part[name].items() if k not in ['req', 'link']}
+        base_dict = {
+            k: v
+            for k, v in self.req.pipfile_part[name].items()
+            if k not in ["req", "link"]
+        }
         base_dict.update(req_dict)
-        conflicting_keys = ('file', 'path', 'uri')
-        if 'file' in base_dict and any(k in base_dict for k in conflicting_keys[1:]):
+        conflicting_keys = ("file", "path", "uri")
+        if "file" in base_dict and any(k in base_dict for k in conflicting_keys[1:]):
             conflicts = [k for k in conflicting_keys[1:] if k in base_dict]
             for k in conflicts:
                 _ = base_dict.pop(k)
-        if 'hashes' in base_dict and len(base_dict['hashes']) == 1:
-            base_dict['hash'] = base_dict.pop('hashes')[0]
-        if len(base_dict.keys()) == 1 and 'version' in base_dict:
-            base_dict = base_dict.get('version')
+        if "hashes" in base_dict and len(base_dict["hashes"]) == 1:
+            base_dict["hash"] = base_dict.pop("hashes")[0]
+        if len(base_dict.keys()) == 1 and "version" in base_dict:
+            base_dict = base_dict.get("version")
         return {name: base_dict}
 
     @property
@@ -713,12 +750,12 @@ def pipfile_entry(self):
 def _extras_to_string(extras):
     """Turn a list of extras into a string"""
     if isinstance(extras, six.string_types):
-        if extras.startswith('['):
+        if extras.startswith("["):
             return extras
 
         else:
             extras = [extras]
-    return '[{0}]'.format(','.join(extras))
+    return "[{0}]".format(",".join(extras))
 
 
 def _specs_to_string(specs):
@@ -726,40 +763,38 @@ def _specs_to_string(specs):
     if specs:
         if isinstance(specs, six.string_types):
             return specs
-        return ','.join([''.join(spec) for spec in specs])
-    return ''
+        return ",".join(["".join(spec) for spec in specs])
+    return ""
 
 
-def build_vcs_link(
-    vcs, uri, name=None, ref=None, subdirectory=None, extras=None
-):
+def build_vcs_link(vcs, uri, name=None, ref=None, subdirectory=None, extras=None):
     if extras is None:
         extras = []
-    vcs_start = '{0}+'.format(vcs)
+    vcs_start = "{0}+".format(vcs)
     if not uri.startswith(vcs_start):
-        uri = '{0}{1}'.format(vcs_start, uri)
+        uri = "{0}{1}".format(vcs_start, uri)
     uri = _clean_git_uri(uri)
     if ref:
-        uri = '{0}@{1}'.format(uri, ref)
+        uri = "{0}@{1}".format(uri, ref)
     if name:
-        uri = '{0}#egg={1}'.format(uri, name)
+        uri = "{0}#egg={1}".format(uri, name)
         if extras:
             extras = _extras_to_string(extras)
-            uri = '{0}{1}'.format(uri, extras)
+            uri = "{0}{1}".format(uri, extras)
     if subdirectory:
-        uri = '{0}&subdirectory={1}'.format(uri, subdirectory)
+        uri = "{0}&subdirectory={1}".format(uri, subdirectory)
     return Link(uri)
 
 
 def _get_version(pipfile_entry):
-    if str(pipfile_entry) == '{}' or is_star(pipfile_entry):
-        return ''
+    if str(pipfile_entry) == "{}" or is_star(pipfile_entry):
+        return ""
 
-    elif hasattr(pipfile_entry, 'keys') and 'version' in pipfile_entry:
-        if is_star(pipfile_entry.get('version')):
-            return ''
-        return pipfile_entry.get('version', '')
+    elif hasattr(pipfile_entry, "keys") and "version" in pipfile_entry:
+        if is_star(pipfile_entry.get("version")):
+            return ""
+        return pipfile_entry.get("version", "")
 
     if isinstance(pipfile_entry, six.string_types):
         return pipfile_entry
-    return ''
+    return ""
diff --git a/pipenv/vendor/requirementslib/utils.py b/pipenv/vendor/requirementslib/utils.py
new file mode 100644
index 0000000000..1f815e1be5
--- /dev/null
+++ b/pipenv/vendor/requirementslib/utils.py
@@ -0,0 +1,103 @@
+# -*- coding=utf-8 -*-
+from __future__ import absolute_import
+import os
+import six
+
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse
+
+try:
+    from pathlib import Path
+except ImportError:
+    from pathlib2 import Path
+
+VCS_LIST = ("git", "svn", "hg", "bzr")
+SCHEME_LIST = ("http://", "https://", "ftp://", "ftps://", "file://")
+
+
+def is_vcs(pipfile_entry):
+    """Determine if dictionary entry from Pipfile is for a vcs dependency."""
+    import requirements
+    from .requirements import _clean_git_uri
+
+    if hasattr(pipfile_entry, "keys"):
+        return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
+
+    elif isinstance(pipfile_entry, six.string_types):
+        return bool(
+            requirements.requirement.VCS_REGEX.match(_clean_git_uri(pipfile_entry))
+        )
+
+    return False
+
+
+def get_converted_relative_path(path, relative_to=os.curdir):
+    """Given a vague relative path, return the path relative to the given location"""
+    return os.path.join(".", os.path.relpath(path, start=relative_to))
+
+
+def multi_split(s, split):
+    """Splits on multiple given separators."""
+    for r in split:
+        s = s.replace(r, "|")
+    return [i for i in s.split("|") if len(i) > 0]
+
+
+def is_star(val):
+    return isinstance(val, six.string_types) and val == "*"
+
+
+def is_installable_file(path):
+    """Determine if a path can potentially be installed"""
+    from ._compat import is_installable_dir, is_archive_file
+    from packaging import specifiers
+
+    if (
+        hasattr(path, "keys")
+        and any(key for key in path.keys() if key in ["file", "path"])
+    ):
+        path = urlparse(path["file"]).path if "file" in path else path["path"]
+    if not isinstance(path, six.string_types) or path == "*":
+        return False
+
+    # If the string starts with a valid specifier operator, test if it is a valid
+    # specifier set before making a path object (to avoid breaking windows)
+    if any(path.startswith(spec) for spec in "!=<>~"):
+        try:
+            specifiers.SpecifierSet(path)
+        # If this is not a valid specifier, just move on and try it as a path
+        except specifiers.InvalidSpecifier:
+            pass
+        else:
+            return False
+
+    if not os.path.exists(os.path.abspath(path)):
+        return False
+
+    lookup_path = Path(path)
+    absolute_path = "{0}".format(lookup_path.absolute())
+    if lookup_path.is_dir() and is_installable_dir(absolute_path):
+        return True
+
+    elif lookup_path.is_file() and is_archive_file(absolute_path):
+        return True
+
+    return False
+
+
+def is_valid_url(url):
+    """Checks if a given string is an url"""
+    pieces = urlparse(url)
+    return all([pieces.scheme, pieces.netloc])
+
+
+def pep423_name(name):
+    """Normalize package name to PEP 423 style standard."""
+    name = name.lower()
+    if not any(i in name for i in (VCS_LIST + SCHEME_LIST)):
+        return name.replace("_", "-")
+
+    else:
+        return name
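
These helpers classify raw dependency strings before the Requirement
classes take over. A few illustrative calls, with the results the
definitions above imply (not captured output):

    multi_split("requests>=2.18.4", "!=<>~")    # ['requests', '2.18.4']
    is_star("*")                                # True
    is_vcs({"git": "https://github.com/requests/requests.git"})  # True
    pep423_name("Django_Rest_Framework")        # 'django-rest-framework'
    get_converted_relative_path("pkg/subdir")   # './pkg/subdir' on POSIX
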
diff --git a/tasks/vendoring/__init__.py b/tasks/vendoring/__init__.py
index 1d58badd99..9bd14deea3 100644
--- a/tasks/vendoring/__init__.py
+++ b/tasks/vendoring/__init__.py
@@ -45,6 +45,9 @@
     'pytoml': 'https://github.com/avakar/pytoml/raw/master/LICENSE',
     'webencodings': 'https://github.com/SimonSapin/python-webencodings/raw/'
                     'master/LICENSE',
+    'requirementslib': 'https://github.com/techalchemy/requirementslib/raw/master/LICENSE',
+    'distlib': 'https://github.com/vsajip/distlib/raw/master/LICENSE.txt',
+    'pythonfinder': 'https://raw.githubusercontent.com/techalchemy/pythonfinder/master/LICENSE.txt'
 }
 
 FILE_WHITE_LIST = (

From 56f3d6191e6065388bb2121b00c54de83642eb8c Mon Sep 17 00:00:00 2001
From: Dan Ryan 
Date: Thu, 10 May 2018 17:57:30 -0400
Subject: [PATCH 10/17] Use vendored requirementslib for requirements

- Also update patch for pip

Signed-off-by: Dan Ryan 
---
 pipenv/vendor/requirementslib/__init__.py |  2 +-
 pipenv/vendor/requirementslib/utils.py    | 29 +++++++++++++++++++++++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py
index cff986d48a..ecd72ce604 100644
--- a/pipenv/vendor/requirementslib/__init__.py
+++ b/pipenv/vendor/requirementslib/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding=utf-8 -*-
-__version__ = "0.0.1"
+__version__ = "0.0.2"
 
 from .requirements import Requirement
diff --git a/pipenv/vendor/requirementslib/utils.py b/pipenv/vendor/requirementslib/utils.py
index 1f815e1be5..b9358a35b5 100644
--- a/pipenv/vendor/requirementslib/utils.py
+++ b/pipenv/vendor/requirementslib/utils.py
@@ -101,3 +101,32 @@ def pep423_name(name):
 
     else:
         return name
+
+
+def prepare_pip_source_args(sources, pip_args=None):
+    if pip_args is None:
+        pip_args = []
+    if sources:
+        # Add the source to pip9.
+        pip_args.extend(['-i', sources[0]['url']])
+        # Trust the host if it's not verified.
+        if not sources[0].get('verify_ssl', True):
+            pip_args.extend(
+                [
+                    '--trusted-host',
+                    urlparse(sources[0]['url']).netloc.split(':')[0],
+                ]
+            )
+        # Add additional sources as extra indexes.
+        if len(sources) > 1:
+            for source in sources[1:]:
+                pip_args.extend(['--extra-index-url', source['url']])
+                # Trust the host if it's not verified.
+                if not source.get('verify_ssl', True):
+                    pip_args.extend(
+                        [
+                            '--trusted-host',
+                            urlparse(source['url']).hostname,
+                        ]
+                    )
+    return pip_args
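
A quick sketch of the argument list prepare_pip_source_args builds from a
hypothetical two-source configuration (the second source is an assumed
internal mirror; output follows the logic above):

    sources = [
        {"url": "https://pypi.org/simple", "verify_ssl": True, "name": "pypi"},
        {"url": "http://pypi.internal:8080/simple", "verify_ssl": False, "name": "internal"},
    ]
    prepare_pip_source_args(sources)
    # ['-i', 'https://pypi.org/simple',
    #  '--extra-index-url', 'http://pypi.internal:8080/simple',
    #  '--trusted-host', 'pypi.internal']
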

From c58bf55385ccdf1833701f16ff2ee7e1ebe3c1a9 Mon Sep 17 00:00:00 2001
From: Dan Ryan 
Date: Thu, 10 May 2018 22:14:41 -0400
Subject: [PATCH 11/17] Vendor pyparsing for packaging

Signed-off-by: Dan Ryan 
---
 pipenv/vendor/pyparsing.LICENSE |   18 +
 pipenv/vendor/pyparsing.py      | 5720 +++++++++++++++++++++++++++++++
 pipenv/vendor/vendor.txt        |    1 +
 3 files changed, 5739 insertions(+)
 create mode 100644 pipenv/vendor/pyparsing.LICENSE
 create mode 100644 pipenv/vendor/pyparsing.py

diff --git a/pipenv/vendor/pyparsing.LICENSE b/pipenv/vendor/pyparsing.LICENSE
new file mode 100644
index 0000000000..bbc959e0d6
--- /dev/null
+++ b/pipenv/vendor/pyparsing.LICENSE
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/pipenv/vendor/pyparsing.py b/pipenv/vendor/pyparsing.py
new file mode 100644
index 0000000000..69be39f6ac
--- /dev/null
+++ b/pipenv/vendor/pyparsing.py
@@ -0,0 +1,5720 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2016  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
+(L{'+'} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
+ - quoted strings
+ - embedded comments
+"""
+
+__version__ = "2.2.0"
+__versionTime__ = "06 Mar 2017 02:06 UTC"
+__author__ = "Paul McGuire "
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+    from _thread import RLock
+except ImportError:
+    from threading import RLock
+
+try:
+    from collections import OrderedDict as _OrderedDict
+except ImportError:
+    try:
+        from ordereddict import OrderedDict as _OrderedDict
+    except ImportError:
+        _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    _ustr = str
+
+    # build list of single arg builtins, that can be used as parse actions
+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+           then < returns the unicode object | encodes it with the default encoding | ... >.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # Else encode it
+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+            xmlcharref = Regex(r'&#\d+;')
+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+            return xmlcharref.transformString(ret)
+
+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+    singleArgBuiltins = []
+    import __builtin__
+    for fname in "sum len sorted reversed list tuple set any all min max".split():
+        try:
+            singleArgBuiltins.append(getattr(__builtin__,fname))
+        except AttributeError:
+            continue
+            
+_generatorType = type((y for y in range(1)))
+ 
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
+
+class _Constants(object):
+    pass
+
+alphas     = string.ascii_uppercase + string.ascii_lowercase
+nums       = "0123456789"
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+        self.args = (pstr, loc, msg)
+
+    @classmethod
+    def _from_exception(cls, pe):
+        """
+        internal factory method to simplify creating one type of ParseException 
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+            - lineno - returns the line number of the exception text
+            - col - returns the column number of the exception text
+            - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join((line_str[:line_column],
+                                markerString, line_str[line_column:]))
+        return line_str.strip()
+    def __dir__(self):
+        return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+    """
+    Exception thrown when parse expressions don't match class;
+    supported attributes by name are:
+     - lineno - returns the line number of the exception text
+     - col - returns the column number of the exception text
+     - line - returns the line containing the exception text
+        
+    Example::
+        try:
+            Word(nums).setName("integer").parseString("ABC")
+        except ParseException as pe:
+            print(pe)
+            print("column: {}".format(pe.col))
+            
+    prints::
+       Expected integer (at char 0), (line:1, col:1)
+        column: 1
+    """
+    pass
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like L{ParseFatalException}, but thrown internally when an
+       L{ErrorStop} ('-' operator) indicates that parsing is to stop 
+       immediately because an unbacktrackable syntax error has been found"""
+    pass
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup[0])
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """
+    Structured parse results, to provide multiple means of access to the parsed data:
+       - as a list (C{len(results)})
+       - by list index (C{results[0], results[1]}, etc.)
+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+    Example::
+        integer = Word(nums)
+        date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+        # equivalent form:
+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+        # parseString returns a ParseResults object
+        result = date_str.parseString("1999/12/31")
+
+        def test(s, fn=repr):
+            print("%s -> %s" % (s, fn(eval(s))))
+        test("list(result)")
+        test("result[0]")
+        test("result['month']")
+        test("result.day")
+        test("'month' in result")
+        test("'minutes' in result")
+        test("result.dump()", str)
+    prints::
+        list(result) -> ['1999', '/', '12', '/', '31']
+        result[0] -> '1999'
+        result['month'] -> '12'
+        result.day -> '31'
+        'month' in result -> True
+        'minutes' in result -> False
+        result.dump() -> ['1999', '/', '12', '/', '31']
+        - day: 31
+        - month: 12
+        - year: 1999
+    """
+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            self.__asList = asList
+            self.__modal = modal
+            if toklist is None:
+                toklist = []
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            elif isinstance(toklist, _generatorType):
+                self.__toklist = list(toklist)
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,(int,slice)):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name,occurrences in self.__tokdict.items():
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return ( not not self.__toklist )
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def _iterkeys( self ):
+        if hasattr(self.__tokdict, "iterkeys"):
+            return self.__tokdict.iterkeys()
+        else:
+            return iter(self.__tokdict)
+
+    def _itervalues( self ):
+        return (self[k] for k in self._iterkeys())
+            
+    def _iteritems( self ):
+        return ((k, self[k]) for k in self._iterkeys())
+
+    if PY_3:
+        keys = _iterkeys       
+        """Returns an iterator of all named result keys (Python 3.x only)."""
+
+        values = _itervalues
+        """Returns an iterator of all named result values (Python 3.x only)."""
+
+        items = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+    else:
+        iterkeys = _iterkeys
+        """Returns an iterator of all named result keys (Python 2.x only)."""
+
+        itervalues = _itervalues
+        """Returns an iterator of all named result values (Python 2.x only)."""
+
+        iteritems = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+        def keys( self ):
+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iterkeys())
+
+        def values( self ):
+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.itervalues())
+                
+        def items( self ):
+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iteritems())
+
+    def haskeys( self ):
+        """Since keys() returns an iterator, this method is helpful in bypassing
+           code that looks for the existence of any defined results names."""
+        return bool(self.__tokdict)
+        
+    def pop( self, *args, **kwargs):
+        """
+        Removes and returns item at specified index (default=C{last}).
+        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+        argument or an integer argument, it will use C{list} semantics
+        and pop tokens from the list of parsed tokens. If passed a 
+        non-integer argument (most likely a string), it will use C{dict}
+        semantics and pop the corresponding value from any defined 
+        results names. A second default return value argument is 
+        supported, just as in C{dict.pop()}.
+
+        Example::
+            def remove_first(tokens):
+                tokens.pop(0)
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+            label = Word(alphas)
+            patt = label("LABEL") + OneOrMore(Word(nums))
+            print(patt.parseString("AAB 123 321").dump())
+
+            # Use pop() in a parse action to remove named result (note that corresponding value is not
+            # removed from list form of results)
+            def remove_LABEL(tokens):
+                tokens.pop("LABEL")
+                return tokens
+            patt.addParseAction(remove_LABEL)
+            print(patt.parseString("AAB 123 321").dump())
+        prints::
+            ['AAB', '123', '321']
+            - LABEL: AAB
+
+            ['AAB', '123', '321']
+        """
+        if not args:
+            args = [-1]
+        for k,v in kwargs.items():
+            if k == 'default':
+                args = (args[0], v)
+            else:
+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+        if (isinstance(args[0], int) or 
+                        len(args) == 1 or 
+                        args[0] in self):
+            index = args[0]
+            ret = self[index]
+            del self[index]
+            return ret
+        else:
+            defaultvalue = args[1]
+            return defaultvalue
+
+    def get(self, key, defaultValue=None):
+        """
+        Returns named result matching the given key, or if there is no
+        such name, then returns the given C{defaultValue} or C{None} if no
+        C{defaultValue} is specified.
+
+        Similar to C{dict.get()}.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            result = date_str.parseString("1999/12/31")
+            print(result.get("year")) # -> '1999'
+            print(result.get("hour", "not specified")) # -> 'not specified'
+            print(result.get("hour")) # -> None
+        """
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """
+        Inserts new element at location index in the list of parsed tokens.
+        
+        Similar to C{list.insert()}.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to insert the parse location in the front of the parsed results
+            def insert_locn(locn, tokens):
+                tokens.insert(0, locn)
+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+        """
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name,occurrences in self.__tokdict.items():
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def append( self, item ):
+        """
+        Add single element to end of ParseResults list of elements.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            
+            # use a parse action to compute the sum of the parsed integers, and add it to the end
+            def append_sum(tokens):
+                tokens.append(sum(map(int, tokens)))
+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+        """
+        self.__toklist.append(item)
+
+    def extend( self, itemseq ):
+        """
+        Add sequence of elements to end of ParseResults list of elements.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            
+            # use a parse action to append the reverse of the matched strings, to make a palindrome
+            def make_palindrome(tokens):
+                tokens.extend(reversed([t[::-1] for t in tokens]))
+                return ''.join(tokens)
+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+        """
+        if isinstance(itemseq, ParseResults):
+            self += itemseq
+        else:
+            self.__toklist.extend(itemseq)
+
+    def clear( self ):
+        """
+        Clear all elements and results names.
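+
+        Example (an illustrative sketch, not from the upstream docs)::
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            result.clear()
+            print(result)  # -> []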
+        """
+        del self.__toklist[:]
+        self.__tokdict.clear()
+
+    def __getattr__( self, name ):
+        try:
+            return self[name]
+        except KeyError:
+            return ""
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
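+            # re-anchor other's named-result offsets past our current tokens:
+            # negative offsets are pinned to the join point, all others are
+            # shifted by the current token count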
+            offset = len(self.__toklist)
+            addoffset = lambda a: offset if a<0 else a+offset
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+            
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            # useful for merging many ParseResults using sum() builtin
+            return self.copy()
+        else:
+            # this may raise a TypeError - so be it
+            return other + self
+        
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """
+        Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+            
+            # Use asList() to create an actual list
+            result_list = result.asList()
+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+        """
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+    def asDict( self ):
+        """
+        Returns the named parse results as a nested dictionary.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+            
+            result_dict = result.asDict()
+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+            import json
+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+        """
+        if PY_3:
+            item_fn = self.items
+        else:
+            item_fn = self.iteritems
+            
+        def toItem(obj):
+            if isinstance(obj, ParseResults):
+                if obj.haskeys():
+                    return obj.asDict()
+                else:
+                    return [toItem(v) for v in obj]
+            else:
+                return obj
+                
+        return dict((k,toItem(v)) for k,v in item_fn())
+
+    def copy( self ):
+        """
+        Returns a new copy of a C{ParseResults} object.
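+
+        Example (an illustrative sketch, not from the upstream docs)::
+            integer = Word(nums)
+            result = integer("value").parseString("42")
+            result_copy = result.copy()
+            result_copy.append("extra")
+            print(result)       # -> ['42']
+            print(result_copy)  # -> ['42', 'extra']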
+        """
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = self.__tokdict.copy()
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """
+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+        """
+        nl = "\n"
+        out = []
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist)
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        for i,res in enumerate(self.__toklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        r"""
+        Returns the results name for this token expression. Useful when several 
+        different expressions might match at a particular location.
+
+        Example::
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number") 
+                        | Group(ssn_expr)("ssn")
+                        | Group(integer)("age"))
+            user_info = OneOrMore(user_data)
+            
+            result = user_info.parseString("22 111-22-3333 #221B")
+            for item in result:
+                print(item.getName(), ':', item[0])
+        prints::
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+        """
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+            return next(iter(self.__tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent='', depth=0, full=True):
+        """
+        Diagnostic method for listing out the contents of a C{ParseResults}.
+        Accepts an optional C{indent} argument so that this string can be embedded
+        in a nested display of other data.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(result.dump())
+        prints::
+            ['12', '/', '31', '/', '1999']
+            - day: 1999
+            - month: 31
+            - year: 12
+        """
+        out = []
+        NL = '\n'
+        out.append( indent+_ustr(self.asList()) )
+        if full:
+            if self.haskeys():
+                items = sorted((str(k), v) for k,v in self.items())
+                for k,v in items:
+                    if out:
+                        out.append(NL)
+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+                    if isinstance(v,ParseResults):
+                        if v:
+                            out.append( v.dump(indent,depth+1) )
+                        else:
+                            out.append(_ustr(v))
+                    else:
+                        out.append(repr(v))
+            elif any(isinstance(vv,ParseResults) for vv in self):
+                v = self
+                for i,vv in enumerate(v):
+                    if isinstance(vv,ParseResults):
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
+                    else:
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
+            
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the C{pprint} module.
+        Accepts additional positional or keyword args as defined for the 
+        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+        Example::
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(delimitedList(term)))
+            result = func.parseString("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+        prints::
+            ['fna',
+             ['a',
+              'b',
+              ['(', 'fnb', ['c', 'd', '200'], ')'],
+              '100']]
+        """
+        pprint.pprint(self.asList(), *args, **kwargs)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
+
+    def __getnewargs__(self):
+        return self.__toklist, self.__name, self.__asList, self.__modal
+
+    def __dir__(self):
+        return (dir(type(self)) + list(self.keys()))
+
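+# Register ParseResults as a virtual MutableMapping so isinstance() mapping
+# checks succeed; abc registration adds no methods or behavior of its own.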
+collections.MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    s = strg
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+   The first line is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
+
+def _defaultStartDebugAction( instring, loc, expr ):
+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+    #~ if func in singleArgBuiltins:
+        #~ return lambda s,l,t: func(t)
+    #~ limit = 0
+    #~ foundArity = False
+    #~ def wrapper(*args):
+        #~ nonlocal limit,foundArity
+        #~ while 1:
+            #~ try:
+                #~ ret = func(*args[limit:])
+                #~ foundArity = True
+                #~ return ret
+            #~ except TypeError:
+                #~ if limit == maxargs or foundArity:
+                    #~ raise
+                #~ limit += 1
+                #~ continue
+    #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+    if func in singleArgBuiltins:
+        return lambda s,l,t: func(t)
+    limit = [0]
+    foundArity = [False]
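+    # single-element lists (rather than bare int/bool) let the nested wrapper
+    # mutate these values without 'nonlocal', which Python 2 does not support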
+    
+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+    if system_version[:2] >= (3,5):
+        def extract_stack(limit=0):
+            # special handling for Python 3.5.0 - extra deep call stack by 1
+            offset = -3 if system_version == (3,5,0) else -2
+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+            return [(frame_summary.filename, frame_summary.lineno)]
+        def extract_tb(tb, limit=0):
+            frames = traceback.extract_tb(tb, limit=limit)
+            frame_summary = frames[-1]
+            return [(frame_summary.filename, frame_summary.lineno)]
+    else:
+        extract_stack = traceback.extract_stack
+        extract_tb = traceback.extract_tb
+    
+    # synthesize what would be returned by traceback.extract_stack at the call to 
+    # user's parse action 'func', so that we don't incur call penalty at parse time
+    
+    LINE_DIFF = 6
+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+    this_line = extract_stack(limit=2)[-1]
+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+    def wrapper(*args):
+        while 1:
+            try:
+                ret = func(*args[limit[0]:])
+                foundArity[0] = True
+                return ret
+            except TypeError:
+                # re-raise TypeErrors if they did not come from our arity testing
+                if foundArity[0]:
+                    raise
+                else:
+                    try:
+                        tb = sys.exc_info()[-1]
+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+                            raise
+                    finally:
+                        del tb
+
+                if limit[0] <= maxargs:
+                    limit[0] += 1
+                    continue
+                raise
+
+    # copy func name to wrapper for sensible debug output
+    func_name = "<parse action>"
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    wrapper.__name__ = func_name
+
+    return wrapper
+
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    @staticmethod
+    def setDefaultWhitespaceChars( chars ):
+        r"""
+        Overrides the default whitespace chars
+
+        Example::
+            # default whitespace chars are space, <TAB> and newline
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
+            
+            # change to just treat newline as significant
+            ParserElement.setDefaultWhitespaceChars(" \t")
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+
+    @staticmethod
+    def inlineLiteralsUsing(cls):
+        """
+        Set class to be used for inclusion of string literals into a parser.
+        
+        Example::
+            # default literal class used is Literal
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+
+            # change to Suppress
+            ParserElement.inlineLiteralsUsing(Suppress)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
+        """
+        ParserElement._literalStringClass = cls
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """
+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
+        for the same parsing pattern, using copies of the original parse element.
+        
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+            
+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+        prints::
+            [5120, 100, 655360, 268435456]
+        Equivalent form of C{expr.copy()} is just C{expr()}::
+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+        """
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """
+        Define a name for this expression, to make debugging and exception messages clearer.
+        
+        Example::
+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """
+        Define name for referencing matching tokens as a nested attribute
+        of the returned parse results.
+        NOTE: this returns a *copy* of the original C{ParserElement} object;
+        this is so that the client can define a basic element, such as an
+        integer, and reference it in multiple places with different names.
+
+        You can also set results names using the abbreviated syntax,
+        C{expr("name")} in place of C{expr.setResultsName("name")} - 
+        see L{I{__call__}<__call__>}.
+
+        Example::
+            date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+
+            # equivalent form:
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set C{breakFlag} to True to enable, False to
+           disable.
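+
+           Example (illustrative; this starts an interactive pdb session)::
+               integer = Word(nums).setBreak()
+               # pdb.set_trace() fires just before 'integer' is parsed:
+               # integer.parseString("123")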
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
+
+    def setParseAction( self, *fns, **kwargs ):
+        """
+        Define one or more actions to perform when successfully matching parse element definition.
+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+         - s   = the original string being parsed (see note below)
+         - loc = the location of the matching substring
+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+        If the functions in fns modify the tokens, they can return them as the return
+        value from fn, and the modified list of tokens will replace the original.
+        Otherwise, fn does not need to return any value.
+
+        Optional keyword arguments:
+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+        consistent view of the parsed string, the parse location, and line and column
+        positions within the parsed string.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+            # use parse action to convert to ints at parse time
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            date_str = integer + '/' + integer + '/' + integer
+
+            # note that integer fields are now ints, not strings
+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
+        """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = kwargs.get("callDuringTry", False)
+        return self
+
+    def addParseAction( self, *fns, **kwargs ):
+        """
+        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+        
+        See examples in L{I{copy}<copy>}.
+        """
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def addCondition(self, *fns, **kwargs):
+        """Add a boolean predicate function to expression's list of parse actions. See 
+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
+        functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+         - message = define a custom message to be used in the raised exception
+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+         
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+        """
+        msg = kwargs.get("message", "failed user-defined condition")
+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+        for fn in fns:
+            # bind fn at definition time via a default argument; a late-binding
+            # closure would make every condition test only the last fn in fns
+            def pa(s,l,t, fn=fn):
+                if not bool(_trim_arity(fn)(s,l,t)):
+                    raise exc_type(s,l,msg)
+            self.parseAction.append(pa)
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           C{fn(s,loc,expr,err)} where:
+            - s = string being parsed
+            - loc = location where expression match was attempted and failed
+            - expr = the parse expression that failed
+            - err = the exception thrown
+           The function returns no value.  It may throw C{L{ParseFatalException}}
+           if it is desired to stop parsing immediately."""
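+        # e.g. (illustrative, not from the upstream docs):
+        #   expr.setFailAction(
+        #       lambda s, loc, pe, err: print("match failed for %s at loc %s" % (pe, loc)))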
+        self.failAction = fn
+        return self
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
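+        # skip any ignorable expressions, then leading whitespace, and return
+        # the location where the actual match attempt should begin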
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException as err:
+                #~ print ("Exception raised:", err)
+                if self.debugActions[2]:
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or loc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        tokens = fn( instring, tokensStart, retTokens )
+                        if tokens is not None:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException as err:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    tokens = fn( instring, tokensStart, retTokens )
+                    if tokens is not None:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+    
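+    # (illustrative note: canParseNext() below gives a non-consuming lookahead
+    #  test, e.g. 'if expr.canParseNext(instring, loc): ...')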
+    def canParseNext(self, instring, loc):
+        try:
+            self.tryParse(instring, loc)
+        except (ParseException, IndexError):
+            return False
+        else:
+            return True
+
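+    # The cache classes below share a duck-typed get/set/clear/len interface;
+    # their methods are closures over a local dict, keeping lookups cheap on
+    # the hot path used by packrat memoization in _parseCache().
+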
+    class _UnboundedCache(object):
+        def __init__(self):
+            cache = {}
+            self.not_in_cache = not_in_cache = object()
+
+            def get(self, key):
+                return cache.get(key, not_in_cache)
+
+            def set(self, key, value):
+                cache[key] = value
+
+            def clear(self):
+                cache.clear()
+                
+            def cache_len(self):
+                return len(cache)
+
+            self.get = types.MethodType(get, self)
+            self.set = types.MethodType(set, self)
+            self.clear = types.MethodType(clear, self)
+            self.__len__ = types.MethodType(cache_len, self)
+
+    if _OrderedDict is not None:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = _OrderedDict()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(cache) > size:
+                        try:
+                            cache.popitem(False)
+                        except KeyError:
+                            pass
+
+                def clear(self):
+                    cache.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    else:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = {}
+                # note: unbounded deque; a maxlen'd deque would silently drop
+                # old keys, stranding their entries in the cache dict forever
+                key_fifo = collections.deque()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(key_fifo) > size:
+                        cache.pop(key_fifo.popleft(), None)
+                    key_fifo.append(key)
+
+                def clear(self):
+                    cache.clear()
+                    key_fifo.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        HIT, MISS = 0, 1
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy()))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if isinstance(value, Exception):
+                    raise value
+                return (value[0], value[1].copy())
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def resetCache():
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Both valid results
+           and parsing exceptions are memoized.
+           
+           Parameters:
+            - cache_size_limit - (default=C{128}) - if an integer value is provided
+              will limit the size of the packrat cache; if None is passed, then
+              the cache size will be unbounded; if 0 is passed, the cache will
+              be effectively disabled.
+            
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method C{ParserElement.enablePackrat()}.  If
+           your program uses C{psyco} to "compile as you go", you must call
+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+           Python will crash.  For best results, call C{enablePackrat()} immediately
+           after importing pyparsing.
+           
+           Example::
+               import pyparsing
+               pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set C{parseAll} to True (equivalent to ending
+        the grammar with C{L{StringEnd()}}).
+
+        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the C{loc} argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+         - calling C{parseWithTabs} on your grammar before calling C{parseString}
+           (see L{I{parseWithTabs}<parseWithTabs>})
+         - define your parse action using the full C{(s,loc,toks)} signature, and
+           reference the input string using the parse action's C{s} argument
+         - explicitly expand the tabs in your input string before calling
+           C{parseString}
+        
+        Example::
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.  May be called with optional
+        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
+        C{overlap} is specified, then overlapping matches will be reported.
+
+        Note that the start and end locations are reported relative to the string
+        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
+        strings with embedded tabs.
+
+        Example::
+            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+            print(source)
+            for tokens,start,end in Word(alphas).scanString(source):
+                print(' '*start + '^'*(end-start))
+                print(' '*start + tokens[0])
+        
+        prints::
+        
+            sldjf123lsdjjkf345sldkjf879lkjsfd987
+            ^^^^^
+            sldjf
+                    ^^^^^^^
+                    lsdjjkf
+                              ^^^^^^
+                              sldkjf
+                                       ^^^^^^
+                                       lkjsfd
+        """
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def transformString( self, instring ):
+        """
+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+        be returned from a parse action.  To use C{transformString}, define a grammar and
+        attach a parse action to it that modifies the returned token list.
+        Invoking C{transformString()} on a target string will then scan for matches,
+        and replace the matched text patterns according to the logic in the parse
+        action.  C{transformString()} returns the resulting transformed string.
+        
+        Example::
+            wd = Word(alphas)
+            wd.setParseAction(lambda toks: toks[0].title())
+            
+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+        Prints::
+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+        """
+        out = []
+        lastE = 0
+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """
+        Another extension to C{L{scanString}}, simplifying the access to the tokens found
+        to match the given parse expression.  May be called with optional
+        C{maxMatches} argument, to clip searching after 'n' matches are found.
+        
+        Example::
+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+            cap_word = Word(alphas.upper(), alphas.lower())
+            
+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+            # the sum() builtin can be used to merge results into a single ParseResults object
+            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+        prints::
+            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+        """
+        Generator method to split a string using the given expression as a separator.
+        May be called with optional C{maxsplit} argument, to limit the number of splits;
+        and the optional C{includeSeparators} argument (default=C{False}), indicating whether the
+        separating matched text should be included in the split results.
+        
+        Example::        
+            punc = oneOf(list(".,;:/-!?"))
+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+        prints::
+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+        """
+        splits = 0
+        last = 0
+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+            yield instring[last:s]
+            if includeSeparators:
+                yield t[0]
+            last = e
+        yield instring[last:]
+
+    def __add__(self, other ):
+        """
+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+        converts them to L{Literal}s by default.
+        
+        Example::
+            greet = Word(alphas) + "," + Word(alphas) + "!"
+            hello = "Hello, World!"
+            print (hello, "->", greet.parseString(hello))
+        Prints::
+            Hello, World! -> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.  If this behavior is desired, then write
+        C{expr*(None,n) + ~expr}
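+
+        A brief sketch of the tuple forms (with a hypothetical C{ab} literal)::
+            ab = Literal("ab")
+            (ab*(1,3)).parseString("ababab")  # -> ['ab', 'ab', 'ab']
+            (ab*(2,)).parseString("abab")     # -> ['ab', 'ab']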
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns C{L{MatchFirst}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns C{L{Or}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns C{L{Each}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """
+        Implementation of & operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """
+        Implementation of ~ operator - returns C{L{NotAny}}
+        """
+        return NotAny( self )
+
+    def __call__(self, name=None):
+        """
+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+        
+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+        passed as C{True}.
+           
+        If C{name} is omitted, same as calling C{L{copy}}.
+
+        Example::
+            # these are equivalent
+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
+        """
+        if name is not None:
+            return self.setResultsName(name)
+        else:
+            return self.copy()
+
+    def suppress( self ):
+        """
+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+        cluttering up returned output.
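+
+        A small illustrative sketch (hypothetical comma-separated pair)::
+            wd_pair = Word(alphas) + Literal(',').suppress() + Word(alphas)
+            print(wd_pair.parseString("abc,def"))  # -> ['abc', 'def']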
+        """
+        return Suppress( self )
+
+    def leaveWhitespace( self ):
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        C{ParserElement}'s defined pattern.  This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
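+
+        A small sketch of the effect (assumed inputs)::
+            Word(alphas).parseString("  abc")                    # -> ['abc']
+            Word(alphas).leaveWhitespace().parseString("  abc")  # -> ParseException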
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """
+        Overrides the default whitespace chars
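+
+        A minimal sketch (hypothetical newline-sensitive grammar)::
+            # skip only spaces and tabs; newlines must then be matched explicitly
+            wd = Word(alphas).setWhitespaceChars(" \\t")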
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
+
+    def parseWithTabs( self ):
+        """
+        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+        Must be called before C{parseString} when the input grammar contains elements that
+        match C{<TAB>} characters.
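+
+        A minimal sketch (hypothetical tab-separated input)::
+            tsv_pair = Word(alphas) + White("\\t").suppress() + Word(alphas)
+            tsv_pair.parseWithTabs()
+            print(tsv_pair.parseString("abc\\tdef"))  # -> ['abc', 'def']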
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+        
+        Example::
+            patt = OneOrMore(Word(alphas))
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+            
+            patt.ignore(cStyleComment)
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, basestring):
+            other = Suppress(other)
+
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        """
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
+
+    def setDebug( self, flag=True ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set C{flag} to True to enable, False to disable.
+
+        Example::
+            wd = Word(alphas).setName("alphaword")
+            integer = Word(nums).setName("numword")
+            term = wd | integer
+            
+            # turn on debugging for wd
+            wd.setDebug()
+
+            OneOrMore(term).parseString("abc 123 xyz 890")
+        
+        prints::
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using L{setDebugActions}. Prior to attempting
+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+        """
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+        """
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """
+        Execute the parse expression on the given file or filename.
+        If a filename is specified (instead of a file object),
+        the entire file is opened, read, and closed before parsing.
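+
+        A minimal usage sketch (assuming a file C{"data.txt"} exists)::
+            grammar = OneOrMore(Word(alphas))
+            result = grammar.parseFile("data.txt")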
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            with open(file_or_filename, "r") as f:
+                file_contents = f.read()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or vars(self) == vars(other)
+        elif isinstance(other, basestring):
+            return self.matches(other)
+        else:
+            return super(ParserElement,self)==other
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
+
+    def matches(self, testString, parseAll=True):
+        """
+        Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+           
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+            
+        Example::
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+                
+    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test, the parsed results or where the parse failed. Quick and easy way to
+        run a parse expression against a list of sample strings.
+           
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
+         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
+              string; pass None to disable comment filtering
+         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default=C{True}) prints test output to stdout
+         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
+        test's output
+        
+        Example::
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+        prints::
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+            
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+        
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+        """
+        if isinstance(tests, basestring):
+            tests = list(map(str.strip, tests.rstrip().splitlines()))
+        if isinstance(comment, basestring):
+            comment = Literal(comment)
+        allResults = []
+        comments = []
+        success = True
+        for t in tests:
+            if comment is not None and comment.matches(t, False) or comments and not t:
+                comments.append(t)
+                continue
+            if not t:
+                continue
+            out = ['\n'.join(comments), t]
+            comments = []
+            try:
+                t = t.replace(r'\n','\n')
+                result = self.parseString(t, parseAll=parseAll)
+                out.append(result.dump(full=fullDump))
+                success = success and not failureTests
+            except ParseBaseException as pe:
+                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+                if '\n' in t:
+                    out.append(line(pe.loc, t))
+                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+                else:
+                    out.append(' '*pe.loc + '^' + fatal)
+                out.append("FAIL: " + str(pe))
+                success = success and failureTests
+                result = pe
+            except Exception as exc:
+                out.append("FAIL-EXCEPTION: " + str(exc))
+                success = success and failureTests
+                result = exc
+
+            if printResults:
+                if fullDump:
+                    out.append('')
+                print('\n'.join(out))
+
+            allResults.append((t, result))
+        
+        return success, allResults
+
+        
+class Token(ParserElement):
+    """
+    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+    """
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+    """
+    An empty token, will always match.
+    """
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """
+    A token that will never match.
+    """
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+    """
+    Token to exactly match a specified string.
+    
+    Example::
+        Literal('blah').parseString('blah')  # -> ['blah']
+        Literal('blah').parseString('blahfooblah')  # -> ['blah']
+        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
+    
+    For case-insensitive matching, use L{CaselessLiteral}.
+    
+    For keyword matching (force word break before and after the matched string),
+    use L{Keyword} or L{CaselessKeyword}.
+    """
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*
+    # if this is a single character match string  and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+    """
+    Token to exactly match a specified string as a keyword, that is, it must be
+    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
+     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+    Accepts two optional constructor arguments in addition to the keyword string:
+     - C{identChars} is a string of characters that would be valid identifier characters,
+          defaulting to all alphanumerics + "_" and "$"
+     - C{caseless} allows case-insensitive matching, default is C{False}.
+       
+    Example::
+        Keyword("start").parseString("start")  # -> ['start']
+        Keyword("start").parseString("starting")  # -> Exception
+
+    For case-insensitive matching, use L{CaselessKeyword}.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=None, caseless=False ):
+        super(Keyword,self).__init__()
+        if identChars is None:
+            identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+    def copy(self):
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    @staticmethod
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+    """
+    Token to match a specified string, ignoring case of letters.
+    Note: the matched results will always be in the case of the given
+    match string, NOT the case of the input text.
+
+    Example::
+        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessKeyword}.)
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+    """
+    Caseless version of L{Keyword}.
+
+    Example::
+        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessLiteral}.)
+    """
+    def __init__( self, matchString, identChars=None ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+    """
+    A variation on L{Literal} which matches "close" matches, that is, 
+    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+     - C{match_string} - string to be matched
+     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
+    
+    The results from a successful parse will contain the matched text from the input string and the following named results:
+     - C{mismatches} - a list of the positions within the match_string where mismatches were found
+     - C{original} - the original match_string used to compare against the input string
+    
+    If C{mismatches} is an empty list, then the match was an exact match.
+    
+    Example::
+        patt = CloseMatch("ATCATCGAATGGA")
+        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+        # exact match
+        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+        # close match allowing up to 2 mismatches
+        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+    """
+    def __init__(self, match_string, maxMismatches=1):
+        super(CloseMatch,self).__init__()
+        self.name = match_string
+        self.match_string = match_string
+        self.maxMismatches = maxMismatches
+        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+        self.mayIndexError = False
+        self.mayReturnEmpty = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        start = loc
+        instrlen = len(instring)
+        maxloc = start + len(self.match_string)
+
+        if maxloc <= instrlen:
+            match_string = self.match_string
+            match_stringloc = 0
+            mismatches = []
+            maxMismatches = self.maxMismatches
+
+            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+                src,mat = s_m
+                if src != mat:
+                    mismatches.append(match_stringloc)
+                    if len(mismatches) > maxMismatches:
+                        break
+            else:
+                # advance past the last compared character, relative to the match start
+                loc = start + match_stringloc + 1
+                results = ParseResults([instring[start:loc]])
+                results['original'] = self.match_string
+                results['mismatches'] = mismatches
+                return loc, results
+
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+    """
+    Token for matching words composed of allowed character sets.
+    Defined with string containing all allowed initial characters,
+    an optional string containing allowed body characters (if omitted,
+    defaults to the initial character set), and an optional minimum,
+    maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction. An optional
+    C{excludeChars} parameter can list characters that might be found in 
+    the input C{bodyChars} string; useful to define a word of all printables
+    except for one or two characters, for instance.
+    
+    L{srange} is useful for defining custom character set strings for defining 
+    C{Word} expressions, using range notation from regular expression character sets.
+    
+    A common mistake is to use C{Word} to match a specific literal string, as in 
+    C{Word("Address")}. Remember that C{Word} uses the string argument to define
+    I{sets} of matchable characters. This expression would match "Add", "AAA",
+    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+    To match an exact literal string, use L{Literal} or L{Keyword}.
+
+    pyparsing includes helper strings for building Words:
+     - L{alphas}
+     - L{nums}
+     - L{alphanums}
+     - L{hexnums}
+     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
+     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+     - L{printables} (any non-whitespace character)
+
+    Example::
+        # a word composed of digits
+        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+        
+        # a word with a leading capital, and zero or more lowercase
+        capital_word = Word(alphas.upper(), alphas.lower())
+
+        # hostnames are alphanumeric, with leading alpha, and '-'
+        hostname = Word(alphas, alphanums+'-')
+        
+        # roman numeral (not a strict parser, accepts invalid mix of characters)
+        roman = Word("IVXLCDM")
+        
+        # any string of non-whitespace characters, except for ','
+        csv_value = Word(printables, excludeChars=",")
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            initChars = ''.join(c for c in initChars if c not in excludeChars)
+            if bodyChars:
+                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.initCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except Exception:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                raise ParseException(instring, loc, self.errmsg, self)
+
+            loc = result.end()
+            return loc, result.group()
+
+        if not(instring[ loc ] in self.initChars):
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        if self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
+
+class Regex(Token):
+    r"""
+    Token for matching strings that match a given regular expression.
+    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
+    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
+    named parse results.
+
+    Example::
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0):
+        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if not pattern:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = \
+            self.reString = str(pattern)
+            self.flags = flags
+            
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        d = result.groupdict()
+        ret = ParseResults(result.group())
+        if d:
+            for k in d:
+                ret[k] = d[k]
+        return loc,ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
+
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+    
+    Defined with the following parameters:
+        - quoteChar - string of one or more characters defining the quote delimiting string
+        - escChar - character to escape quotes, typically backslash (default=C{None})
+        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+    Example::
+        qs = QuotedString('"')
+        print(qs.searchString('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', endQuoteChar='}}')
+        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', escQuote='""')
+        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+    prints::
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped whitespace
+                if '\\' in ret and self.convertWhitespaceEscapes:
+                    ws_map = {
+                        r'\t' : '\t',
+                        r'\n' : '\n',
+                        r'\f' : '\f',
+                        r'\r' : '\r',
+                    }
+                    for wslit,wschar in ws_map.items():
+                        ret = ret.replace(wslit, wschar)
+
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
+
+
+class CharsNotIn(Token):
+    """
+    Token for matching words composed of characters I{not} in a given set (will
+    include whitespace in matched characters if not listed in the provided exclusion set - see example).
+    Defined with string containing all disallowed characters, and an optional
+    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction.
+
+    Example::
+        # define a comma-separated-value as anything that is not a ','
+        csv_value = CharsNotIn(',')
+        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+    prints::
+        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
+
+class White(Token):
+    """
+    Special matching class for matching whitespace.  Normally, whitespace is ignored
+    by pyparsing grammars.  This class is included when some whitespace structures
+    are significant.  Define with a string containing the whitespace characters to be
+    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
+    as defined for the C{L{Word}} class.
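+
+    A minimal sketch (hypothetical newline-sensitive grammar)::
+        # match two words separated by a required newline
+        two_lines = Word(alphas) + White("\\n").suppress() + Word(alphas)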
+    """
+    whiteStrs = {
+        " " : "",
+        "\t": "",
+        "\n": "",
+        "\r": "",
+        "\f": "",
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if not(instring[ loc ] in self.matchWhite):
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """
+    Token to advance to a specific column of input text; useful for tabular report scraping.
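+
+    A minimal sketch (hypothetical fixed-column report)::
+        # advance to column 10 of the line, then capture the rest of the line
+        col_value = GoToColumn(10) + restOfLine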
+    """
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
+
+class LineStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of a line within the parse string
+    
+    Example::
+    
+        test = '''\
+        AAA this line
+        AAA and this line
+          AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+            print(t)
+    
+    Prints::
+        ['AAA', ' this line']
+        ['AAA', ' and this line']    
+
+    """
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.errmsg = "Expected start of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+    """
+    Matches if current position is at the end of a line within the parse string
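+
+    A minimal sketch (assuming line-oriented input)::
+        # one word per line; the matched newline is suppressed
+        line_word = Word(alphas) + LineEnd().suppress()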
+    """
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+    """
+    Matches if the current position is at the beginning of a Word, and
+    is not preceded by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+    the string being parsed, or at the beginning of a line.
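+
+    A minimal sketch (emulating a regex word boundary; hypothetical grammar)::
+        # match 'key' only where an alphanumeric run begins
+        key_start = WordStart(alphanums) + Literal('key')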
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class WordEnd(_PositionToken):
+    """
+    Matches if the current position is at the end of a Word, and
+    is not followed by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+    the string being parsed, or at the end of a line.
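+
+    A minimal sketch (mirror of the C{WordStart} sketch)::
+        # match 'key' only where an alphanumeric run ends
+        key_end = Literal('key') + WordEnd(alphanums)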
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+
+class Or(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the expression that matches the longest string will be used.
+    May be constructed using the C{'^'} operator.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        matches = []
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException as err:
+                err.__traceback__ = None
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                # save match among all matches, to retry longest to shortest
+                matches.append((loc2, e))
+
+        if matches:
+            matches.sort(key=lambda x: -x[0])
+            for _,e in matches:
+                try:
+                    return e._parse( instring, loc, doActions )
+                except ParseException as err:
+                    err.__traceback__ = None
+                    if err.loc > maxExcLoc:
+                        maxException = err
+                        maxExcLoc = err.loc
+
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the first one listed is the one that will match.
+    May be constructed using the C{'|'} operator.
+
+    Example::
+        # construct MatchFirst using '|' operator
+        
+        # watch the order of expressions to match
+        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+        # put more selective expression first
+        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException as err:
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        # only got here if no expression matched, raise exception for match that made it the furthest
+        else:
+            if maxException is not None:
+                maxException.msg = self.errmsg
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found, but in any order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'&'} operator.
+
+    Example::
+        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+        integer = Word(nums)
+        shape_attr = "shape:" + shape_type("shape")
+        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+        color_attr = "color:" + color("color")
+        size_attr = "size:" + integer("size")
+
+        # use Each (using operator '&') to accept attributes in any order 
+        # (shape and posn are required, color and size are optional)
+        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+        shape_spec.runTests('''
+            shape: SQUARE color: BLACK posn: 100, 120
+            shape: CIRCLE size: 50 color: BLUE posn: 50,80
+            color:GREEN size:20 shape:TRIANGLE posn:20,40
+            '''
+            )
+    prints::
+        shape: SQUARE color: BLACK posn: 100, 120
+        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+        - color: BLACK
+        - posn: ['100', ',', '120']
+          - x: 100
+          - y: 120
+        - shape: SQUARE
+
+
+        shape: CIRCLE size: 50 color: BLUE posn: 50,80
+        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+        - color: BLUE
+        - posn: ['50', ',', '80']
+          - x: 50
+          - y: 80
+        - shape: CIRCLE
+        - size: 50
+
+
+        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+        - color: GREEN
+        - posn: ['20', ',', '40']
+          - x: 20
+          - y: 40
+        - shape: TRIANGLE
+        - size: 20
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.skipWhitespace = True
+        self.initExprGroups = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(self.opt1map.get(id(e),e))
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join(_ustr(e) for e in tmpReqd)
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = sum(resultlist, ParseResults([]))
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+    """
+    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            if issubclass(ParserElement._literalStringClass, Token):
+                expr = ParserElement._literalStringClass(expr)
+            else:
+                expr = ParserElement._literalStringClass(Literal(expr))
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        self.expr = self.expr.copy()
+        if self.expr is not None:
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """
+    Lookahead matching of the given parse expression.  C{FollowedBy}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression matches at the current
+    position.  C{FollowedBy} always returns a null token list.
+
+    Example::
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+    prints::
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression does I{not} match at the current
+    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator.
+
+    Example::
+        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+        # take care not to mistake keywords for identifiers
+        ident = ~(AND | OR | NOT) + Word(alphas)
+        boolean_term = Optional(NOT) + ident
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+        
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = (not not self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+        
+class OneOrMore(_MultipleMatch):
+    """
+    Repetition of one or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+        
+        # could also be written as
+        (attr_expr * (1,)).parseString(text).pprint()
+    """
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+    """
+    Optional repetition of zero or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match zero or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example: similar to L{OneOrMore}
+    """
+    def __init__( self, expr, stopOn=None):
+        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+        self.mayReturnEmpty = True
+        
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+        except (ParseException,IndexError):
+            return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
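+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source): unlike OneOrMore, ZeroOrMore succeeds with an empty result when
+# nothing matches:
+#
+#   ZeroOrMore(Word(nums)).parseString("abc", parseAll=False)   # -> []
+#   OneOrMore(Word(nums)).parseString("1 2 3")                  # -> ['1', '2', '3']
+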
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """
+    Optional matching of the given expression.
+
+    Parameters:
+     - expr - expression that must match zero or more times
+     - default (optional) - value to be returned if the optional expression is not found.
+
+    Example::
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+        zip.runTests('''
+            # traditional ZIP code
+            12345
+            
+            # ZIP+4 form
+            12101-0001
+            
+            # invalid ZIP
+            98765-
+            ''')
+    prints::
+        # traditional ZIP code
+        12345
+        ['12345']
+
+        # ZIP+4 form
+        12101-0001
+        ['12101-0001']
+
+        # invalid ZIP
+        98765-
+             ^
+        FAIL: Expected end of text (at char 5), (line:1, col:6)
+    """
+    def __init__( self, expr, default=_optionalNotMatched ):
+        super(Optional,self).__init__( expr, savelist=False )
+        self.saveAsList = self.expr.saveAsList
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+    """
+    Token for skipping over all undefined text until the matched expression is found.
+
+    Parameters:
+     - expr - target expression marking the end of the data to be skipped
+     - include - (default=C{False}) if True, the target expression is also parsed 
+          (the skipped text and target expression are returned as a 2-element list).
+     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
+          comments) that might contain false matches to the target expression
+     - failOn - (default=C{None}) define expressions that are not allowed to be 
+          included in the skipped test; if found before the target expression is found, 
+          the SkipTo is not a match
+
+    Example::
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quotedString)
+        string_data.setParseAction(tokenMap(str.strip))
+        ticket_expr = (integer("issue_num") + SEP 
+                      + string_data("sev") + SEP 
+                      + string_data("desc") + SEP 
+                      + integer("days_open"))
+        
+        for tkt in ticket_expr.searchString(report):
+            print(tkt.dump())
+    prints::
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.asList = False
+        if isinstance(failOn, basestring):
+            self.failOn = ParserElement._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startloc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        expr_parse = self.expr._parse
+        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+        
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+                    
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+            
+            try:
+                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+        
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """
+    Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+    Specifically, '|' has a lower precedence than '<<', so that::
+        fwdExpr << a | b | c
+    will actually be evaluated as::
+        (fwdExpr << a) | b | c
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the C{Forward}::
+        fwdExpr << (a | b | c)
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See L{ParseResults.pprint} for an example of a recursive parser created using
+    C{Forward}.
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+        
+    def __ilshift__(self, other):
+        return self << other
+    
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+        return self.__class__.__name__ + ": ..."
+
+        # stubbed out for now - creates awful memory and perf issues
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
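+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source): a recursive grammar for nested parenthesized integer lists, using
+# the '<<=' operator recommended in the docstring above:
+#
+#   nested = Forward()
+#   item = Word(nums) | nested
+#   nested <<= Suppress('(') + ZeroOrMore(item) + Suppress(')')
+#   nested.parseString("(1 (2 3) 4)")   # -> ['1', '2', '3', '4']
+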
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """
+    Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the input string;
+    this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+    Example::
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+        
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])        
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
+    """
+    def __init__( self, expr ):
+        super(Dict,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """
+    Converter for ignoring the results of a parsed expression.
+
+    Example::
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parseString(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parseString(source))
+    prints::
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+    (See also L{delimitedList}.)
+    """
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """
+    Wrapper for parse actions, to ensure they are only called once.
+    """
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
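+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source): a parse action wrapped in OnlyOnce fires a single time; reset()
+# re-arms it:
+#
+#   def report(s, l, t):
+#       print("matched at %d" % l)
+#   wd = Word(alphas).setParseAction(OnlyOnce(report))
+#   wd.searchString("first second")   # prints "matched at 0" once; the second
+#                                     # match fails because the action raises
+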
+def traceParseAction(f):
+    """
+    Decorator for debugging parse actions. 
+    
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
+    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+        wd = Word(alphas)
+
+        @traceParseAction
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+    prints::
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+        return ret
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """
+    Helper to define a delimited list of expressions - the delimiter defaults to ','.
+    By default, the list elements and delimiters can have intervening whitespace, and
+    comments, but this can be overridden by passing C{combine=True} in the constructor.
+    If C{combine} is set to C{True}, the matching tokens are returned as a single token
+    string, with the delimiters included; otherwise, the matching tokens are returned
+    as a list of tokens, with the delimiters suppressed.
+
+    Example::
+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+    """
+    Helper to define a counted list of expressions.
+    This helper defines a pattern of the form::
+        integer expr expr expr...
+    where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
+    
+    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+    Example::
+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
+
+        # in this parser, the leading integer value is given in binary,
+        # '10' indicating that 2 values are in the array
+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
+    """
+    arrayExpr = Forward()
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousLiteral(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
+    If this is not desired, use C{matchPreviousExpr}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And(Literal(tt) for tt in tflat)
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def matchPreviousExpr(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousExpr(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+    the expressions are evaluated first, and then compared, so
+    C{"1"} is compared with C{"10"}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep <<= e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """
+    Helper to quickly define a set of alternative Literals, and makes sure to do
+    longest-first testing when there is a conflict, regardless of the input order,
+    but returns a C{L{MatchFirst}} for best performance.
+
+    Parameters:
+     - strs - a string of space-delimited literals, or a collection of string literals
+     - caseless - (default=C{False}) - treat all literals as caseless
+     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+          if creating a C{Regex} raises an exception)
+
+    Example::
+        comp_oper = oneOf("< = > <= >= !=")
+        var = Word(alphas)
+        number = Word(nums)
+        term = var | number
+        comparison_expr = term + comp_oper + term
+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
+    prints::
+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    symbols = []
+    if isinstance(strs,basestring):
+        symbols = strs.split()
+    elif isinstance(strs, collections.Iterable):
+        symbols = list(strs)
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or iterable",
+                SyntaxWarning, stacklevel=2)
+    if not symbols:
+        return NoMatch()
+
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+            else:
+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+        except Exception:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+    """
+    Helper to easily and clearly define a dictionary by specifying the respective patterns
+    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+    in the proper order.  The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression.  Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+       
+    If the optional C{asString} argument is passed as C{False}, then the return value is a 
+    C{L{ParseResults}} containing any results names that were originally matched, and a 
+    single token containing the original matched text from the input string.  So if 
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test  bold text  normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr): 
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
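+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source):
+#
+#   grouped = Group(Word(alphas) + Word(nums))
+#   grouped.parseString("ab 12")           # -> [['ab', '12']]
+#   ungroup(grouped).parseString("ab 12")  # -> ['ab', '12']
+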
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+    This helper adds the following results names:
+     - locn_start = location where matched expression begins
+     - locn_end = location where matched expression ends
+     - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters, you may want to call
+    C{L{ParserElement.parseWithTabs}}
+
+    Example::
+        wd = Word(alphas)
+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+    prints::
+        [[0, 'ljsdf', 5]]
+        [[8, 'lksdjjf', 15]]
+        [[18, 'lkkjj', 23]]
+    """
+    locator = Empty().setParseAction(lambda s,l,t: l)
+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+    r"""
+    Helper to easily define string ranges for use in Word construction.  Borrows
+    syntax from regexp '[]' string range definitions::
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+    The input string must be enclosed in []'s, and the returned string is the expanded
+    character set joined into a single string.
+    The values enclosed in the []'s may be:
+     - a single character
+     - an escaped character with a leading backslash (such as C{\-} or C{\]})
+     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
+         (C{\0x##} is also supported for backwards compatibility) 
+     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+    """
+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+    try:
+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+    except Exception:
+        return ""
+
+def matchOnlyAtCol(n):
+    """
+    Helper method for defining parse actions that require matching at a specific
+    column in the input text.
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
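+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source): reject matches that do not begin in a given column of the input
+# line:
+#
+#   col1_word = Word(alphas).setParseAction(matchOnlyAtCol(1))
+#   col1_word.searchString("begin\n   indented\nend")   # -> [['begin'], ['end']]
+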
+def replaceWith(replStr):
+    """
+    Helper method for common parse actions that simply return a literal value.  Especially
+    useful when used with C{L{transformString}()}.
+
+    Example::
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+        
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """
+    Helper parse action for removing quotation marks from parsed quoted strings.
+
+    Example::
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+    args are passed, they are forwarded to the given function as additional arguments after
+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+    parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+        
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+    prints::
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+    
+def _makeTags(tagStr, xml):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if (xml):
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    else:
+        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+                Optional( Suppress("=") + tagAttrValue ) ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    closeTag = Combine(_L("")
+
+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % resname)
+    openTag.tag = resname
+    closeTag.tag = resname
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+    Example::
+        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+        a,a_end = makeHTMLTags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+        
+        for link in link_expr.searchString(text):
+            # attributes in the <a> tag (like "href" shown here) are also accessible as named results
+            print(link.link_text, '->', link.href)
+    prints::
+        pyparsing -> http://pyparsing.wikispaces.com
+    """
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+    tags only in the given upper/lower case.
+
+    Example: similar to L{makeHTMLTags}
+    """
+    return _makeTags( tagStr, True )
+
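+# Illustrative sketch (added for clarity, not from the upstream pyparsing
+# source): XML tags match case-sensitively, unlike their makeHTMLTags
+# counterparts:
+#
+#   body, body_end = makeXMLTags("body")
+#   expr = body + SkipTo(body_end)("text") + body_end
+#   expr.searchString("<body>hi</body>")   # one match, with t.text == 'hi'
+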
+def withAttribute(*args,**attrDict):
+    """
+    Helper to create a validating parse action to be used with start tags created
+    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+    with a required attribute value, to avoid false matches on common tags such as
+    C{<TD>} or C{<DIV>}.
+
+    Call C{withAttribute} with a series of attribute names and values. Specify the list
+    of filter attributes names and values as:
+     - keyword arguments, as in C{(align="right")}, or
+     - as an explicit dict with C{**} operator, when an attribute name is also a Python
+       reserved word, as in C{**{"class":"Customer", "align":"right"}}
+     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+    For attribute names with a namespace prefix, you must use the second form. Attribute
+    names are matched insensitive to upper/lower case.
+
+    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+    To verify that the attribute exists, but without specifying a value, pass
+    C{withAttribute.ANY_VALUE} as the value.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0 </div>
+            <div type="graph">1,3 2,3 1,1 </div>
+            <div>this has no type</div>
+            </div>
+
+        '''
+        div,div_end = makeHTMLTags("div")
+
+        # only match div tag having a type attribute with value "grid"
+        div_grid = div().setParseAction(withAttribute(type="grid"))
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+
+        # construct a match with any div tag having a type attribute, regardless of the value
+        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+    prints::
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attrDict.items()
+    attrs = [(k,v) for k,v in attrs]
+    def pa(s,l,tokens):
+        for attrName,attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s,l,"no matching attribute " + attrName)
+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+                                        (attrName, tokens[attrName], attrValue))
+    return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+    """
+    Simplified version of C{L{withAttribute}} when matching on a div class - made
+    difficult because C{class} is a reserved word in Python.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0 </div>
+            <div class="graph">1,3 2,3 1,1 </div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+ + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = _Constants() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. + + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
+ - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + ret = Forward() + lastExpr = baseExpr | ( lpar + ret + rpar ) + for i,operDef in enumerate(opList): + opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + if opExpr is None or len(opExpr) != 2: + raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + else: + matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + else: + matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") 
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. + + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + if len(opener) == 1 and len(closer)==1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t:t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = 
Forward() + if ignoreExpr is not None: + ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + else: + ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. + + Parameters: + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single grammar + should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond the + the current level; set to False for block of left-most statements + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. + + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = 
srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" + +htmlComment = Regex(r"").setName("HTML comment") +"Comment of the form C{}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... (to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" + +javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers}, L{reals}, L{scientific notation}) + - common L{programming identifiers} + - network addresses (L{MAC}, L{IPv4}, L{IPv6}) + - ISO8601 L{dates} and L{datetime} + - L{UUID} + - L{comma-separated list} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a 
float""" + + sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """ + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = 'More info at the
pyparsing wiki page' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/pipenv/vendor/vendor.txt b/pipenv/vendor/vendor.txt index a8b1fd6358..94d9eb231a 100644 --- a/pipenv/vendor/vendor.txt +++ b/pipenv/vendor/vendor.txt @@ -22,6 +22,7 @@ pexpect==4.5.0 git+https://github.com/naiquevin/pipdeptree.git@2e9e5119160184f359131ea99993f0158a20cd31#egg=pipdeptree pipreqs==0.4.9 ptyprocess==0.5.2 +pyparsing>=2.0.2 pythonfinder pytoml==0.1.14 requests==2.18.4 From 60e108cb1753ed3a6e9009bab77e067807634376 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 10 May 2018 22:29:47 -0400 Subject: [PATCH 12/17] Move requirements tests to requirement library Signed-off-by: Dan Ryan --- tests/unit/test_utils.py | 79 ---------------------------------------- 1 file changed, 79 deletions(-) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index ee71b14736..7847551aec 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -4,7 +4,6 @@ from mock 
import patch, Mock from first import first import pipenv.utils -import pipenv.requirements # Pipfile format <-> requirements.txt format. @@ -114,39 +113,6 @@ def test_convert_deps_to_pip_unicode(): assert deps[0] == 'django==1.10' -@pytest.mark.utils -@pytest.mark.parametrize('expected, requirement', DEP_PIP_PAIRS) -def test_convert_from_pip(expected, requirement): - # We don't build requirements back up with the editable key, so lets drop it out - package = first(expected.keys()) - if hasattr(expected[package], 'keys') and expected[package].get('editable') is False: - del expected[package]['editable'] - assert pipenv.requirements.PipenvRequirement.from_line(requirement).as_pipfile() == expected - - -@pytest.mark.utils -def test_convert_from_pip_fail_if_no_egg(): - """Parsing should fail without `#egg=`. - """ - dep = 'git+https://github.com/kennethreitz/requests.git' - with pytest.raises(ValueError) as e: - dep = pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() - assert 'pipenv requires an #egg fragment for vcs' in str(e) - - -@pytest.mark.utils -def test_convert_from_pip_git_uri_normalize(): - """Pip does not parse this correctly, but we can (by converting to ssh://). - """ - dep = 'git+git@host:user/repo.git#egg=myname' - dep = pipenv.requirements.PipenvRequirement.from_line(dep).as_pipfile() - assert dep == { - 'myname': { - 'git': 'git@host:user/repo.git', - } - } - - class TestUtils: """Test utility functions in pipenv""" @@ -343,51 +309,6 @@ def test_win_normalize_drive(self, input_path, expected): def test_nix_normalize_drive(self, input_path, expected): assert pipenv.utils.normalize_drive(input_path) == expected - @pytest.mark.utils - @pytest.mark.requirements - def test_get_requirements(self): - # Test eggs in URLs - url_with_egg = pipenv.requirements.PipenvRequirement.from_line( - 'https://github.com/IndustriaTech/django-user-clipboard/archive/0.6.1.zip#egg=django-user-clipboard' - ).requirement - assert url_with_egg.uri == 'https://github.com/IndustriaTech/django-user-clipboard/archive/0.6.1.zip' - assert url_with_egg.name == 'django-user-clipboard' - # Test URLs without eggs pointing at installable zipfiles - url = pipenv.requirements.PipenvRequirement.from_line( - 'https://github.com/kennethreitz/tablib/archive/0.12.1.zip' - ).requirement - assert url.uri == 'https://github.com/kennethreitz/tablib/archive/0.12.1.zip' - # Test VCS urls with refs and eggnames - vcs_url = pipenv.requirements.PipenvRequirement.from_line( - 'git+https://github.com/kennethreitz/tablib.git@master#egg=tablib' - ).requirement - assert vcs_url.vcs == 'git' and vcs_url.name == 'tablib' and vcs_url.revision == 'master' - assert vcs_url.uri == 'git+https://github.com/kennethreitz/tablib.git' - # Test normal package requirement - normal = pipenv.requirements.PipenvRequirement.from_line('tablib').requirement - assert normal.name == 'tablib' - # Pinned package requirement - spec = pipenv.requirements.PipenvRequirement.from_line('tablib==0.12.1').requirement - assert spec.name == 'tablib' and spec.specs == [('==', '0.12.1')] - # Test complex package with both extras and markers - extras_markers = pipenv.requirements.PipenvRequirement.from_line( - "requests[security]; os_name=='posix'" - ).requirement - assert extras_markers.extras == ['security'] - assert extras_markers.name == 'requests' - assert extras_markers.markers == "os_name=='posix'" - # Test VCS uris get generated correctly, retain git+git@ if supplied that way, and are named according to egg fragment - git_reformat = 
pipenv.requirements.PipenvRequirement.from_line( - '-e git+git@github.com:pypa/pipenv.git#egg=pipenv' - ).requirement - assert git_reformat.uri == 'git+git@github.com:pypa/pipenv.git' - assert git_reformat.name == 'pipenv' - assert git_reformat.editable - # Previously VCS uris were being treated as local files, so make sure these are not handled that way - assert not git_reformat.local_file - # Test regression where VCS uris were being handled as paths rather than VCS entries - assert git_reformat.vcs == 'git' - @pytest.mark.utils @pytest.mark.parametrize( 'sources, expected_args', From 4c53e6e72f859a83f0f8ce82430acbd7fdd4b491 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 10 May 2018 22:51:03 -0400 Subject: [PATCH 13/17] Update vendored requirementslib Signed-off-by: Dan Ryan --- pipenv/vendor/requirementslib/__init__.py | 2 +- pipenv/vendor/requirementslib/requirements.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py index ecd72ce604..54a23abda4 100644 --- a/pipenv/vendor/requirementslib/__init__.py +++ b/pipenv/vendor/requirementslib/__init__.py @@ -1,4 +1,4 @@ # -*- coding=utf-8 -*- -__version__ = "0.0.2" +__version__ = "0.0.3" from .requirements import Requirement diff --git a/pipenv/vendor/requirementslib/requirements.py b/pipenv/vendor/requirementslib/requirements.py index 683e66fdab..2302736813 100644 --- a/pipenv/vendor/requirementslib/requirements.py +++ b/pipenv/vendor/requirementslib/requirements.py @@ -378,6 +378,8 @@ def from_pipfile(cls, name, pipfile): @property def line_part(self): seed = self.path or self.link.url or self.uri + if not self._has_hashed_name: + seed += "#egg={0}".format(self.name) editable = "-e " if self.editable else "" return "{0}{1}".format(editable, seed) From c1543509340c49e3b2b77b901832a03c86bced96 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Thu, 10 May 2018 22:58:47 -0400 Subject: [PATCH 14/17] Update requirementslib Signed-off-by: Dan Ryan --- pipenv/vendor/requirementslib/__init__.py | 2 +- pipenv/vendor/requirementslib/requirements.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pipenv/vendor/requirementslib/__init__.py b/pipenv/vendor/requirementslib/__init__.py index 54a23abda4..ef4d46a8d2 100644 --- a/pipenv/vendor/requirementslib/__init__.py +++ b/pipenv/vendor/requirementslib/__init__.py @@ -1,4 +1,4 @@ # -*- coding=utf-8 -*- -__version__ = "0.0.3" +__version__ = "0.0.4" from .requirements import Requirement diff --git a/pipenv/vendor/requirementslib/requirements.py b/pipenv/vendor/requirementslib/requirements.py index 2302736813..04e561c6d3 100644 --- a/pipenv/vendor/requirementslib/requirements.py +++ b/pipenv/vendor/requirementslib/requirements.py @@ -378,7 +378,8 @@ def from_pipfile(cls, name, pipfile): @property def line_part(self): seed = self.path or self.link.url or self.uri - if not self._has_hashed_name: + # add egg fragments to remote artifacts (valid urls only) + if not self._has_hashed_name and self.is_remote_artifact: seed += "#egg={0}".format(self.name) editable = "-e " if self.editable else "" return "{0}{1}".format(editable, seed) From fddde1cce365f85c072982683840586d40300be2 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Wed, 23 May 2018 22:16:19 -0400 Subject: [PATCH 15/17] Fix requirements cleanup Signed-off-by: Dan Ryan --- pipenv/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pipenv/utils.py b/pipenv/utils.py index 42ae82bc73..ce95ce52a9 100644 --- 
a/pipenv/utils.py +++ b/pipenv/utils.py @@ -317,7 +317,8 @@ class PipCommand(basecommand.Command): 'Please check your version specifier and version number. See PEP440 for more information.' ) ) - req_dir.cleanup() + if cleanup_req_dir: + req_dir.cleanup() raise RuntimeError if cleanup_req_dir: req_dir.cleanup() From e2dae9b50ebbaf4b646f179423b9a042699bb84b Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Wed, 23 May 2018 22:54:35 -0400 Subject: [PATCH 16/17] Handle prep failures for dist preparation Signed-off-by: Dan Ryan --- .../notpip/_internal/operations/prepare.py | 6 +++++- tasks/vendoring/patches/patched/pip10.patch | 17 +++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/pipenv/patched/notpip/_internal/operations/prepare.py b/pipenv/patched/notpip/_internal/operations/prepare.py index b3f6ce568e..46538373cf 100644 --- a/pipenv/patched/notpip/_internal/operations/prepare.py +++ b/pipenv/patched/notpip/_internal/operations/prepare.py @@ -151,7 +151,11 @@ def format_reqs(rs): else: self.req.build_env = NoOpBuildEnvironment(no_clean=False) - self.req.run_egg_info() + try: + self.req.run_egg_info() + except (OSError, TypeError): + self.req._correct_build_location() + self.req.run_egg_info() self.req.assert_source_matches_version() diff --git a/tasks/vendoring/patches/patched/pip10.patch b/tasks/vendoring/patches/patched/pip10.patch index 42b3c939c7..92e87e60d3 100644 --- a/tasks/vendoring/patches/patched/pip10.patch +++ b/tasks/vendoring/patches/patched/pip10.patch @@ -498,10 +498,23 @@ index c71f17d2..3e29a49d 100644 ] + list(self.global_options) diff --git a/pipenv/patched/pip/_internal/operations/prepare.py b/pipenv/patched/pip/_internal/operations/prepare.py -index 27e3a5dd..0be76f70 100644 +index 27e3a5dd..4d120faa 100644 --- a/pipenv/patched/pip/_internal/operations/prepare.py +++ b/pipenv/patched/pip/_internal/operations/prepare.py -@@ -233,15 +233,15 @@ class RequirementPreparer(object): +@@ -151,7 +151,11 @@ class IsSDist(DistAbstraction): + else: + self.req.build_env = NoOpBuildEnvironment(no_clean=False) + +- self.req.run_egg_info() ++ try: ++ self.req.run_egg_info() ++ except (OSError, TypeError): ++ self.req._correct_build_location() ++ self.req.run_egg_info() + self.req.assert_source_matches_version() + + +@@ -233,15 +237,15 @@ class RequirementPreparer(object): # FIXME: this won't upgrade when there's an existing # package unpacked in `req.source_dir` # package unpacked in `req.source_dir` From aa4ca1bfd4c8098469c2bf753bcef3827eb093b3 Mon Sep 17 00:00:00 2001 From: Dan Ryan Date: Wed, 23 May 2018 23:55:09 -0400 Subject: [PATCH 17/17] Fix special case resolution with ignore python Signed-off-by: Dan Ryan --- pipenv/patched/piptools/repositories/pypi.py | 2 +- .../vendoring/patches/patched/piptools.patch | 39 ++++++++----------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py index e412a1f774..6355f959e0 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -302,7 +302,7 @@ def get_legacy_dependencies(self, ireq): upgrade_strategy="to-satisfy-only", force_reinstall=False, ignore_dependencies=False, - ignore_requires_python=False, + ignore_requires_python=True, ignore_installed=True, isolated=False, wheel_cache=self.wheel_cache, diff --git a/tasks/vendoring/patches/patched/piptools.patch b/tasks/vendoring/patches/patched/piptools.patch index a6c56e48d1..6beaad8ea8 100644 --- 
a/tasks/vendoring/patches/patched/piptools.patch +++ b/tasks/vendoring/patches/patched/piptools.patch @@ -19,7 +19,7 @@ index 4e6174c..75f9b49 100644 # NOTE # We used to store the cache dir under ~/.pip-tools, which is not the diff --git a/pipenv/patched/piptools/repositories/pypi.py b/pipenv/patched/piptools/repositories/pypi.py -index 1c4b943..e412a1f 100644 +index 1c4b943..8320e14 100644 --- a/pipenv/patched/piptools/repositories/pypi.py +++ b/pipenv/patched/piptools/repositories/pypi.py @@ -15,10 +15,16 @@ from .._compat import ( @@ -32,33 +32,18 @@ index 1c4b943..e412a1f 100644 + SafeFileCache, ) -+from notpip._vendor.packaging.requirements import InvalidRequirement -+from notpip._vendor.pyparsing import ParseException ++from pip._vendor.packaging.requirements import InvalidRequirement ++from pip._vendor.pyparsing import ParseException + from ..cache import CACHE_DIR +from pipenv.environments import PIPENV_CACHE_DIR from ..exceptions import NoCandidateFound from ..utils import (fs_str, is_pinned_requirement, lookup_table, make_install_requirement) -@@ -26,15 +32,49 @@ from .base import BaseRepository +@@ -37,6 +43,40 @@ except ImportError: + from pip.wheel import WheelCache - try: -- from pip._internal.operations.prepare import RequirementPreparer -- from pip._internal.resolve import Resolver as PipResolver -+ from notpip._internal.operations.prepare import RequirementPreparer -+ from notpip._internal.resolve import Resolver as PipResolver - except ImportError: - pass - - try: -- from pip._internal.cache import WheelCache -+ from notpip._internal.cache import WheelCache - except ImportError: -- from pip.wheel import WheelCache -+ from notpip.wheel import WheelCache -+ -+ +class HashCache(SafeFileCache): + """Caches hashes of PyPI artifacts so we do not need to re-download them + @@ -91,9 +76,11 @@ index 1c4b943..e412a1f 100644 + for chunk in iter(lambda: fp.read(8096), b""): + h.update(chunk) + return ":".join([FAVORITE_HASH, h.hexdigest()]) - - ++ ++ class PyPIRepository(BaseRepository): + DEFAULT_INDEX_URL = PyPI.simple_url + @@ -46,10 +86,11 @@ class PyPIRepository(BaseRepository): config), but any other PyPI mirror can be used if index_urls is changed/configured on the Finder. @@ -239,7 +226,13 @@ index 1c4b943..e412a1f 100644 ) except TypeError: # Pip >= 10 (new resolver!) -@@ -195,9 +307,39 @@ class PyPIRepository(BaseRepository): +@@ -190,14 +302,44 @@ class PyPIRepository(BaseRepository): + upgrade_strategy="to-satisfy-only", + force_reinstall=False, + ignore_dependencies=False, +- ignore_requires_python=False, ++ ignore_requires_python=True, + ignore_installed=True, isolated=False, wheel_cache=self.wheel_cache, use_user_site=False,
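For context on the HashCache introduced by the piptools patch above: it derives pip-compatible hash strings by streaming each artifact through hashlib. A standalone sketch of the same idea (the function name here is illustrative, not the patched method):

    import hashlib

    FAVORITE_HASH = "sha256"

    def file_hash(path):
        # stream in 8096-byte chunks, as in the patch, so large
        # wheels/sdists are never read fully into memory
        h = hashlib.new(FAVORITE_HASH)
        with open(path, "rb") as fp:
            for chunk in iter(lambda: fp.read(8096), b""):
                h.update(chunk)
        return ":".join([FAVORITE_HASH, h.hexdigest()])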